From dedf9e5654764f47182d83887607141cfb03e3b0 Mon Sep 17 00:00:00 2001 From: DeDe Morton Date: Thu, 30 Nov 2017 14:52:25 -0800 Subject: [PATCH 1/5] Create prototype of versioned plugin docs --- docs/versioned-plugins/codecs.asciidoc | 6 + .../codecs/rubydebug-index.asciidoc | 14 + .../codecs/rubydebug-v3.0.3.asciidoc | 47 ++ .../codecs/rubydebug-v3.0.4.asciidoc | 47 ++ docs/versioned-plugins/filters.asciidoc | 7 + .../filters/multiline-index.asciidoc | 14 + .../filters/multiline-v3.0.3.asciidoc | 194 +++++ .../filters/multiline-v3.0.4.asciidoc | 195 +++++ .../versioned-plugins/include/filter.asciidoc | 177 +++++ docs/versioned-plugins/include/input.asciidoc | 104 +++ .../versioned-plugins/include/output.asciidoc | 51 ++ .../include/plugin-intro.asciidoc | 13 + .../include/plugin_header.asciidoc | 43 ++ .../include/version-list-intro.asciidoc | 13 + docs/versioned-plugins/index.asciidoc | 20 + docs/versioned-plugins/inputs.asciidoc | 8 + .../inputs/beats-index.asciidoc | 14 + .../inputs/beats-v5.0.0.asciidoc | 222 ++++++ .../inputs/beats-v5.0.1.asciidoc | 222 ++++++ .../inputs/dead_letter_queue-index.asciidoc | 15 + .../inputs/dead_letter_queue-v1.0.0.asciidoc | 110 +++ .../inputs/dead_letter_queue-v1.1.0.asciidoc | 110 +++ docs/versioned-plugins/outputs.asciidoc | 6 + .../outputs/elasticsearch-index.asciidoc | 14 + .../outputs/elasticsearch-v8.1.0.asciidoc | 664 ++++++++++++++++++ .../outputs/elasticsearch-v8.1.1.asciidoc | 664 ++++++++++++++++++ 26 files changed, 2994 insertions(+) create mode 100644 docs/versioned-plugins/codecs.asciidoc create mode 100644 docs/versioned-plugins/codecs/rubydebug-index.asciidoc create mode 100644 docs/versioned-plugins/codecs/rubydebug-v3.0.3.asciidoc create mode 100644 docs/versioned-plugins/codecs/rubydebug-v3.0.4.asciidoc create mode 100644 docs/versioned-plugins/filters.asciidoc create mode 100644 docs/versioned-plugins/filters/multiline-index.asciidoc create mode 100644 docs/versioned-plugins/filters/multiline-v3.0.3.asciidoc create mode 100644 docs/versioned-plugins/filters/multiline-v3.0.4.asciidoc create mode 100644 docs/versioned-plugins/include/filter.asciidoc create mode 100644 docs/versioned-plugins/include/input.asciidoc create mode 100644 docs/versioned-plugins/include/output.asciidoc create mode 100644 docs/versioned-plugins/include/plugin-intro.asciidoc create mode 100644 docs/versioned-plugins/include/plugin_header.asciidoc create mode 100644 docs/versioned-plugins/include/version-list-intro.asciidoc create mode 100644 docs/versioned-plugins/index.asciidoc create mode 100644 docs/versioned-plugins/inputs.asciidoc create mode 100644 docs/versioned-plugins/inputs/beats-index.asciidoc create mode 100644 docs/versioned-plugins/inputs/beats-v5.0.0.asciidoc create mode 100644 docs/versioned-plugins/inputs/beats-v5.0.1.asciidoc create mode 100644 docs/versioned-plugins/inputs/dead_letter_queue-index.asciidoc create mode 100644 docs/versioned-plugins/inputs/dead_letter_queue-v1.0.0.asciidoc create mode 100644 docs/versioned-plugins/inputs/dead_letter_queue-v1.1.0.asciidoc create mode 100644 docs/versioned-plugins/outputs.asciidoc create mode 100644 docs/versioned-plugins/outputs/elasticsearch-index.asciidoc create mode 100644 docs/versioned-plugins/outputs/elasticsearch-v8.1.0.asciidoc create mode 100644 docs/versioned-plugins/outputs/elasticsearch-v8.1.1.asciidoc diff --git a/docs/versioned-plugins/codecs.asciidoc b/docs/versioned-plugins/codecs.asciidoc new file mode 100644 index 000000000..8de113467 --- /dev/null +++ 
b/docs/versioned-plugins/codecs.asciidoc @@ -0,0 +1,6 @@ +:type: codec +:type_uc: Codec + +include::include/plugin-intro.asciidoc[] + +include::codecs/rubydebug-index.asciidoc[] diff --git a/docs/versioned-plugins/codecs/rubydebug-index.asciidoc b/docs/versioned-plugins/codecs/rubydebug-index.asciidoc new file mode 100644 index 000000000..b2028589f --- /dev/null +++ b/docs/versioned-plugins/codecs/rubydebug-index.asciidoc @@ -0,0 +1,14 @@ +:plugin: rubydebug +:type: codec + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <<v3.0.4-plugins-codecs-rubydebug,v3.0.4>> | Aug 21, 2017 (latest) +| <<v3.0.3-plugins-codecs-rubydebug,v3.0.3>> | Aug 21, 2017 +|======================================================================= + + +include::rubydebug-v3.0.4.asciidoc[] +include::rubydebug-v3.0.3.asciidoc[] diff --git a/docs/versioned-plugins/codecs/rubydebug-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/rubydebug-v3.0.3.asciidoc new file mode 100644 index 000000000..872beb2dd --- /dev/null +++ b/docs/versioned-plugins/codecs/rubydebug-v3.0.3.asciidoc @@ -0,0 +1,47 @@ +:plugin: rubydebug +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-08-21 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-rubydebug/blob/v3.0.3/CHANGELOG.md +:include_path: ../include +:logstash-ref: http://www.elastic.co/guide/en/logstash/6.0 +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Rubydebug codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The rubydebug codec will output your Logstash event data using +the Ruby Awesome Print library. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Rubydebug Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-metadata>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-metadata"] +===== `metadata` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Should the event's metadata be included? + + diff --git a/docs/versioned-plugins/codecs/rubydebug-v3.0.4.asciidoc b/docs/versioned-plugins/codecs/rubydebug-v3.0.4.asciidoc new file mode 100644 index 000000000..fefc2524f --- /dev/null +++ b/docs/versioned-plugins/codecs/rubydebug-v3.0.4.asciidoc @@ -0,0 +1,47 @@ +:plugin: rubydebug +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-08-21 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-rubydebug/blob/v3.0.4/CHANGELOG.md +:include_path: ../include +:logstash-ref: http://www.elastic.co/guide/en/logstash/6.0 +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT!
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Rubydebug codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The rubydebug codec will output your Logstash event data using +the Ruby Awesome Print library. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Rubydebug Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-metadata>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-metadata"] +===== `metadata` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Should the event's metadata be included? + + diff --git a/docs/versioned-plugins/filters.asciidoc b/docs/versioned-plugins/filters.asciidoc new file mode 100644 index 000000000..f43eb727c --- /dev/null +++ b/docs/versioned-plugins/filters.asciidoc @@ -0,0 +1,7 @@ +:type: filter +:type_uc: Filter + +include::include/plugin-intro.asciidoc[] + +include::filters/multiline-index.asciidoc[] + diff --git a/docs/versioned-plugins/filters/multiline-index.asciidoc b/docs/versioned-plugins/filters/multiline-index.asciidoc new file mode 100644 index 000000000..b7fb6858c --- /dev/null +++ b/docs/versioned-plugins/filters/multiline-index.asciidoc @@ -0,0 +1,14 @@ +:plugin: multiline +:type: filter + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <<v3.0.4-plugins-filters-multiline,v3.0.4>> | Aug 15, 2017 (latest) +| <<v3.0.3-plugins-filters-multiline,v3.0.3>> | Aug 15, 2017 +|======================================================================= + + +include::multiline-v3.0.4.asciidoc[] +include::multiline-v3.0.3.asciidoc[] diff --git a/docs/versioned-plugins/filters/multiline-v3.0.3.asciidoc b/docs/versioned-plugins/filters/multiline-v3.0.3.asciidoc new file mode 100644 index 000000000..18a03368f --- /dev/null +++ b/docs/versioned-plugins/filters/multiline-v3.0.3.asciidoc @@ -0,0 +1,194 @@ +:plugin: multiline +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-multiline/blob/v3.0.3/CHANGELOG.md +:include_path: ../include +:logstash-ref: http://www.elastic.co/guide/en/logstash/6.0 +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Multiline filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This filter will collapse multiline messages from a single source into one Logstash event. + +The original goal of this filter was to allow joining of multi-line messages +from files into a single event. For example - joining java exception and +stacktrace messages into a single event. + +NOTE: This filter will not work with multiple worker threads `-w 2` on the logstash command line.
+ +The config looks like this: +[source,ruby] + filter { + multiline { + pattern => "pattern, a regexp" + negate => boolean + what => "previous" or "next" + } + } + +The `pattern` should be a regexp ({logstash-ref}/plugins-filters-grok.html[grok] patterns are +supported) which matches what you believe to be an indicator that the field +is part of an event consisting of multiple lines of log data. + +The `what` must be `previous` or `next` and indicates the relation +to the multi-line event. + +The `negate` can be `true` or `false` (defaults to `false`). If `true`, a +message not matching the pattern will constitute a match of the multiline +filter and the `what` will be applied. (vice-versa is also true) + +For example, Java stack traces are multiline and usually have the message +starting at the far-left, with each subsequent line indented. Do this: +[source,ruby] + filter { + multiline { + pattern => "^\s" + what => "previous" + } + } + +This says that any line starting with whitespace belongs to the previous line. + +Another example is C line continuations (backslash). Here's how to do that: +[source,ruby] + filter { + multiline { + pattern => "\\$" + what => "next" + } + } + +This says that any line ending with a backslash should be combined with the +following line. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Multiline Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-allow_duplicates>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-max_age>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-negate>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-pattern>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes +| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-source>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-stream_identity>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-what>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["previous", "next"]`|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-allow_duplicates"] +===== `allow_duplicates` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `true` + +Allow duplicate values on the source field. + +[id="{version}-plugins-{type}s-{plugin}-max_age"] +===== `max_age` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `5` + +The maximum age an event can be (in seconds) before it is automatically +flushed.
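+ +For example, a sketch that shortens the flush window for short-lived streams (the value `10` is purely illustrative, not a recommendation): +[source,ruby] + filter { + multiline { + pattern => "^\s" + what => "previous" + max_age => 10 + } + }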
+ +[id="{version}-plugins-{type}s-{plugin}-negate"] +===== `negate` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Negate the regexp pattern ('if not matched'). + +[id="{version}-plugins-{type}s-{plugin}-pattern"] +===== `pattern` + + * This is a required setting. + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The expression to match. The same matching engine as the +{logstash-ref}/plugins-filters-grok.html[grok] filter is used, so the expression can contain +a plain regular expression or one that also contains grok patterns. + +[id="{version}-plugins-{type}s-{plugin}-patterns_dir"] +===== `patterns_dir` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `[]` + +Logstash ships by default with a bunch of patterns, so you don't +necessarily need to define this yourself unless you are adding additional +patterns. + +Pattern files are plain text with format: +[source,ruby] + NAME PATTERN + +For example: +[source,ruby] + NUMBER \d+ + +[id="{version}-plugins-{type}s-{plugin}-source"] +===== `source` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"message"` + +The field name to execute the pattern match on. + +[id="{version}-plugins-{type}s-{plugin}-stream_identity"] +===== `stream_identity` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"%{host}.%{path}.%{type}"` + +The stream identity is how the multiline filter determines which stream an +event belongs to. This is generally used for differentiating, say, events +coming from multiple files in the same file input, or multiple connections +coming from a tcp input. + +The default value here is usually what you want, but there are some cases +where you want to change it. One such example is if you are using a tcp +input with only one client connecting at any time. If that client +reconnects (due to error or client restart), then logstash will identify +the new connection as a new stream and break any multiline goodness that +may have occurred between the old and new connection. To solve this use +case, you can use `%{@source_host}.%{@type}` instead. + +[id="{version}-plugins-{type}s-{plugin}-what"] +===== `what` + + * This is a required setting. + * Value can be any of: `previous`, `next` + * There is no default value for this setting. + +If the pattern matched, does the event belong to the next or previous event? + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/multiline-v3.0.4.asciidoc b/docs/versioned-plugins/filters/multiline-v3.0.4.asciidoc new file mode 100644 index 000000000..40a82e1ba --- /dev/null +++ b/docs/versioned-plugins/filters/multiline-v3.0.4.asciidoc @@ -0,0 +1,195 @@ +:plugin: multiline +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-multiline/blob/v3.0.4/CHANGELOG.md +:include_path: ../include +:logstash-ref: http://www.elastic.co/guide/en/logstash/6.0 +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT!
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Multiline filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + + +This filter will collapse multiline messages from a single source into one Logstash event. + +The original goal of this filter was to allow joining of multi-line messages +from files into a single event. For example - joining java exception and +stacktrace messages into a single event. + +NOTE: This filter will not work with multiple worker threads `-w 2` on the logstash command line. + +The config looks like this: +[source,ruby] + filter { + multiline { + pattern => "pattern, a regexp" + negate => boolean + what => "previous" or "next" + } + } + +The `pattern` should be a regexp ({logstash-ref}/plugins-filters-grok.html[grok] patterns are +supported) which matches what you believe to be an indicator that the field +is part of an event consisting of multiple lines of log data. + +The `what` must be `previous` or `next` and indicates the relation +to the multi-line event. + +The `negate` can be `true` or `false` (defaults to `false`). If `true`, a +message not matching the pattern will constitute a match of the multiline +filter and the `what` will be applied. (vice-versa is also true) + +For example, Java stack traces are multiline and usually have the message +starting at the far-left, with each subsequent line indented. Do this: +[source,ruby] + filter { + multiline { + pattern => "^\s" + what => "previous" + } + } + +This says that any line starting with whitespace belongs to the previous line. + +Another example is C line continuations (backslash). Here's how to do that: +[source,ruby] + filter { + multiline { + pattern => "\\$" + what => "next" + } + } + +This says that any line ending with a backslash should be combined with the +following line. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Multiline Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-allow_duplicates>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-max_age>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-negate>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-pattern>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes +| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-source>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-stream_identity>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-what>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["previous", "next"]`|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. 
+ +  + +[id="{version}-plugins-{type}s-{plugin}-allow_duplicates"] +===== `allow_duplicates` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `true` + +Allow duplicate values on the source field. + +[id="{version}-plugins-{type}s-{plugin}-max_age"] +===== `max_age` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `5` + +The maximum age an event can be (in seconds) before it is automatically +flushed. + +[id="{version}-plugins-{type}s-{plugin}-negate"] +===== `negate` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Negate the regexp pattern ('if not matched'). + +[id="{version}-plugins-{type}s-{plugin}-pattern"] +===== `pattern` + + * This is a required setting. + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +The expression to match. The same matching engine as the +{logstash-ref}/plugins-filters-grok.html[grok] filter is used, so the expression can contain +a plain regular expression or one that also contains grok patterns. + +[id="{version}-plugins-{type}s-{plugin}-patterns_dir"] +===== `patterns_dir` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `[]` + +Logstash ships by default with a bunch of patterns, so you don't +necessarily need to define this yourself unless you are adding additional +patterns. + +Pattern files are plain text with format: +[source,ruby] + NAME PATTERN + +For example: +[source,ruby] + NUMBER \d+ + +[id="{version}-plugins-{type}s-{plugin}-source"] +===== `source` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"message"` + +The field name to execute the pattern match on. + +[id="{version}-plugins-{type}s-{plugin}-stream_identity"] +===== `stream_identity` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"%{host}.%{path}.%{type}"` + +The stream identity is how the multiline filter determines which stream an +event belongs to. This is generally used for differentiating, say, events +coming from multiple files in the same file input, or multiple connections +coming from a tcp input. + +The default value here is usually what you want, but there are some cases +where you want to change it. One such example is if you are using a tcp +input with only one client connecting at any time. If that client +reconnects (due to error or client restart), then logstash will identify +the new connection as a new stream and break any multiline goodness that +may have occurred between the old and new connection. To solve this use +case, you can use `%{@source_host}.%{@type}` instead. + +[id="{version}-plugins-{type}s-{plugin}-what"] +===== `what` + + * This is a required setting. + * Value can be any of: `previous`, `next` + * There is no default value for this setting. + +If the pattern matched, does the event belong to the next or previous event?
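+ +As an illustration of `negate` and `what` working together (a sketch that assumes each log line begins with an ISO8601 timestamp), any line that does not match the timestamp pattern is joined to the line before it: +[source,ruby] + filter { + multiline { + pattern => "^%{TIMESTAMP_ISO8601} " + negate => true + what => "previous" + } + }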
+ + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/include/filter.asciidoc b/docs/versioned-plugins/include/filter.asciidoc new file mode 100644 index 000000000..cc01bba7c --- /dev/null +++ b/docs/versioned-plugins/include/filter.asciidoc @@ -0,0 +1,177 @@ +==== Common Options + +The following configuration options are supported by all filter plugins: + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-add_field>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-add_tag>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-enable_metric>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-periodic_flush>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-remove_field>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-remove_tag>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +|======================================================================= + +[id="{version}-plugins-{type}s-{plugin}-add_field"] +===== `add_field` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * Default value is `{}` + +If this filter is successful, add any arbitrary fields to this event. +Field names can be dynamic and include parts of the event using the `%{field}` syntax. + +Example: + +["source","json",subs="attributes"] + filter { + {plugin} { + add_field => { "foo_%\{somefield\}" => "Hello world, from %\{host\}" } + } + } + +["source","json",subs="attributes"] + # You can also add multiple fields at once: + filter { + {plugin} { + add_field => { + "foo_%\{somefield\}" => "Hello world, from %\{host\}" + "new_field" => "new_static_value" + } + } + } + +If the event has field `"somefield" == "hello"` this filter, on success, +would add field `foo_hello` if it is present, with the +value above and the `%{host}` piece replaced with that value from the +event. The second example would also add a hardcoded field. + +[id="{version}-plugins-{type}s-{plugin}-add_tag"] +===== `add_tag` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `[]` + +If this filter is successful, add arbitrary tags to the event. +Tags can be dynamic and include parts of the event using the `%{field}` +syntax. + +Example: + +["source","json",subs="attributes"] + filter { + {plugin} { + add_tag => [ "foo_%\{somefield\}" ] + } + } + +["source","json",subs="attributes"] + # You can also add multiple tags at once: + filter { + {plugin} { + add_tag => [ "foo_%\{somefield\}", "taggedy_tag"] + } + } + +If the event has field `"somefield" == "hello"` this filter, on success, +would add a tag `foo_hello` (and the second example would of course add a `taggedy_tag` tag).
+ +[id="{version}-plugins-{type}s-{plugin}-enable_metric"] +===== `enable_metric` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `true` + +Disable or enable metric logging for this specific plugin instance. +By default we record all the metrics we can, but you can disable metrics collection +for a specific plugin. + +[id="{version}-plugins-{type}s-{plugin}-id"] +===== `id` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. +It is strongly recommended to set this ID in your configuration. This is particularly useful +when you have two or more plugins of the same type, for example, if you have 2 {plugin} filters. +Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs. + + +["source","json",subs="attributes"] + filter { + {plugin} { + id => "ABC" + } + } + +[id="{version}-plugins-{type}s-{plugin}-periodic_flush"] +===== `periodic_flush` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Call the filter flush method at a regular interval. +Optional. + +[id="{version}-plugins-{type}s-{plugin}-remove_field"] +===== `remove_field` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `[]` + +If this filter is successful, remove arbitrary fields from this event. +Field names can be dynamic and include parts of the event using the `%{field}` syntax. +Example: + +["source","json",subs="attributes"] + filter { + {plugin} { + remove_field => [ "foo_%\{somefield\}" ] + } + } + +["source","json",subs="attributes"] + # You can also remove multiple fields at once: + filter { + {plugin} { + remove_field => [ "foo_%\{somefield\}", "my_extraneous_field" ] + } + } + +If the event has field `"somefield" == "hello"` this filter, on success, +would remove the field with name `foo_hello` if it is present. The second +example would remove an additional, non-dynamic field. + +[id="{version}-plugins-{type}s-{plugin}-remove_tag"] +===== `remove_tag` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `[]` + +If this filter is successful, remove arbitrary tags from the event. +Tags can be dynamic and include parts of the event using the `%{field}` +syntax. + +Example: + +["source","json",subs="attributes"] + filter { + {plugin} { + remove_tag => [ "foo_%\{somefield\}" ] + } + } + +["source","json",subs="attributes"] + # You can also remove multiple tags at once: + filter { + {plugin} { + remove_tag => [ "foo_%\{somefield\}", "sad_unwanted_tag"] + } + } + +If the event has field `"somefield" == "hello"` this filter, on success, +would remove the tag `foo_hello` if it is present. The second example +would remove a sad, unwanted tag as well.
diff --git a/docs/versioned-plugins/include/input.asciidoc b/docs/versioned-plugins/include/input.asciidoc new file mode 100644 index 000000000..698864401 --- /dev/null +++ b/docs/versioned-plugins/include/input.asciidoc @@ -0,0 +1,104 @@ +==== Common Options + +The following configuration options are supported by all input plugins: + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-add_field>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-codec>> |{logstash-ref}/configuration-file-structure.html#codec[codec]|No +| <<{version}-plugins-{type}s-{plugin}-enable_metric>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-tags>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +|======================================================================= + + +==== Details + +  + +[id="{version}-plugins-{type}s-{plugin}-add_field"] +===== `add_field` + + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] + * Default value is `{}` + +Add a field to an event. + +[id="{version}-plugins-{type}s-{plugin}-codec"] +===== `codec` + + * Value type is {logstash-ref}/configuration-file-structure.html#codec[codec] + * Default value is `"plain"` + +The codec used for input data. Input codecs are a convenient method for decoding your data before it enters the input, without needing a separate filter in your Logstash pipeline. + + +[id="{version}-plugins-{type}s-{plugin}-enable_metric"] +===== `enable_metric` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `true` + +Disable or enable metric logging for this specific plugin instance. +By default we record all the metrics we can, but you can disable metrics collection +for a specific plugin. + +[id="{version}-plugins-{type}s-{plugin}-id"] +===== `id` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. +It is strongly recommended to set this ID in your configuration. This is particularly useful +when you have two or more plugins of the same type, for example, if you have 2 {plugin} inputs. +Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs. + +["source","json",subs="attributes"] +--------------------------------------------------------------------------------------------------- +input { + {plugin} { + id => "my_plugin_id" + } +} +--------------------------------------------------------------------------------------------------- + +[id="{version}-plugins-{type}s-{plugin}-tags"] +===== `tags` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * There is no default value for this setting. + +Add any number of arbitrary tags to your event. + +This can help with processing later.
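+ +For example (a sketch; the tag names are arbitrary): + +["source","json",subs="attributes"] +--------------------------------------------------------------------------------------------------- +input { + {plugin} { + tags => ["staging", "reprocess_later"] + } +} +---------------------------------------------------------------------------------------------------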
+ +[id="{version}-plugins-{type}s-{plugin}-type"] +===== `type` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Add a `type` field to all events handled by this input. + +Types are used mainly for filter activation. + +The type is stored as part of the event itself, so you can +also use the type to search for it in Kibana. + +If you try to set a type on an event that already has one (for +example when you send an event from a shipper to an indexer) then +a new input will not override the existing type. A type set at +the shipper stays with that event for its life even +when sent to another Logstash server. + +ifeval::["{type}"=="input" and "{plugin}"=="beats"] + +NOTE: The Beats shipper automatically sets the `type` field on the event. +You cannot override this setting in the Logstash config. If you specify +a setting for the <<{version}-plugins-inputs-beats-type,`type`>> config option in +Logstash, it is ignored. + +endif::[] + diff --git a/docs/versioned-plugins/include/output.asciidoc b/docs/versioned-plugins/include/output.asciidoc new file mode 100644 index 000000000..9a5cd3d16 --- /dev/null +++ b/docs/versioned-plugins/include/output.asciidoc @@ -0,0 +1,51 @@ +==== Common Options + +The following configuration options are supported by all output plugins: + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-codec>> |{logstash-ref}/configuration-file-structure.html#codec[codec]|No +| <<{version}-plugins-{type}s-{plugin}-enable_metric>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +|======================================================================= + +[id="{version}-plugins-{type}s-{plugin}-codec"] +===== `codec` + + * Value type is {logstash-ref}/configuration-file-structure.html#codec[codec] + * Default value is `"plain"` + +The codec used for output data. Output codecs are a convenient method for encoding your data before it leaves the output, without needing a separate filter in your Logstash pipeline. + +[id="{version}-plugins-{type}s-{plugin}-enable_metric"] +===== `enable_metric` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `true` + +Disable or enable metric logging for this specific plugin instance. +By default we record all the metrics we can, but you can disable metrics collection +for a specific plugin. + +[id="{version}-plugins-{type}s-{plugin}-id"] +===== `id` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * There is no default value for this setting. + +Add a unique `ID` to the plugin configuration. If no ID is specified, Logstash will generate one. +It is strongly recommended to set this ID in your configuration. This is particularly useful +when you have two or more plugins of the same type, for example, if you have 2 {plugin} outputs. +Adding a named ID in this case will help in monitoring Logstash when using the monitoring APIs.
+ +["source","json",subs="attributes"] +--------------------------------------------------------------------------------------------------- +output { + {plugin} { + id => "my_plugin_id" + } +} +--------------------------------------------------------------------------------------------------- + + diff --git a/docs/versioned-plugins/include/plugin-intro.asciidoc b/docs/versioned-plugins/include/plugin-intro.asciidoc new file mode 100644 index 000000000..2087fcb7b --- /dev/null +++ b/docs/versioned-plugins/include/plugin-intro.asciidoc @@ -0,0 +1,13 @@ +[id="{type}-plugins"] += {type_uc} plugins + +[partintro] +-- +Looking for a specific version of the Logstash plugin docs? You've come to the +right place. This section contains all available versions of the documentation +for the Logstash {type} plugins. + +Want to learn how to use Logstash? See the +{logstash-ref}/index.html[Logstash Reference]. + +-- diff --git a/docs/versioned-plugins/include/plugin_header.asciidoc b/docs/versioned-plugins/include/plugin_header.asciidoc new file mode 100644 index 000000000..2c4e168a6 --- /dev/null +++ b/docs/versioned-plugins/include/plugin_header.asciidoc @@ -0,0 +1,43 @@ +ifeval::["{versioned_docs}"!="true"] +++++ +{plugin} +++++ +endif::[] +ifeval::["{versioned_docs}"=="true"] +++++ +{plugin} {version} +++++ +endif::[] + +* Plugin version: {version} +* Released on: {release_date} +* {changelog_url}[Changelog] + +ifeval::["{versioned_docs}"!="true"] + +For other plugin versions, see the +<<{type}-{plugin}-index,Versioned {plugin} {type} plugin docs>>. + +endif::[] + +ifeval::["{versioned_docs}"=="true"] + +For other versions, see the <<{type}-{plugin}-index,overview list>>. + +To learn more about Logstash, see the {logstash-ref}/index.html[Logstash Reference]. + +endif::[] + +ifeval::[("{default_plugin}"=="0") and ("{versioned_docs}"!="true")] + +==== Installation + +For plugins not bundled by default, it is easy to install by running +bin/logstash-plugin install logstash-{type}-{plugin}+. See {logstash-ref}/working-with-plugins.html[Working with plugins] for more details. + +endif::[] + +==== Getting Help + +For questions about the plugin, open a topic in the http://discuss.elastic.co[Discuss] forums. For bugs or feature requests, open an issue in https://github.com/logstash-plugins/logstash-{type}-{plugin}[Github]. +For the list of Elastic supported plugins, please consult the https://www.elastic.co/support/matrix#show_logstash_plugins[Elastic Support Matrix]. + diff --git a/docs/versioned-plugins/include/version-list-intro.asciidoc b/docs/versioned-plugins/include/version-list-intro.asciidoc new file mode 100644 index 000000000..5ba89ed4b --- /dev/null +++ b/docs/versioned-plugins/include/version-list-intro.asciidoc @@ -0,0 +1,13 @@ +[id="{type}-{plugin}-index"] + +== Versioned {plugin} {type} plugin docs +++++ +{plugin} +++++ + +This page lists all available versions of the documentation for this plugin. +To see which version of the plugin you have installed, run `bin/logstash-plugin +list --verbose`. + +NOTE: Versioned plugin documentation is not available for plugins released prior +to Logstash 6.0. 
diff --git a/docs/versioned-plugins/index.asciidoc b/docs/versioned-plugins/index.asciidoc new file mode 100644 index 000000000..d1d20fb6b --- /dev/null +++ b/docs/versioned-plugins/index.asciidoc @@ -0,0 +1,20 @@ +:versioned_docs: true + +// Set include path for static files that live in the logstash repo +:include_path: ../include + +include::{asciidoc-dir}/../../shared/attributes.asciidoc[] + +// Override logstash-ref setting imported from shared/attributes.asciidoc +:logstash-ref: http://www.elastic.co/guide/en/logstash/current + +[[logstash-plugin-reference]] += Versioned Plugin Reference + +include::inputs.asciidoc[] + +include::outputs.asciidoc[] + +include::filters.asciidoc[] + +include::codecs.asciidoc[] diff --git a/docs/versioned-plugins/inputs.asciidoc b/docs/versioned-plugins/inputs.asciidoc new file mode 100644 index 000000000..7aef78c5e --- /dev/null +++ b/docs/versioned-plugins/inputs.asciidoc @@ -0,0 +1,8 @@ +:type: input +:type_uc: Input + +include::include/plugin-intro.asciidoc[] + +include::inputs/beats-index.asciidoc[] + +include::inputs/dead_letter_queue-index.asciidoc[] diff --git a/docs/versioned-plugins/inputs/beats-index.asciidoc b/docs/versioned-plugins/inputs/beats-index.asciidoc new file mode 100644 index 000000000..48e18c6f0 --- /dev/null +++ b/docs/versioned-plugins/inputs/beats-index.asciidoc @@ -0,0 +1,14 @@ +:plugin: beats +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <<v5.0.1-plugins-inputs-beats,v5.0.1>> | Aug 15, 2017 (latest) +| <<v5.0.0-plugins-inputs-beats,v5.0.0>> | Aug 15, 2017 +|======================================================================= + + +include::beats-v5.0.1.asciidoc[] +include::beats-v5.0.0.asciidoc[] diff --git a/docs/versioned-plugins/inputs/beats-v5.0.0.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.0.asciidoc new file mode 100644 index 000000000..8a1925631 --- /dev/null +++ b/docs/versioned-plugins/inputs/beats-v5.0.0.asciidoc @@ -0,0 +1,222 @@ +:plugin: beats +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v5.0.0 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v5.0.0/CHANGELOG.md +:include_path: ../include +:logstash-ref: http://www.elastic.co/guide/en/logstash/6.0 +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] +=== Beats input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This input plugin enables Logstash to receive events from the +https://www.elastic.co/products/beats[Elastic Beats] framework. + +The following example shows how to configure Logstash to listen on port +5044 for incoming Beats connections and to index into Elasticsearch: + +[source,ruby] +------------------------------------------------------------------------------ +input { + beats { + port => 5044 + } +} + +output { + elasticsearch { + hosts => "localhost:9200" + manage_template => false + index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}" + document_type => "%{[@metadata][type]}" + } +} +------------------------------------------------------------------------------ + +NOTE: The Beats shipper automatically sets the `type` field on the event. +You cannot override this setting in the Logstash config.
If you specify +a setting for the <<{version}-plugins-inputs-beats-type,`type`>> config option in +Logstash, it is ignored. + +IMPORTANT: If you are shipping events that span multiple lines, you need to +use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events +before sending the event data to Logstash. You cannot use the +{logstash-ref}/plugins-codecs-multiline.html[multiline] codec to handle multiline events. Doing so will +result in the failure to start Logstash. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Beats Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No +| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-cipher_suites"] +===== `cipher_suites` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `['TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256']` + +The list of cipher suites to use, listed by priority.
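+ +For instance, to narrow the accepted ciphers to a subset of the defaults (a sketch; the two suite names are taken from the default list above): + +[source,ruby] +------------------------------------------------------------------------------ +input { + beats { + port => 5044 + cipher_suites => ["TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"] + } +} +------------------------------------------------------------------------------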
+ +[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"] +===== `client_inactivity_timeout` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `60` + +Close idle clients after X seconds of inactivity. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"0.0.0.0"` + +The IP address to listen on. + +[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"] +===== `include_codec_tag` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `true` + + + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * There is no default value for this setting. + +The port to listen on. + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Events are by default sent in plain text. You can +enable encryption by setting `ssl` to true and configuring +the `ssl_certificate` and `ssl_key` options. + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"] +===== `ssl_certificate` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +SSL certificate to use. + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"] +===== `ssl_certificate_authorities` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `[]` + +Validate client certificates against these authorities. +You can define multiple files or paths. All the certificates will +be read and added to the trust store. You need to configure the `ssl_verify_mode` +to `peer` or `force_peer` to enable the verification. + + +[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"] +===== `ssl_handshake_timeout` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `10000` + +Time in milliseconds for an incomplete SSL handshake to time out. + +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +SSL key to use. +NOTE: This key needs to be in PKCS8 format. You can convert it with https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL]; see the linked documentation for more information. + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] +===== `ssl_key_passphrase` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. + +SSL key passphrase to use. + +[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"] +===== `ssl_verify_mode` + + * Value can be any of: `none`, `peer`, `force_peer` + * Default value is `"none"` + +By default the server doesn't do any client verification. + +`peer` will make the server ask the client to provide a certificate. +If the client provides a certificate, it will be validated. + +`force_peer` will make the server ask the client to provide a certificate. +If the client doesn't provide a certificate, the connection will be closed. + +This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs.
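+ +Putting the SSL options together, here is a sketch of mutual-TLS verification (all file paths are hypothetical placeholders): + +[source,ruby] +------------------------------------------------------------------------------ +input { + beats { + port => 5044 + ssl => true + ssl_certificate => "/etc/pki/tls/certs/logstash.crt" # hypothetical path + ssl_key => "/etc/pki/tls/private/logstash.pk8" # hypothetical path, PKCS8 format + ssl_verify_mode => "force_peer" + ssl_certificate_authorities => ["/etc/pki/tls/certs/ca.crt"] # hypothetical path + } +} +------------------------------------------------------------------------------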
+ +[id="{version}-plugins-{type}s-{plugin}-tls_max_version"] +===== `tls_max_version` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `1.2` + +The maximum TLS version allowed for the encrypted connections. The value must be one of the following: +1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2 + +[id="{version}-plugins-{type}s-{plugin}-tls_min_version"] +===== `tls_min_version` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `1` + +The minimum TLS version allowed for the encrypted connections. The value must be one of the following: +1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2 + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/beats-v5.0.1.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.1.asciidoc new file mode 100644 index 000000000..7f77a2e4d --- /dev/null +++ b/docs/versioned-plugins/inputs/beats-v5.0.1.asciidoc @@ -0,0 +1,222 @@ +:plugin: beats +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v5.0.1 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v5.0.1/CHANGELOG.md +:include_path: ../include +:logstash-ref: http://www.elastic.co/guide/en/logstash/6.0 +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Beats input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This input plugin enables Logstash to receive events from the +https://www.elastic.co/products/beats[Elastic Beats] framework. + +The following example shows how to configure Logstash to listen on port +5044 for incoming Beats connections and to index into Elasticsearch: + +[source,ruby] +------------------------------------------------------------------------------ +input { + beats { + port => 5044 + } +} + +output { + elasticsearch { + hosts => "localhost:9200" + manage_template => false + index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}" + document_type => "%{[@metadata][type]}" + } +} +------------------------------------------------------------------------------ + +NOTE: The Beats shipper automatically sets the `type` field on the event. +You cannot override this setting in the Logstash config. If you specify +a setting for the <<{version}-plugins-inputs-beats-type,`type`>> config option in +Logstash, it is ignored. + +IMPORTANT: If you are shipping events that span multiple lines, you need to +use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events +before sending the event data to Logstash. You cannot use the +{logstash-ref}/plugins-codecs-multiline.html[multiline] codec to handle multiline events. Doing so will +result in the failure to start Logstash. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Beats Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No +| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-cipher_suites"] +===== `cipher_suites` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `['TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256']` + +The list of cipher suites to use, listed by priority. + +[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"] +===== `client_inactivity_timeout` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `60` + +Close idle clients after X seconds of inactivity. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"0.0.0.0"` + +The IP address to listen on. + +[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"] +===== `include_codec_tag` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `true` + + + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting.
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * There is no default value for this setting. + +The port to listen on. + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] + * Default value is `false` + +Events are by default sent in plain text. You can +enable encryption by setting `ssl` to true and configuring +the `ssl_certificate` and `ssl_key` options. + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"] +===== `ssl_certificate` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +SSL certificate to use. + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"] +===== `ssl_certificate_authorities` + + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] + * Default value is `[]` + +Validate client certificates against these authorities. +You can define multiple files or paths. All the certificates will +be read and added to the trust store. You need to configure the `ssl_verify_mode` +to `peer` or `force_peer` to enable the verification. + + +[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"] +===== `ssl_handshake_timeout` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `10000` + +Time in milliseconds for an incomplete ssl handshake to timeout + +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] + * There is no default value for this setting. + +SSL key to use. +NOTE: This key need to be in the PKCS8 format, you can convert it with https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL] +for more information. + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] +===== `ssl_key_passphrase` + + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] + * There is no default value for this setting. + +SSL key passphrase to use. + +[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"] +===== `ssl_verify_mode` + + * Value can be any of: `none`, `peer`, `force_peer` + * Default value is `"none"` + +By default the server doesn't do any client verification. + +`peer` will make the server ask the client to provide a certificate. +If the client provides a certificate, it will be validated. + +`force_peer` will make the server ask the client to provide a certificate. +If the client doesn't provide a certificate, the connection will be closed. + +This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs. + +[id="{version}-plugins-{type}s-{plugin}-tls_max_version"] +===== `tls_max_version` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `1.2` + +The maximum TLS version allowed for the encrypted connections. The value must be the one of the following: +1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2 + +[id="{version}-plugins-{type}s-{plugin}-tls_min_version"] +===== `tls_min_version` + + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] + * Default value is `1` + +The minimum TLS version allowed for the encrypted connections. 
The value must be one of the following: +1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2 + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/dead_letter_queue-index.asciidoc b/docs/versioned-plugins/inputs/dead_letter_queue-index.asciidoc new file mode 100644 index 000000000..c2e33c563 --- /dev/null +++ b/docs/versioned-plugins/inputs/dead_letter_queue-index.asciidoc @@ -0,0 +1,15 @@ +:plugin: dead_letter_queue +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | Aug 25, 2017 (latest) +| <> | Aug 25, 2017 +|======================================================================= + + +include::dead_letter_queue-v1.1.0.asciidoc[] +include::dead_letter_queue-v1.0.0.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.0.asciidoc b/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.0.asciidoc new file mode 100644 index 000000000..0a84eb021 --- /dev/null +++ b/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.0.asciidoc @@ -0,0 +1,110 @@ +:plugin: dead_letter_queue +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v1.0.0 +:release_date: 2017-08-25 +:changelog_url: https://github.com/logstash-plugins/logstash-input-dead_letter_queue/blob/v1.1.0/CHANGELOG.md +:include_path: ../include +:logstash-ref: http://www.elastic.co/guide/en/logstash/6.0 +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Dead_letter_queue input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Logstash input to read events from Logstash's dead letter queue. + +[source, sh] +----------------------------------------- +input { + dead_letter_queue { + path => "/var/logstash/data/dead_letter_queue" + start_timestamp => "2017-04-04T23:40:37" + } +} +----------------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Dead_letter_queue Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-commit_offsets>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |a valid filesystem path|Yes +| <<{version}-plugins-{type}s-{plugin}-pipeline_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-start_timestamp>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. 
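+
+For example, here is a minimal sketch (the queue path is reused from the example
+above; adjust it to your own `path.dead_letter_queue` location) that replays the
+queue repeatedly without saving state, using the `commit_offsets` option described
+below:
+
+[source, sh]
+-----------------------------------------
+input {
+  dead_letter_queue {
+    path => "/var/logstash/data/dead_letter_queue"
+    # Explore the queue without committing offsets, so the same
+    # events can be read again on the next run:
+    commit_offsets => false
+  }
+}
+-----------------------------------------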
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-commit_offsets"]
+===== `commit_offsets`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+  * Default value is `true`
+
+Specifies whether this input should commit offsets as it processes the events.
+Typically you specify `false` when you want to iterate multiple times over the
+events in the dead letter queue without saving state, for example, while you
+are exploring the events in the dead letter queue.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+  * This is a required setting.
+  * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+  * There is no default value for this setting.
+
+Path to the dead letter queue directory that was created by a Logstash instance.
+This is the path from which "dead" events are read and is typically configured
+in the original Logstash instance with the setting `path.dead_letter_queue`.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline_id"]
+===== `pipeline_id`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * Default value is `"main"`
+
+ID of the pipeline whose events you want to read from.
+
+[id="{version}-plugins-{type}s-{plugin}-sincedb_path"]
+===== `sincedb_path`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * There is no default value for this setting.
+
+Path of the sincedb database file (which keeps track of the current position in the dead letter queue) that
+will be written to disk. The default will write sincedb files to `/plugins/inputs/dead_letter_queue`.
+
+NOTE: This value must be a file path and not a directory path.
+
+[id="{version}-plugins-{type}s-{plugin}-start_timestamp"]
+===== `start_timestamp`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * There is no default value for this setting.
+
+Timestamp in ISO8601 format from which you want to start processing events.
+For example, `2017-04-04T23:40:37`.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/dead_letter_queue-v1.1.0.asciidoc b/docs/versioned-plugins/inputs/dead_letter_queue-v1.1.0.asciidoc
new file mode 100644
index 000000000..09c12af85
--- /dev/null
+++ b/docs/versioned-plugins/inputs/dead_letter_queue-v1.1.0.asciidoc
@@ -0,0 +1,110 @@
+:plugin: dead_letter_queue
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v1.1.0
+:release_date: 2017-08-25
+:changelog_url: https://github.com/logstash-plugins/logstash-input-dead_letter_queue/blob/v1.1.0/CHANGELOG.md
+:include_path: ../include
+:logstash-ref: http://www.elastic.co/guide/en/logstash/6.0
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Dead_letter_queue input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Logstash input to read events from Logstash's dead letter queue.
+
+[source, sh]
+-----------------------------------------
+input {
+  dead_letter_queue {
+    path => "/var/logstash/data/dead_letter_queue"
+    start_timestamp => "2017-04-04T23:40:37"
+  }
+}
+-----------------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Dead_letter_queue Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-commit_offsets>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |a valid filesystem path|Yes
+| <<{version}-plugins-{type}s-{plugin}-pipeline_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-start_timestamp>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-commit_offsets"]
+===== `commit_offsets`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+  * Default value is `true`
+
+Specifies whether this input should commit offsets as it processes the events.
+Typically you specify `false` when you want to iterate multiple times over the
+events in the dead letter queue without saving state, for example, while you
+are exploring the events in the dead letter queue.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+  * This is a required setting.
+  * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+  * There is no default value for this setting.
+
+Path to the dead letter queue directory that was created by a Logstash instance.
+This is the path from which "dead" events are read and is typically configured
+in the original Logstash instance with the setting `path.dead_letter_queue`.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline_id"]
+===== `pipeline_id`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * Default value is `"main"`
+
+ID of the pipeline whose events you want to read from.
+
+[id="{version}-plugins-{type}s-{plugin}-sincedb_path"]
+===== `sincedb_path`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * There is no default value for this setting.
+
+Path of the sincedb database file (which keeps track of the current position in the dead letter queue) that
+will be written to disk. The default will write sincedb files to `/plugins/inputs/dead_letter_queue`.
+
+NOTE: This value must be a file path and not a directory path.
+
+[id="{version}-plugins-{type}s-{plugin}-start_timestamp"]
+===== `start_timestamp`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * There is no default value for this setting.
+
+Timestamp in ISO8601 format from which you want to start processing events.
+For example, `2017-04-04T23:40:37`. 
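+
+Putting several of these options together, here is a hypothetical sketch (the
+sincedb location and pipeline name are illustrative, not defaults) that reads
+events written by a pipeline named "ingest" and tracks its position in an
+explicit sincedb file:
+
+[source, sh]
+-----------------------------------------
+input {
+  dead_letter_queue {
+    path => "/var/logstash/data/dead_letter_queue"
+    # Read the queue of a pipeline other than "main":
+    pipeline_id => "ingest"
+    # Must be a file path, not a directory path:
+    sincedb_path => "/var/logstash/data/dlq_sincedb"
+  }
+}
+-----------------------------------------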
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs.asciidoc b/docs/versioned-plugins/outputs.asciidoc
new file mode 100644
index 000000000..c1f22fd24
--- /dev/null
+++ b/docs/versioned-plugins/outputs.asciidoc
@@ -0,0 +1,6 @@
+:type: output
+:type_uc: Output
+
+include::include/plugin-intro.asciidoc[]
+
+include::outputs/elasticsearch-index.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/elasticsearch-index.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-index.asciidoc
new file mode 100644
index 000000000..521825202
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: elasticsearch
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | Aug 21, 2017 (latest)
+| <> | Aug 21, 2017
+|=======================================================================
+
+
+include::elasticsearch-v8.1.1.asciidoc[]
+include::elasticsearch-v8.1.0.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v8.1.0.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v8.1.0.asciidoc
new file mode 100644
index 000000000..0002dbb2a
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v8.1.0.asciidoc
@@ -0,0 +1,664 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v8.1.0
+:release_date: 2017-08-21
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v8.1.1/CHANGELOG.md
+:include_path: ../include
+:logstash-ref: http://www.elastic.co/guide/en/logstash/6.0
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at https://www.elastic.co/products/elasticsearch
+
+==== Template management for Elasticsearch 5.x
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+LS will not force-upgrade the template if the `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy has changed significantly in the 8.1.1 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures. The bulk API sends batches of requests to an HTTP endpoint. Error codes for the HTTP
+request are handled differently than error codes for individual documents.
+
+HTTP requests to the bulk API are expected to return a 200 response code. All other response codes are retried indefinitely.
+
+Document errors are handled as follows:
+- 400 and 404 errors are sent to the DLQ, if enabled. If a DLQ is not enabled, a log message will be emitted and the event will be dropped.
+- 409 errors (conflict) are logged as a warning and dropped.
+
+Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than for this plugin to do so.
+
+==== Batch Sizes
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default, and
+for Elasticsearch versions 5.0 and later, the user doesn't have to set any configuration in Elasticsearch for
+it to send back compressed responses. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> 
|{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation].
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * There is no default value for this setting.
+
+HTTP path at which to perform the _bulk requests.
+This defaults to a concatenation of the `path` parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+  * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+  * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise, the document type will be assigned the value of 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+  * Default value is `[]`
+
+Use this whitelist to specify the Elasticsearch errors that you don't want to log.
+A useful example is when you want to skip all 409 errors,
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * There is no default value for this setting.
+
+HTTP path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if the backend has come back again
+before it is once again eligible to service requests.
+If you have custom firewall rules, you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri]
+  * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance. If given an array, it will load balance requests across the hosts specified in the `hosts` parameter.
+Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (e.g. 9200, not 9300).
+Examples:
+
+* `"127.0.0.1"`
+* `["127.0.0.1:9200","127.0.0.2:9200"]`
+* `["http://127.0.0.1"]`
+* `["https://127.0.0.1:9200"]`
+* `["https://127.0.0.1:9200/mypath"]` (if using a proxy on a subpath)
+
+It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+  * Default value is `false`
+
+Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond.
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or only search specific date ranges.
+Indexes may not contain uppercase characters.
+For weekly indexes ISO 8601 format is recommended, e.g. `logstash-%{+xxxx.ww}`.
+LS uses Joda to format the index pattern from the event timestamp.
+Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+  * There is no default value for this setting.
+
+The keystore used to present a certificate to the server.
+It can be either .jks or .p12.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+  * There is no default value for this setting.
+
+Set the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-manage_template"]
+===== `manage_template`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+  * Default value is `true`
+
+From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+Logstash's startup if one with the name `template_name` does not already exist.
+By default, the contents of this template is the default template for
+`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern
+`logstash-*`. Should you require support for other index names, or would like
+to change the mappings in the template in general, a custom template can be
+specified by setting `template` to the path of a template file.
+
+Setting `manage_template` to false disables this feature. If you require more
+control over template creation (e.g. creating indices dynamically based on
+field names), you should set `manage_template` to false and use the REST
+API to apply your templates manually.
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+  * There is no default value for this setting.
+
+Pass a set of key-value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+URLs that already have query strings, the one specified here will be appended.
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * Default value is `nil`
+
+For child documents, ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+  * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * There is no default value for this setting.
+
+HTTP path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+the root path of the Elasticsearch HTTP API.
+Note that if you use paths as components of URLs in the 'hosts' field, you may
+not also set this field. That will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+here like `pipeline => "%{INGEST_PIPELINE}"`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `1000`
+
+While the output tries to reuse connections efficiently, we have a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `100`
+
+While the output tries to reuse connections efficiently, we have a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri]
+  * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments, but now only accepts
+arguments of the URI type to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `5`
+
+How long, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `2`
+
+Set the initial interval, in seconds, between bulk retries.
+This is doubled on each retry, up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `64`
+
+Set the maximum interval, in seconds, between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates documentation]
+for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * Default value is `""`
+
+Set the script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * Default value is `"painless"`
+
+Set the language of the script to use. If not set, this defaults to `painless` in ES 5.0.
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+  * Value can be any of: `inline`, `indexed`, `file`
+  * Default value is `["inline"]`
+
+Define the type of script referenced by the "script" variable:
+
+* `inline`: "script" contains an inline script
+* `indexed`: "script" contains the name of a script directly indexed in Elasticsearch
+* `file`: "script" contains the name of a script stored in Elasticsearch's config directory
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * Default value is `"event"`
+
+Set the variable name passed to the script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+  * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+  * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
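+
+As an illustration, here is a minimal sketch (the host addresses are
+hypothetical) contrasting the two approaches:
+
+[source,ruby]
+------------------------------------------------------------------------------
+# Let the plugin discover nodes by sniffing, starting from one seed host:
+output {
+  elasticsearch {
+    hosts    => "es-seed.example.com:9200"
+    sniffing => true
+  }
+}
+
+# Or list the data nodes explicitly and leave sniffing disabled:
+output {
+  elasticsearch {
+    hosts => ["es-data-1.example.com:9200", "es-data-2.example.com:9200"]
+  }
+}
+------------------------------------------------------------------------------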
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * There is no default value for this setting.
+
+HTTP path to be used for the sniffing requests.
+The default value is computed by concatenating the path value and "_nodes/http".
+If sniffing_path is set, it will be used as an absolute path.
+Do not use a full URL here, only paths, e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+  * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified, plain HTTP will be used.
+If SSL is explicitly disabled here, the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+  * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification, please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+  * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE <http://localhost:9200/_template/OldTemplateName?pretty>`
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+  * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by `template` or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. remove all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+  * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+  * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as its JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+  * Default value is `10000`
+
+How long to wait before checking whether a keepalive connection is stale, before executing a request on it.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+  * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+  * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+  * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+  * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support. 
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v8.1.1.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v8.1.1.asciidoc
new file mode 100644
index 000000000..9186a90b3
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v8.1.1.asciidoc
@@ -0,0 +1,664 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v8.1.1
+:release_date: 2017-08-21
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v8.1.1/CHANGELOG.md
+:include_path: ../include
+:logstash-ref: http://www.elastic.co/guide/en/logstash/6.0
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at https://www.elastic.co/products/elasticsearch
+
+==== Template management for Elasticsearch 5.x
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+LS will not force-upgrade the template if the `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy has changed significantly in the 8.1.1 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures. The bulk API sends batches of requests to an HTTP endpoint. Error codes for the HTTP
+request are handled differently than error codes for individual documents.
+
+HTTP requests to the bulk API are expected to return a 200 response code. All other response codes are retried indefinitely.
+
+Document errors are handled as follows:
+- 400 and 404 errors are sent to the DLQ, if enabled. If a DLQ is not enabled, a log message will be emitted and the event will be dropped.
+- 409 errors (conflict) are logged as a warning and dropped.
+
+Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than for this plugin to do so.
+
+==== Batch Sizes
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default, and
+for Elasticsearch versions 5.0 and later, the user doesn't have to set any configuration in Elasticsearch for
+it to send back compressed responses. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> 
|{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-action"] +===== `action` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `"index"` + +Protocol agnostic (i.e. non-http, non-java specific) configs go here +Protocol agnostic methods +The Elasticsearch action to perform. Valid actions are: + +- index: indexes a document (an event from Logstash). +- delete: deletes a document by id (An id is required for this action) +- create: indexes a document, fails if a document by that id already exists in the index. +- update: updates a document by id. Update has a special case where you can upsert -- update a + document if not already present. See the `upsert` option. NOTE: This does not work and is not supported + in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash! 
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path at which to perform the _bulk requests.
+This defaults to a concatenation of the `path` parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID. A sketch combining this with update mode follows.
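+
+In the sketch, the `[fingerprint]` field is hypothetical and assumed to be
+computed earlier in the pipeline (for example by a fingerprint filter):
+
+[source,ruby]
+--------------------------------------------------
+output {
+  elasticsearch {
+    hosts         => ["localhost:9200"]
+    action        => "update"
+    # hypothetical field holding a stable per-document key
+    document_id   => "%{[fingerprint]}"
+    # insert the event as a new document when the id is not found
+    doc_as_upsert => true
+  }
+}
+--------------------------------------------------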
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise the document type will be assigned the value of 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
+ * Default value is `[]`
+
+Set the Elasticsearch error types that you don't want to log; errors in this
+whitelist are silently skipped. A useful example is when you want to skip all
+409 errors, which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if the backend has come back
+before it is once again eligible to service requests.
+If you have custom firewall rules you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri]
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter.
+Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (e.g. 9200, not 9300).
+ `"127.0.0.1"`
+ `["127.0.0.1:9200","127.0.0.2:9200"]`
+ `["http://127.0.0.1"]`
+ `["https://127.0.0.1:9200"]`
+ `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
+It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond.
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or only search specific date ranges.
+Indexes may not contain uppercase characters.
+For weekly indexes the ISO 8601 format is recommended, e.g. `logstash-%{+xxxx.ww}`.
+LS uses Joda to format the index pattern from the event timestamp.
+Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
+Both patterns are shown in the sketch below.
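+
+Only one `index` line would be active in a real configuration:
+
+[source,ruby]
+--------------------------------------------------
+output {
+  elasticsearch {
+    # daily indices, the default pattern
+    index => "logstash-%{+YYYY.MM.dd}"
+    # weekly ISO 8601 indices instead:
+    # index => "logstash-%{+xxxx.ww}"
+  }
+}
+--------------------------------------------------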
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The keystore used to present a certificate to the server.
+It can be either .jks or .p12.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-manage_template"]
+===== `manage_template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+Logstash's startup if one with the name `template_name` does not already exist.
+By default, the contents of this template are the default template for
+`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern
+`logstash-*`. Should you require support for other index names, or would like
+to change the mappings in the template in general, a custom template can be
+specified by setting `template` to the path of a template file.
+
+Setting `manage_template` to false disables this feature. If you require more
+control over template creation (e.g. creating indices dynamically based on
+field names), you should set `manage_template` to false and use the REST
+API to apply your templates manually.
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
+ * There is no default value for this setting.
+
+Pass a set of key value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+URLs that already have query strings, the one specified here will be appended.
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+For child documents, ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path at which the Elasticsearch server lives. Use this if you must run
+Elasticsearch behind a proxy that remaps the root path of the Elasticsearch
+HTTP API.
+Note that if you use paths as components of URLs in the 'hosts' field you may
+not also set this field. That will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use
+event dependent configuration here like `pipeline => "%{INGEST_PIPELINE}"`,
+as sketched below.
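+
+A minimal sketch of event-dependent pipeline selection (the `INGEST_PIPELINE`
+field is hypothetical and assumed to be set upstream; the named ingest
+pipeline must already exist in Elasticsearch):
+
+[source,ruby]
+--------------------------------------------------
+output {
+  elasticsearch {
+    hosts    => ["localhost:9200"]
+    # resolves per event to the name of an existing ingest pipeline
+    pipeline => "%{INGEST_PIPELINE}"
+  }
+}
+--------------------------------------------------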
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently we have a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently we have a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri]
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments but now only accepts
+arguments of the URI type to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `2`
+
+Set the initial interval, in seconds, between bulk retries. It is doubled on
+each retry, up to `retry_max_interval`; with the defaults, the waits are
+2, 4, 8, 16, 32, 64, 64, ... seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `64`
+
+Set the max interval, in seconds, between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates documentation]
+for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set the script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"painless"`
+
+Set the language of the script to use. In ES 5.0 and later this defaults to `painless`.
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `["inline"]`
+
+Define the type of script referenced by the "script" variable:
+ inline  : "script" contains an inline script
+ indexed : "script" contains the name of a script directly indexed in elasticsearch
+ file    : "script" contains the name of a script stored in elasticsearch's config directory
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"event"`
+
+Set the variable name passed to the script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document
+(scripted update). The sketch below ties the scripting options together.
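+
+An illustrative sketch only (the script name `increment-counter` and the
+`[id]` field are hypothetical; the script would have to be indexed in
+Elasticsearch beforehand, and it sees the event as the `event` variable):
+
+[source,ruby]
+--------------------------------------------------
+output {
+  elasticsearch {
+    action          => "update"
+    # hypothetical field holding the target document id
+    document_id     => "%{[id]}"
+    script_type     => "indexed"
+    script          => "increment-counter"
+    script_var_name => "event"
+    # let the script create the document when it does not exist yet
+    scripted_upsert => true
+  }
+}
+--------------------------------------------------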
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+HTTP path to be used for the sniffing requests.
+The default value is computed by concatenating the `path` value and "_nodes/http".
+If `sniffing_path` is set it will be used as an absolute path.
+Do not use a full URL here, only paths, e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified plain HTTP will be used.
+If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+A configuration sketch follows.
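+
+A sketch of TLS with certificate validation (the host name and certificate
+path are placeholders for your own environment):
+
+[source,ruby]
+--------------------------------------------------
+output {
+  elasticsearch {
+    hosts  => ["https://es.example.org:9200"]
+    ssl    => true
+    # placeholder path to the CA that signed the server certificate
+    cacert => "/etc/logstash/certs/ca.pem"
+  }
+}
+--------------------------------------------------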
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE <http://localhost:9200/_template/OldTemplateName?pretty>`
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. removing all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * Default value is `""`
+
+Set the upsert content for update mode.
+Create a new document with this parameter as its JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
+ * Default value is `10000`
+
+How long to wait before checking whether a kept-alive connection is stale,
+prior to executing a request on it.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] From 7b7eac67870ef1e44894566ca09c5415df1c0d30 Mon Sep 17 00:00:00 2001 From: Joao Duarte Date: Thu, 11 Jan 2018 16:48:37 +0000 Subject: [PATCH 2/5] remove examples --- docs/versioned-plugins/codecs.asciidoc | 6 - .../codecs/rubydebug-index.asciidoc | 14 - .../codecs/rubydebug-v3.0.3.asciidoc | 47 -- .../codecs/rubydebug-v3.0.4.asciidoc | 47 -- docs/versioned-plugins/filters.asciidoc | 7 - .../filters/multiline-index.asciidoc | 14 - .../filters/multiline-v3.0.3.asciidoc | 194 ----- .../filters/multiline-v3.0.4.asciidoc | 195 ----- docs/versioned-plugins/inputs.asciidoc | 8 - .../inputs/beats-index.asciidoc | 14 - .../inputs/beats-v5.0.0.asciidoc | 222 ------ .../inputs/beats-v5.0.1.asciidoc | 222 ------ .../inputs/dead_letter_queue-index.asciidoc | 15 - .../inputs/dead_letter_queue-v1.0.0.asciidoc | 110 --- .../inputs/dead_letter_queue-v1.1.0.asciidoc | 110 --- docs/versioned-plugins/outputs.asciidoc | 6 - .../outputs/elasticsearch-index.asciidoc | 14 - .../outputs/elasticsearch-v8.1.0.asciidoc | 664 ------------------ .../outputs/elasticsearch-v8.1.1.asciidoc | 664 ------------------ 19 files changed, 2573 deletions(-) delete mode 100644 docs/versioned-plugins/codecs.asciidoc delete mode 100644 docs/versioned-plugins/codecs/rubydebug-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/rubydebug-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/codecs/rubydebug-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/filters.asciidoc delete mode 100644 docs/versioned-plugins/filters/multiline-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/multiline-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/multiline-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs.asciidoc delete mode 100644 docs/versioned-plugins/inputs/beats-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/beats-v5.0.0.asciidoc delete mode 100644 docs/versioned-plugins/inputs/beats-v5.0.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/dead_letter_queue-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/dead_letter_queue-v1.0.0.asciidoc delete mode 100644 docs/versioned-plugins/inputs/dead_letter_queue-v1.1.0.asciidoc delete mode 100644 docs/versioned-plugins/outputs.asciidoc delete mode 100644 docs/versioned-plugins/outputs/elasticsearch-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/elasticsearch-v8.1.0.asciidoc delete mode 100644 docs/versioned-plugins/outputs/elasticsearch-v8.1.1.asciidoc diff --git a/docs/versioned-plugins/codecs.asciidoc b/docs/versioned-plugins/codecs.asciidoc deleted file mode 100644 index 8de113467..000000000 --- a/docs/versioned-plugins/codecs.asciidoc +++ /dev/null @@ -1,6 +0,0 @@ -:type: codec -:type_uc: Codec - -include::include/plugin-intro.asciidoc[] - -include::codecs/rubydebug-index.asciidoc[] diff --git a/docs/versioned-plugins/codecs/rubydebug-index.asciidoc b/docs/versioned-plugins/codecs/rubydebug-index.asciidoc deleted file mode 100644 index b2028589f..000000000 --- a/docs/versioned-plugins/codecs/rubydebug-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: rubydebug -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | Aug 21, 
2017 (latest) -| <> | Aug 21, 2017 -|======================================================================= - - -include::rubydebug-v3.0.4.asciidoc[] -include::rubydebug-v3.0.3.asciidoc[] diff --git a/docs/versioned-plugins/codecs/rubydebug-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/rubydebug-v3.0.3.asciidoc deleted file mode 100644 index 872beb2dd..000000000 --- a/docs/versioned-plugins/codecs/rubydebug-v3.0.3.asciidoc +++ /dev/null @@ -1,47 +0,0 @@ -:plugin: rubydebug -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-08-21 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-rubydebug/blob/v3.0.4/CHANGELOG.md -:include_path: ../include -:logstash-ref: http://www.elastic.co/guide/en/logstash/6.0 -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Rubydebug codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The rubydebug codec will output your Logstash event data using -the Ruby Awesome Print library. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Rubydebug Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-metadata>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-metadata"] -===== `metadata` - - * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] - * Default value is `false` - -Should the event's metadata be included? - - diff --git a/docs/versioned-plugins/codecs/rubydebug-v3.0.4.asciidoc b/docs/versioned-plugins/codecs/rubydebug-v3.0.4.asciidoc deleted file mode 100644 index fefc2524f..000000000 --- a/docs/versioned-plugins/codecs/rubydebug-v3.0.4.asciidoc +++ /dev/null @@ -1,47 +0,0 @@ -:plugin: rubydebug -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-08-21 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-rubydebug/blob/v3.0.4/CHANGELOG.md -:include_path: ../include -:logstash-ref: http://www.elastic.co/guide/en/logstash/6.0 -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Rubydebug codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The rubydebug codec will output your Logstash event data using -the Ruby Awesome Print library. 
- - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Rubydebug Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-metadata>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-metadata"] -===== `metadata` - - * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] - * Default value is `false` - -Should the event's metadata be included? - - diff --git a/docs/versioned-plugins/filters.asciidoc b/docs/versioned-plugins/filters.asciidoc deleted file mode 100644 index f43eb727c..000000000 --- a/docs/versioned-plugins/filters.asciidoc +++ /dev/null @@ -1,7 +0,0 @@ -:type: filter -:type_uc: Filter - -include::include/plugin-intro.asciidoc[] - -include::filters/multiline-index.asciidoc[] - diff --git a/docs/versioned-plugins/filters/multiline-index.asciidoc b/docs/versioned-plugins/filters/multiline-index.asciidoc deleted file mode 100644 index b7fb6858c..000000000 --- a/docs/versioned-plugins/filters/multiline-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: multiline -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | Aug 15, 2017 (latest) -| <> | Aug 15, 2017 -|======================================================================= - - -include::multiline-v3.0.4.asciidoc[] -include::multiline-v3.0.3.asciidoc[] diff --git a/docs/versioned-plugins/filters/multiline-v3.0.3.asciidoc b/docs/versioned-plugins/filters/multiline-v3.0.3.asciidoc deleted file mode 100644 index 18a03368f..000000000 --- a/docs/versioned-plugins/filters/multiline-v3.0.3.asciidoc +++ /dev/null @@ -1,194 +0,0 @@ -:plugin: multiline -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-multiline/blob/v3.0.4/CHANGELOG.md -:include_path: ../include -:logstash-ref: http://www.elastic.co/guide/en/logstash/6.0 -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Multiline filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This filter will collapse multiline messages from a single source into one Logstash event. - -The original goal of this filter was to allow joining of multi-line messages -from files into a single event. For example - joining java exception and -stacktrace messages into a single event. - -NOTE: This filter will not work with multiple worker threads `-w 2` on the logstash command line. - -The config looks like this: -[source,ruby] - filter { - multiline { - pattern => "pattern, a regexp" - negate => boolean - what => "previous" or "next" - } - } - -The `pattern` should be a regexp ({logstash-ref}/plugins-filters-grok.html[grok] patterns are -supported) which matches what you believe to be an indicator that the field -is part of an event consisting of multiple lines of log data. - -The `what` must be `previous` or `next` and indicates the relation -to the multi-line event. 
- -The `negate` can be `true` or `false` (defaults to `false`). If `true`, a -message not matching the pattern will constitute a match of the multiline -filter and the `what` will be applied. (vice-versa is also true) - -For example, Java stack traces are multiline and usually have the message -starting at the far-left, with each subsequent line indented. Do this: -[source,ruby] - filter { - multiline { - pattern => "^\s" - what => "previous" - } - } - -This says that any line starting with whitespace belongs to the previous line. - -Another example is C line continuations (backslash). Here's how to do that: -[source,ruby] - filter { - multiline { - pattern => "\\$" - what => "next" - } - } - -This says that any line ending with a backslash should be combined with the -following line. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Multiline Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-allow_duplicates>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-max_age>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-negate>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-pattern>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes -| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |{logstash-ref}/configuration-file-structure.html#array[array]|No -| <<{version}-plugins-{type}s-{plugin}-source>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-stream_identity>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-what>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["previous", "next"]`|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-allow_duplicates"] -===== `allow_duplicates` - - * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] - * Default value is `true` - -Allow duplcate values on the source field. - -[id="{version}-plugins-{type}s-{plugin}-max_age"] -===== `max_age` - - * Value type is {logstash-ref}/configuration-file-structure.html#number[number] - * Default value is `5` - -The maximum age an event can be (in seconds) before it is automatically -flushed. - -[id="{version}-plugins-{type}s-{plugin}-negate"] -===== `negate` - - * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] - * Default value is `false` - -Negate the regexp pattern ('if not matched') - -[id="{version}-plugins-{type}s-{plugin}-pattern"] -===== `pattern` - - * This is a required setting. - * Value type is {logstash-ref}/configuration-file-structure.html#string[string] - * There is no default value for this setting. - -The expression to match. 
The same matching engine as the -{logstash-ref}/plugins-filters-grok.html[grok] filter is used, so the expression can contain -a plain regular expression or one that also contains grok patterns. - -[id="{version}-plugins-{type}s-{plugin}-patterns_dir"] -===== `patterns_dir` - - * Value type is {logstash-ref}/configuration-file-structure.html#array[array] - * Default value is `[]` - -Logstash ships by default with a bunch of patterns, so you don't -necessarily need to define this yourself unless you are adding additional -patterns. - -Pattern files are plain text with format: -[source,ruby] - NAME PATTERN - -For example: -[source,ruby] - NUMBER \d+ - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * Value type is {logstash-ref}/configuration-file-structure.html#string[string] - * Default value is `"message"` - -The field name to execute the pattern match on. - -[id="{version}-plugins-{type}s-{plugin}-stream_identity"] -===== `stream_identity` - - * Value type is {logstash-ref}/configuration-file-structure.html#string[string] - * Default value is `"%{host}.%{path}.%{type}"` - -The stream identity is how the multiline filter determines which stream an -event belongs to. This is generally used for differentiating, say, events -coming from multiple files in the same file input, or multiple connections -coming from a tcp input. - -The default value here is usually what you want, but there are some cases -where you want to change it. One such example is if you are using a tcp -input with only one client connecting at any time. If that client -reconnects (due to error or client restart), then logstash will identify -the new connection as a new stream and break any multiline goodness that -may have occurred between the old and new connection. To solve this use -case, you can use `%{@source_host}.%{@type}` instead. - -[id="{version}-plugins-{type}s-{plugin}-what"] -===== `what` - - * This is a required setting. - * Value can be any of: `previous`, `next` - * There is no default value for this setting. - -If the pattern matched, does event belong to the next or previous event? - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/multiline-v3.0.4.asciidoc b/docs/versioned-plugins/filters/multiline-v3.0.4.asciidoc deleted file mode 100644 index 40a82e1ba..000000000 --- a/docs/versioned-plugins/filters/multiline-v3.0.4.asciidoc +++ /dev/null @@ -1,195 +0,0 @@ -:plugin: multiline -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-multiline/blob/v3.0.4/CHANGELOG.md -:include_path: ../include -:logstash-ref: http://www.elastic.co/guide/en/logstash/6.0 -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Multiline filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - - -This filter will collapse multiline messages from a single source into one Logstash event. - -The original goal of this filter was to allow joining of multi-line messages -from files into a single event. For example - joining java exception and -stacktrace messages into a single event. 
- -NOTE: This filter will not work with multiple worker threads `-w 2` on the logstash command line. - -The config looks like this: -[source,ruby] - filter { - multiline { - pattern => "pattern, a regexp" - negate => boolean - what => "previous" or "next" - } - } - -The `pattern` should be a regexp ({logstash-ref}/plugins-filters-grok.html[grok] patterns are -supported) which matches what you believe to be an indicator that the field -is part of an event consisting of multiple lines of log data. - -The `what` must be `previous` or `next` and indicates the relation -to the multi-line event. - -The `negate` can be `true` or `false` (defaults to `false`). If `true`, a -message not matching the pattern will constitute a match of the multiline -filter and the `what` will be applied. (vice-versa is also true) - -For example, Java stack traces are multiline and usually have the message -starting at the far-left, with each subsequent line indented. Do this: -[source,ruby] - filter { - multiline { - pattern => "^\s" - what => "previous" - } - } - -This says that any line starting with whitespace belongs to the previous line. - -Another example is C line continuations (backslash). Here's how to do that: -[source,ruby] - filter { - multiline { - pattern => "\\$" - what => "next" - } - } - -This says that any line ending with a backslash should be combined with the -following line. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Multiline Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-allow_duplicates>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-max_age>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-negate>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-pattern>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes -| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |{logstash-ref}/configuration-file-structure.html#array[array]|No -| <<{version}-plugins-{type}s-{plugin}-source>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-stream_identity>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-what>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["previous", "next"]`|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-allow_duplicates"] -===== `allow_duplicates` - - * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] - * Default value is `true` - -Allow duplcate values on the source field. - -[id="{version}-plugins-{type}s-{plugin}-max_age"] -===== `max_age` - - * Value type is {logstash-ref}/configuration-file-structure.html#number[number] - * Default value is `5` - -The maximum age an event can be (in seconds) before it is automatically -flushed. 
- -[id="{version}-plugins-{type}s-{plugin}-negate"] -===== `negate` - - * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] - * Default value is `false` - -Negate the regexp pattern ('if not matched') - -[id="{version}-plugins-{type}s-{plugin}-pattern"] -===== `pattern` - - * This is a required setting. - * Value type is {logstash-ref}/configuration-file-structure.html#string[string] - * There is no default value for this setting. - -The expression to match. The same matching engine as the -{logstash-ref}/plugins-filters-grok.html[grok] filter is used, so the expression can contain -a plain regular expression or one that also contains grok patterns. - -[id="{version}-plugins-{type}s-{plugin}-patterns_dir"] -===== `patterns_dir` - - * Value type is {logstash-ref}/configuration-file-structure.html#array[array] - * Default value is `[]` - -Logstash ships by default with a bunch of patterns, so you don't -necessarily need to define this yourself unless you are adding additional -patterns. - -Pattern files are plain text with format: -[source,ruby] - NAME PATTERN - -For example: -[source,ruby] - NUMBER \d+ - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * Value type is {logstash-ref}/configuration-file-structure.html#string[string] - * Default value is `"message"` - -The field name to execute the pattern match on. - -[id="{version}-plugins-{type}s-{plugin}-stream_identity"] -===== `stream_identity` - - * Value type is {logstash-ref}/configuration-file-structure.html#string[string] - * Default value is `"%{host}.%{path}.%{type}"` - -The stream identity is how the multiline filter determines which stream an -event belongs to. This is generally used for differentiating, say, events -coming from multiple files in the same file input, or multiple connections -coming from a tcp input. - -The default value here is usually what you want, but there are some cases -where you want to change it. One such example is if you are using a tcp -input with only one client connecting at any time. If that client -reconnects (due to error or client restart), then logstash will identify -the new connection as a new stream and break any multiline goodness that -may have occurred between the old and new connection. To solve this use -case, you can use `%{@source_host}.%{@type}` instead. - -[id="{version}-plugins-{type}s-{plugin}-what"] -===== `what` - - * This is a required setting. - * Value can be any of: `previous`, `next` - * There is no default value for this setting. - -If the pattern matched, does event belong to the next or previous event? 
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs.asciidoc b/docs/versioned-plugins/inputs.asciidoc deleted file mode 100644 index 7aef78c5e..000000000 --- a/docs/versioned-plugins/inputs.asciidoc +++ /dev/null @@ -1,8 +0,0 @@ -:type: input -:type_uc: Input - -include::include/plugin-intro.asciidoc[] - -include::inputs/beats-index.asciidoc[] - -include::inputs/dead_letter_queue-index.asciidoc[] diff --git a/docs/versioned-plugins/inputs/beats-index.asciidoc b/docs/versioned-plugins/inputs/beats-index.asciidoc deleted file mode 100644 index 48e18c6f0..000000000 --- a/docs/versioned-plugins/inputs/beats-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: beats -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | Aug 15, 2017 (latest) -| <> | Aug 15, 2017 -|======================================================================= - - -include::beats-v5.0.1.asciidoc[] -include::beats-v5.0.0.asciidoc[] diff --git a/docs/versioned-plugins/inputs/beats-v5.0.0.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.0.asciidoc deleted file mode 100644 index 8a1925631..000000000 --- a/docs/versioned-plugins/inputs/beats-v5.0.0.asciidoc +++ /dev/null @@ -1,222 +0,0 @@ -:plugin: beats -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v5.0.0 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v5.0.1/CHANGELOG.md -:include_path: ../include -:logstash-ref: http://www.elastic.co/guide/en/logstash/6.0 -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] -=== Beats input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This input plugin enables Logstash to receive events from the -https://www.elastic.co/products/beats[Elastic Beats] framework. - -The following example shows how to configure Logstash to listen on port -5044 for incoming Beats connections and to index into Elasticsearch: - -[source,ruby] ------------------------------------------------------------------------------- -input { - beats { - port => 5044 - } -} - -output { - elasticsearch { - hosts => "localhost:9200" - manage_template => false - index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}" - document_type => "%{[@metadata][type]}" - } -} ------------------------------------------------------------------------------- - -NOTE: The Beats shipper automatically sets the `type` field on the event. -You cannot override this setting in the Logstash config. If you specify -a setting for the <<{version}-plugins-inputs-beats-type,`type`>> config option in -Logstash, it is ignored. - -IMPORTANT: If you are shipping events that span multiple lines, you need to -use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events -before sending the event data to Logstash. You cannot use the -{logstash-ref}/plugins-codecs-multiline.html[multiline] codec to handle multiline events. Doing so will -result in the failure to start Logstash. 
- - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Beats Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No -| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes -| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No -| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No -| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No -| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-cipher_suites"] -===== `cipher_suites` - - * Value type is {logstash-ref}/configuration-file-structure.html#array[array] - * Default value is `java.lang.String[TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256]@459cfcca` - -The list of ciphers suite to use, listed by priorities. - -[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"] -===== `client_inactivity_timeout` - - * Value type is {logstash-ref}/configuration-file-structure.html#number[number] - * Default value is `60` - -Close Idle clients after X seconds of inactivity. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is {logstash-ref}/configuration-file-structure.html#string[string] - * Default value is `"0.0.0.0"` - -The IP address to listen on. 
- -[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"] -===== `include_codec_tag` - - * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] - * Default value is `true` - - - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * This is a required setting. - * Value type is {logstash-ref}/configuration-file-structure.html#number[number] - * There is no default value for this setting. - -The port to listen on. - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` - - * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] - * Default value is `false` - -Events are by default sent in plain text. You can -enable encryption by setting `ssl` to true and configuring -the `ssl_certificate` and `ssl_key` options. - -[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"] -===== `ssl_certificate` - - * Value type is {logstash-ref}/configuration-file-structure.html#path[path] - * There is no default value for this setting. - -SSL certificate to use. - -[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"] -===== `ssl_certificate_authorities` - - * Value type is {logstash-ref}/configuration-file-structure.html#array[array] - * Default value is `[]` - -Validate client certificates against these authorities. -You can define multiple files or paths. All the certificates will -be read and added to the trust store. You need to configure the `ssl_verify_mode` -to `peer` or `force_peer` to enable the verification. - - -[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"] -===== `ssl_handshake_timeout` - - * Value type is {logstash-ref}/configuration-file-structure.html#number[number] - * Default value is `10000` - -Time in milliseconds for an incomplete ssl handshake to timeout - -[id="{version}-plugins-{type}s-{plugin}-ssl_key"] -===== `ssl_key` - - * Value type is {logstash-ref}/configuration-file-structure.html#path[path] - * There is no default value for this setting. - -SSL key to use. -NOTE: This key need to be in the PKCS8 format, you can convert it with https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL] -for more information. - -[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] -===== `ssl_key_passphrase` - - * Value type is {logstash-ref}/configuration-file-structure.html#password[password] - * There is no default value for this setting. - -SSL key passphrase to use. - -[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"] -===== `ssl_verify_mode` - - * Value can be any of: `none`, `peer`, `force_peer` - * Default value is `"none"` - -By default the server doesn't do any client verification. - -`peer` will make the server ask the client to provide a certificate. -If the client provides a certificate, it will be validated. - -`force_peer` will make the server ask the client to provide a certificate. -If the client doesn't provide a certificate, the connection will be closed. - -This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs. - -[id="{version}-plugins-{type}s-{plugin}-tls_max_version"] -===== `tls_max_version` - - * Value type is {logstash-ref}/configuration-file-structure.html#number[number] - * Default value is `1.2` - -The maximum TLS version allowed for the encrypted connections. 
The value must be the one of the following: -1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2 - -[id="{version}-plugins-{type}s-{plugin}-tls_min_version"] -===== `tls_min_version` - - * Value type is {logstash-ref}/configuration-file-structure.html#number[number] - * Default value is `1` - -The minimum TLS version allowed for the encrypted connections. The value must be one of the following: -1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2 - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/beats-v5.0.1.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.1.asciidoc deleted file mode 100644 index 7f77a2e4d..000000000 --- a/docs/versioned-plugins/inputs/beats-v5.0.1.asciidoc +++ /dev/null @@ -1,222 +0,0 @@ -:plugin: beats -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v5.0.1 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v5.0.1/CHANGELOG.md -:include_path: ../include -:logstash-ref: http://www.elastic.co/guide/en/logstash/6.0 -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Beats input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This input plugin enables Logstash to receive events from the -https://www.elastic.co/products/beats[Elastic Beats] framework. - -The following example shows how to configure Logstash to listen on port -5044 for incoming Beats connections and to index into Elasticsearch: - -[source,ruby] ------------------------------------------------------------------------------- -input { - beats { - port => 5044 - } -} - -output { - elasticsearch { - hosts => "localhost:9200" - manage_template => false - index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}" - document_type => "%{[@metadata][type]}" - } -} ------------------------------------------------------------------------------- - -NOTE: The Beats shipper automatically sets the `type` field on the event. -You cannot override this setting in the Logstash config. If you specify -a setting for the <<{version}-plugins-inputs-beats-type,`type`>> config option in -Logstash, it is ignored. - -IMPORTANT: If you are shipping events that span multiple lines, you need to -use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events -before sending the event data to Logstash. You cannot use the -{logstash-ref}/plugins-codecs-multiline.html[multiline] codec to handle multiline events. Doing so will -result in the failure to start Logstash. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Beats Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No -| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes -| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No -| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No -| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No -| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-cipher_suites"] -===== `cipher_suites` - - * Value type is {logstash-ref}/configuration-file-structure.html#array[array] - * Default value is `java.lang.String[TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256]@459cfcca` - -The list of ciphers suite to use, listed by priorities. - -[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"] -===== `client_inactivity_timeout` - - * Value type is {logstash-ref}/configuration-file-structure.html#number[number] - * Default value is `60` - -Close Idle clients after X seconds of inactivity. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is {logstash-ref}/configuration-file-structure.html#string[string] - * Default value is `"0.0.0.0"` - -The IP address to listen on. - -[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"] -===== `include_codec_tag` - - * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] - * Default value is `true` - - - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * This is a required setting. 
- * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
- * There is no default value for this setting.
-
-The port to listen on.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl"]
-===== `ssl`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
- * Default value is `false`
-
-Events are by default sent in plain text. You can
-enable encryption by setting `ssl` to true and configuring
-the `ssl_certificate` and `ssl_key` options.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
-===== `ssl_certificate`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
- * There is no default value for this setting.
-
-SSL certificate to use.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"]
-===== `ssl_certificate_authorities`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#array[array]
- * Default value is `[]`
-
-Validate client certificates against these authorities.
-You can define multiple files or paths. All the certificates will
-be read and added to the trust store. You need to configure the `ssl_verify_mode`
-to `peer` or `force_peer` to enable verification.
-
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"]
-===== `ssl_handshake_timeout`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
- * Default value is `10000`
-
-Time in milliseconds for an incomplete SSL handshake to time out.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
-===== `ssl_key`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
- * There is no default value for this setting.
-
-SSL key to use.
-NOTE: This key needs to be in PKCS#8 format. You can convert it with
-https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL]; see that page
-for more information.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
-===== `ssl_key_passphrase`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
- * There is no default value for this setting.
-
-SSL key passphrase to use.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"]
-===== `ssl_verify_mode`
-
- * Value can be any of: `none`, `peer`, `force_peer`
- * Default value is `"none"`
-
-By default the server doesn't do any client verification.
-
-`peer` will make the server ask the client to provide a certificate.
-If the client provides a certificate, it will be validated.
-
-`force_peer` will make the server ask the client to provide a certificate.
-If the client doesn't provide a certificate, the connection will be closed.
-
-This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs.
-
-[id="{version}-plugins-{type}s-{plugin}-tls_max_version"]
-===== `tls_max_version`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
- * Default value is `1.2`
-
-The maximum TLS version allowed for the encrypted connections. The value must be one of the following:
-1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
-
-[id="{version}-plugins-{type}s-{plugin}-tls_min_version"]
-===== `tls_min_version`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
- * Default value is `1`
-
-The minimum TLS version allowed for the encrypted connections.
The value must be one of the following: -1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2 - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/dead_letter_queue-index.asciidoc b/docs/versioned-plugins/inputs/dead_letter_queue-index.asciidoc deleted file mode 100644 index c2e33c563..000000000 --- a/docs/versioned-plugins/inputs/dead_letter_queue-index.asciidoc +++ /dev/null @@ -1,15 +0,0 @@ -:plugin: dead_letter_queue -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | Aug 25, 2017 (latest) -| <> | Aug 25, 2017 -|======================================================================= - - -include::dead_letter_queue-v1.1.0.asciidoc[] -include::dead_letter_queue-v1.0.0.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.0.asciidoc b/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.0.asciidoc deleted file mode 100644 index 0a84eb021..000000000 --- a/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.0.asciidoc +++ /dev/null @@ -1,110 +0,0 @@ -:plugin: dead_letter_queue -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v1.0.0 -:release_date: 2017-08-25 -:changelog_url: https://github.com/logstash-plugins/logstash-input-dead_letter_queue/blob/v1.1.0/CHANGELOG.md -:include_path: ../include -:logstash-ref: http://www.elastic.co/guide/en/logstash/6.0 -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Dead_letter_queue input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Logstash input to read events from Logstash's dead letter queue. - -[source, sh] ------------------------------------------ -input { - dead_letter_queue { - path => "/var/logstash/data/dead_letter_queue" - start_timestamp => "2017-04-04T23:40:37" - } -} ------------------------------------------ - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Dead_letter_queue Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-commit_offsets>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-path>> |a valid filesystem path|Yes -| <<{version}-plugins-{type}s-{plugin}-pipeline_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-start_timestamp>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. 
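-
-For example, a minimal sketch (reusing the hypothetical queue path from the
-example above) that re-reads the queue from the beginning on every run without
-saving state; `commit_offsets` and the other options used here are described
-below:
-
-[source, sh]
------------------------------------------
-input {
-  dead_letter_queue {
-    # placeholder path; point this at your own path.dead_letter_queue directory
-    path => "/var/logstash/data/dead_letter_queue"
-    # don't persist the read position, so each run starts over
-    commit_offsets => false
-  }
-}
------------------------------------------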
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-commit_offsets"]
-===== `commit_offsets`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
- * Default value is `true`
-
-Specifies whether this input should commit offsets as it processes the events.
-Typically you set this to `false` when you want to iterate multiple times over the
-events in the dead letter queue without saving state, for example, while you
-are exploring the events in the queue.
-
-[id="{version}-plugins-{type}s-{plugin}-path"]
-===== `path`
-
- * This is a required setting.
- * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
- * There is no default value for this setting.
-
-Path to the dead letter queue directory that was created by a Logstash instance.
-This is the path from which "dead" events are read and is typically configured
-in the original Logstash instance with the setting `path.dead_letter_queue`.
-
-[id="{version}-plugins-{type}s-{plugin}-pipeline_id"]
-===== `pipeline_id`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * Default value is `"main"`
-
-ID of the pipeline whose events you want to read from.
-
-[id="{version}-plugins-{type}s-{plugin}-sincedb_path"]
-===== `sincedb_path`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * There is no default value for this setting.
-
-Path of the sincedb database file (which keeps track of the current position in the dead letter queue) that
-will be written to disk. By default, sincedb files are written to `/plugins/inputs/dead_letter_queue`.
-
-NOTE: This value must be a file path and not a directory path.
-
-[id="{version}-plugins-{type}s-{plugin}-start_timestamp"]
-===== `start_timestamp`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * There is no default value for this setting.
-
-Timestamp in ISO8601 format from which you want to start processing events.
-For example, `2017-04-04T23:40:37`.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/dead_letter_queue-v1.1.0.asciidoc b/docs/versioned-plugins/inputs/dead_letter_queue-v1.1.0.asciidoc
deleted file mode 100644
index 09c12af85..000000000
--- a/docs/versioned-plugins/inputs/dead_letter_queue-v1.1.0.asciidoc
+++ /dev/null
@@ -1,110 +0,0 @@
-:plugin: dead_letter_queue
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v1.1.0
-:release_date: 2017-08-25
-:changelog_url: https://github.com/logstash-plugins/logstash-input-dead_letter_queue/blob/v1.1.0/CHANGELOG.md
-:include_path: ../include
-:logstash-ref: http://www.elastic.co/guide/en/logstash/6.0
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Dead_letter_queue input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Logstash input to read events from Logstash's dead letter queue.
-
-[source, sh]
------------------------------------------
-input {
-  dead_letter_queue {
-    path => "/var/logstash/data/dead_letter_queue"
-    start_timestamp => "2017-04-04T23:40:37"
-  }
-}
------------------------------------------
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Dead_letter_queue Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-commit_offsets>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
-| <<{version}-plugins-{type}s-{plugin}-path>> |a valid filesystem path|Yes
-| <<{version}-plugins-{type}s-{plugin}-pipeline_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
-| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
-| <<{version}-plugins-{type}s-{plugin}-start_timestamp>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-commit_offsets"]
-===== `commit_offsets`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
- * Default value is `true`
-
-Specifies whether this input should commit offsets as it processes the events.
-Typically you set this to `false` when you want to iterate multiple times over the
-events in the dead letter queue without saving state, for example, while you
-are exploring the events in the queue.
-
-[id="{version}-plugins-{type}s-{plugin}-path"]
-===== `path`
-
- * This is a required setting.
- * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
- * There is no default value for this setting.
-
-Path to the dead letter queue directory that was created by a Logstash instance.
-This is the path from which "dead" events are read and is typically configured
-in the original Logstash instance with the setting `path.dead_letter_queue`.
-
-[id="{version}-plugins-{type}s-{plugin}-pipeline_id"]
-===== `pipeline_id`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * Default value is `"main"`
-
-ID of the pipeline whose events you want to read from.
-
-[id="{version}-plugins-{type}s-{plugin}-sincedb_path"]
-===== `sincedb_path`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * There is no default value for this setting.
-
-Path of the sincedb database file (which keeps track of the current position in the dead letter queue) that
-will be written to disk. By default, sincedb files are written to `/plugins/inputs/dead_letter_queue`.
-
-NOTE: This value must be a file path and not a directory path.
-
-[id="{version}-plugins-{type}s-{plugin}-start_timestamp"]
-===== `start_timestamp`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * There is no default value for this setting.
-
-Timestamp in ISO8601 format from which you want to start processing events.
-For example, `2017-04-04T23:40:37`.
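-
-As a fuller sketch (the paths here are placeholders, not defaults), the options
-above can be combined to re-process events from a known point in time while
-tracking progress in an explicit sincedb file:
-
-[source, sh]
------------------------------------------
-input {
-  dead_letter_queue {
-    # placeholder path; point this at your own path.dead_letter_queue directory
-    path => "/var/logstash/data/dead_letter_queue"
-    # read the DLQ written by the "main" pipeline (the default)
-    pipeline_id => "main"
-    # skip events that entered the queue before this ISO8601 timestamp
-    start_timestamp => "2017-04-04T23:40:37"
-    # hypothetical file (not a directory) for tracking the read position
-    sincedb_path => "/var/logstash/data/dlq_sincedb"
-  }
-}
------------------------------------------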
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs.asciidoc b/docs/versioned-plugins/outputs.asciidoc deleted file mode 100644 index c1f22fd24..000000000 --- a/docs/versioned-plugins/outputs.asciidoc +++ /dev/null @@ -1,6 +0,0 @@ -:type: output -:type_uc: Output - -include::include/plugin-intro.asciidoc[] - -include::outputs/elasticsearch-index.asciidoc[] diff --git a/docs/versioned-plugins/outputs/elasticsearch-index.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-index.asciidoc deleted file mode 100644 index 521825202..000000000 --- a/docs/versioned-plugins/outputs/elasticsearch-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: elasticsearch -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | Aug 21, 2017 (latest) -| <> | Aug 21, 2017 -|======================================================================= - - -include::elasticsearch-v8.1.1.asciidoc[] -include::elasticsearch-v8.1.0.asciidoc[] diff --git a/docs/versioned-plugins/outputs/elasticsearch-v8.1.0.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v8.1.0.asciidoc deleted file mode 100644 index 0002dbb2a..000000000 --- a/docs/versioned-plugins/outputs/elasticsearch-v8.1.0.asciidoc +++ /dev/null @@ -1,664 +0,0 @@ -:plugin: elasticsearch -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v8.1.0 -:release_date: 2017-08-21 -:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v8.1.1/CHANGELOG.md -:include_path: ../include -:logstash-ref: http://www.elastic.co/guide/en/logstash/6.0 -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Elasticsearch output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -.Compatibility Note -[NOTE] -================================================================================ -Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting] -called `http.content_type.required`. If this option is set to `true`, and you -are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output -plugin to version 6.2.5 or higher. - -================================================================================ - -This plugin is the recommended method of storing logs in Elasticsearch. -If you plan on using the Kibana web interface, you'll want to use this output. - -This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0. -We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower, -yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having -to upgrade Logstash in lock-step. - -You can learn more about Elasticsearch at - -==== Template management for Elasticsearch 5.x -Index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0. -Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default -behavior. 
-
-**Users installing ES 5.x and LS 5.x**
-This change will not affect you and you will continue to use the ES defaults.
-
-**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
-LS will not force-upgrade the template if a `logstash` template already exists. This means you will still use
-`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
-the new template is installed.
-
-==== Retry Policy
-
-The retry policy has changed significantly in the 8.1.1 release.
-This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
-either partial or total failures. The bulk API sends batches of requests to an HTTP endpoint. Error codes for the HTTP
-request are handled differently than error codes for individual documents.
-
-HTTP requests to the bulk API are expected to return a 200 response code. All other response codes are retried indefinitely.
-
-Document-level errors are handled as follows:
-- 400 and 404 errors are sent to the DLQ, if enabled. If a DLQ is not enabled, a log message will be emitted and the event will be dropped.
-- 409 errors (conflict) are logged as a warning and dropped.
-
-Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
-It is more performant for Elasticsearch to retry these exceptions than for this plugin to retry them.
-
-==== Batch Sizes
-This plugin attempts to send batches of events as a single request. However, if
-a request exceeds 20MB, we will break it up into multiple batch requests. If a single document exceeds 20MB, it will be sent as a single request.
-
-==== DNS Caching
-
-This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
-a global setting for the JVM.
-
-As an example, to set your DNS TTL to 1 second you would set
-the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
-
-Keep in mind that a connection with keepalive enabled will
-not reevaluate its DNS value while the keepalive is in effect.
-
-==== HTTP Compression
-
-This plugin supports request and response compression. Response compression is enabled by default and
-for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for
-it to send back compressed responses. For versions before 5.0, `http.compression` must be set to `true` in
-https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
-
-For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
-setting in their Logstash config file.
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Elasticsearch Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No -| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No -| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No -| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No -| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No -| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No -| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-script_lang>> 
|{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No -| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No -| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-action"] -===== `action` - - * Value type is {logstash-ref}/configuration-file-structure.html#string[string] - * Default value is `"index"` - -Protocol agnostic (i.e. non-http, non-java specific) configs go here -Protocol agnostic methods -The Elasticsearch action to perform. Valid actions are: - -- index: indexes a document (an event from Logstash). -- delete: deletes a document by id (An id is required for this action) -- create: indexes a document, fails if a document by that id already exists in the index. -- update: updates a document by id. Update has a special case where you can upsert -- update a - document if not already present. See the `upsert` option. NOTE: This does not work and is not supported - in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash! 
-- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}` - would use the foo field for the action - -For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation] - -[id="{version}-plugins-{type}s-{plugin}-bulk_path"] -===== `bulk_path` - - * Value type is {logstash-ref}/configuration-file-structure.html#string[string] - * There is no default value for this setting. - -HTTP Path to perform the _bulk requests to -this defaults to a concatenation of the path parameter and "_bulk" - -[id="{version}-plugins-{type}s-{plugin}-cacert"] -===== `cacert` - - * Value type is {logstash-ref}/configuration-file-structure.html#path[path] - * There is no default value for this setting. - -The .cer or .pem file to validate the server's certificate - -[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"] -===== `doc_as_upsert` - - * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] - * Default value is `false` - -Enable `doc_as_upsert` for update mode. -Create a new document with source if `document_id` doesn't exist in Elasticsearch - -[id="{version}-plugins-{type}s-{plugin}-document_id"] -===== `document_id` - - * Value type is {logstash-ref}/configuration-file-structure.html#string[string] - * There is no default value for this setting. - -The document ID for the index. Useful for overwriting existing entries in -Elasticsearch with the same ID. - -[id="{version}-plugins-{type}s-{plugin}-document_type"] -===== `document_type` - - * Value type is {logstash-ref}/configuration-file-structure.html#string[string] - * There is no default value for this setting. - -The document type to write events to. Generally you should try to write only -similar events to the same 'type'. String expansion `%{foo}` works here. -Unless you set 'document_type', the event 'type' will be used if it exists -otherwise the document type will be assigned the value of 'logs' - -[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"] -===== `failure_type_logging_whitelist` - - * Value type is {logstash-ref}/configuration-file-structure.html#array[array] - * Default value is `[]` - -Set the Elasticsearch errors in the whitelist that you don't want to log. -A useful example is when you want to skip all 409 errors -which are `document_already_exists_exception`. - -[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"] -===== `healthcheck_path` - - * Value type is {logstash-ref}/configuration-file-structure.html#string[string] - * There is no default value for this setting. - -HTTP Path where a HEAD request is sent when a backend is marked down -the request is sent in the background to see if it has come back again -before it is once again eligible to service requests. -If you have custom firewall rules you may need to change this - -[id="{version}-plugins-{type}s-{plugin}-hosts"] -===== `hosts` - - * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] - * Default value is `[//127.0.0.1]` - -Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. -Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (eg. 9200, not 9300). 
- `"127.0.0.1"` - `["127.0.0.1:9200","127.0.0.2:9200"]` - `["http://127.0.0.1"]` - `["https://127.0.0.1:9200"]` - `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath) -It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list -to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch. - -Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance. - -[id="{version}-plugins-{type}s-{plugin}-http_compression"] -===== `http_compression` - - * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] - * Default value is `false` - -Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond - -[id="{version}-plugins-{type}s-{plugin}-index"] -===== `index` - - * Value type is {logstash-ref}/configuration-file-structure.html#string[string] - * Default value is `"logstash-%{+YYYY.MM.dd}"` - -The index to write events to. This can be dynamic using the `%{foo}` syntax. -The default value will partition your indices by day so you can more easily -delete old data or only search specific date ranges. -Indexes may not contain uppercase characters. -For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}. -LS uses Joda to format the index pattern from event timestamp. -Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here]. - -[id="{version}-plugins-{type}s-{plugin}-keystore"] -===== `keystore` - - * Value type is {logstash-ref}/configuration-file-structure.html#path[path] - * There is no default value for this setting. - -The keystore used to present a certificate to the server. -It can be either .jks or .p12 - -[id="{version}-plugins-{type}s-{plugin}-keystore_password"] -===== `keystore_password` - - * Value type is {logstash-ref}/configuration-file-structure.html#password[password] - * There is no default value for this setting. - -Set the keystore password - -[id="{version}-plugins-{type}s-{plugin}-manage_template"] -===== `manage_template` - - * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] - * Default value is `true` - -From Logstash 1.3 onwards, a template is applied to Elasticsearch during -Logstash's startup if one with the name `template_name` does not already exist. -By default, the contents of this template is the default template for -`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern -`logstash-*`. Should you require support for other index names, or would like -to change the mappings in the template in general, a custom template can be -specified by setting `template` to the path of a template file. - -Setting `manage_template` to false disables this feature. If you require more -control over template creation, (e.g. creating indices dynamically based on -field names) you should set `manage_template` to false and use the REST -API to apply your templates manually. - -[id="{version}-plugins-{type}s-{plugin}-parameters"] -===== `parameters` - - * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] - * There is no default value for this setting. - -Pass a set of key value pairs as the URL query string. This query string is added -to every host listed in the 'hosts' configuration. 
If the 'hosts' list contains
-URLs that already have query strings, the one specified here will be appended.
-
-[id="{version}-plugins-{type}s-{plugin}-parent"]
-===== `parent`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * Default value is `nil`
-
-For child documents, ID of the associated parent.
-This can be dynamic using the `%{foo}` syntax.
-
-[id="{version}-plugins-{type}s-{plugin}-password"]
-===== `password`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
- * There is no default value for this setting.
-
-Password to authenticate to a secure Elasticsearch cluster.
-
-[id="{version}-plugins-{type}s-{plugin}-path"]
-===== `path`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * There is no default value for this setting.
-
-HTTP Path at which the Elasticsearch server lives. Use this if you must run
-Elasticsearch behind a proxy that remaps the root path of the Elasticsearch HTTP API.
-Note that if you use paths as components of URLs in the 'hosts' field you may
-not also set this field. Doing so will raise an error at startup.
-
-[id="{version}-plugins-{type}s-{plugin}-pipeline"]
-===== `pipeline`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * Default value is `nil`
-
-Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
-here like `pipeline => "%{INGEST_PIPELINE}"`.
-
-[id="{version}-plugins-{type}s-{plugin}-pool_max"]
-===== `pool_max`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
- * Default value is `1000`
-
-While the output tries to reuse connections efficiently we have a maximum.
-This sets the maximum number of open connections the output will create.
-Setting this too low may mean frequently closing / opening connections,
-which is expensive.
-
-[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
-===== `pool_max_per_route`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
- * Default value is `100`
-
-While the output tries to reuse connections efficiently we have a maximum per endpoint.
-This sets the maximum number of open connections per endpoint the output will create.
-Setting this too low may mean frequently closing / opening connections,
-which is expensive.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy"]
-===== `proxy`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri]
- * There is no default value for this setting.
-
-Set the address of a forward HTTP proxy.
-This used to accept hashes as arguments but now only accepts
-arguments of the URI type to prevent leaking credentials.
-
-[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
-===== `resurrect_delay`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
- * Default value is `5`
-
-How frequently, in seconds, to wait between resurrection attempts.
-Resurrection is the process by which backend endpoints marked 'down' are checked
-to see if they have come back to life.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
-===== `retry_initial_interval`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
- * Default value is `2`
-
-Set the initial interval, in seconds, between bulk retries.
The interval doubles on each retry, up to `retry_max_interval`.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
-===== `retry_max_interval`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
- * Default value is `64`
-
-Set the maximum interval, in seconds, between bulk retries.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
-===== `retry_on_conflict`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
- * Default value is `1`
-
-The number of times Elasticsearch should internally retry an update/upserted document.
-See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates documentation]
-for more info.
-
-[id="{version}-plugins-{type}s-{plugin}-routing"]
-===== `routing`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * There is no default value for this setting.
-
-A routing override to be applied to all processed events.
-This can be dynamic using the `%{foo}` syntax.
-
-[id="{version}-plugins-{type}s-{plugin}-script"]
-===== `script`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * Default value is `""`
-
-Set the script name for scripted update mode.
-
-[id="{version}-plugins-{type}s-{plugin}-script_lang"]
-===== `script_lang`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * Default value is `"painless"`
-
-Set the language of the script used. If not set, this defaults to `painless` in ES 5.0.
-
-[id="{version}-plugins-{type}s-{plugin}-script_type"]
-===== `script_type`
-
- * Value can be any of: `inline`, `indexed`, `file`
- * Default value is `["inline"]`
-
-Define the type of script referenced by the "script" variable:
-
- * `inline`: "script" contains an inline script.
- * `indexed`: "script" contains the name of a script directly indexed in Elasticsearch.
- * `file`: "script" contains the name of a script stored in Elasticsearch's config directory.
-
-[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
-===== `script_var_name`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * Default value is `"event"`
-
-Set the variable name passed to the script (scripted update).
-
-[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
-===== `scripted_upsert`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
- * Default value is `false`
-
-If enabled, the script is in charge of creating a non-existent document (scripted update).
-
-[id="{version}-plugins-{type}s-{plugin}-sniffing"]
-===== `sniffing`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
- * Default value is `false`
-
-This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
-Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
-this with master nodes, you probably want to disable HTTP on them by setting
-`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
-manually enter multiple Elasticsearch hosts using the `hosts` parameter.
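-
-For example, a minimal sketch (the address below is a placeholder for one of
-your data nodes) that seeds the connection pool with a single host and lets
-sniffing discover the remaining HTTP-enabled nodes:
-
-[source,ruby]
------------------------------------------
-output {
-  elasticsearch {
-    # placeholder data node; sniffing adds the other HTTP-enabled nodes
-    hosts => ["127.0.0.1:9200"]
-    sniffing => true
-  }
-}
------------------------------------------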
-
-[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
-===== `sniffing_delay`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
- * Default value is `5`
-
-How long to wait, in seconds, between sniffing attempts.
-
-[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
-===== `sniffing_path`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * There is no default value for this setting.
-
-HTTP Path to be used for the sniffing requests.
-The default value is computed by concatenating the `path` value and "_nodes/http".
-If `sniffing_path` is set, it will be used as an absolute path.
-Do not use a full URL here, only a path, e.g. "/sniff/_nodes/http".
-
-[id="{version}-plugins-{type}s-{plugin}-ssl"]
-===== `ssl`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
- * There is no default value for this setting.
-
-Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
-is specified in the URLs listed in 'hosts'. If no explicit protocol is specified, plain HTTP will be used.
-If SSL is explicitly disabled here, the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
-===== `ssl_certificate_verification`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
- * Default value is `true`
-
-Option to validate the server's certificate. Disabling this severely compromises security.
-For more information on disabling certificate verification please read
-https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
-
-[id="{version}-plugins-{type}s-{plugin}-template"]
-===== `template`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
- * There is no default value for this setting.
-
-You can set the path to your own template here, if you so desire.
-If not set, the included template will be used.
-
-[id="{version}-plugins-{type}s-{plugin}-template_name"]
-===== `template_name`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * Default value is `"logstash"`
-
-This configuration option defines how the template is named inside Elasticsearch.
-Note that if you have used the template management features and subsequently
-change this, you will need to prune the old template manually, e.g.
-
-`curl -XDELETE `
-
-where `OldTemplateName` is whatever the former setting was.
-
-[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
-===== `template_overwrite`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
- * Default value is `false`
-
-The `template_overwrite` option will always overwrite the indicated template
-in Elasticsearch with either the one indicated by `template` or the included one.
-This option is set to false by default. If you always want to stay up to date
-with the template provided by Logstash, this option could be very useful to you.
-Likewise, if you have your own template file managed by puppet, for example, and
-you wanted to be able to update it regularly, this option could help there as well.
-
-Please note that if you are using your own customized version of the Logstash
-template (logstash), setting this to true will make Logstash overwrite
-the "logstash" template (i.e.
removing all customized settings).
-
-[id="{version}-plugins-{type}s-{plugin}-timeout"]
-===== `timeout`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
- * Default value is `60`
-
-Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
-a timeout occurs, the request will be retried.
-
-[id="{version}-plugins-{type}s-{plugin}-truststore"]
-===== `truststore`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
- * There is no default value for this setting.
-
-The JKS truststore to validate the server's certificate.
-Use either `:truststore` or `:cacert`.
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
-===== `truststore_password`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
- * There is no default value for this setting.
-
-Set the truststore password.
-
-[id="{version}-plugins-{type}s-{plugin}-upsert"]
-===== `upsert`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * Default value is `""`
-
-Set upsert content for update mode.
-Create a new document with this parameter as a JSON string if `document_id` doesn't exist.
-
-[id="{version}-plugins-{type}s-{plugin}-user"]
-===== `user`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * There is no default value for this setting.
-
-Username to authenticate to a secure Elasticsearch cluster.
-
-[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
-===== `validate_after_inactivity`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
- * Default value is `10000`
-
-How long to wait before checking if the connection is stale before executing a request on a connection using keepalive.
-You may want to set this lower if you get connection errors regularly.
-Quoting the Apache Commons docs (this client is based on Apache Commons):
-'Defines period of inactivity in milliseconds after which persistent connections must
-be re-validated prior to being leased to the consumer. Non-positive value passed to
-this method disables connection validation. This check helps detect connections that
-have become stale (half-closed) while kept inactive in the pool.'
-See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
-
-[id="{version}-plugins-{type}s-{plugin}-version"]
-===== `version`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * There is no default value for this setting.
-
-The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
-See https://www.elastic.co/blog/elasticsearch-versioning-support.
-
-[id="{version}-plugins-{type}s-{plugin}-version_type"]
-===== `version_type`
-
- * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
- * There is no default value for this setting.
-
-The version_type to use for indexing.
-See https://www.elastic.co/blog/elasticsearch-versioning-support.
-See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/elasticsearch-v8.1.1.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v8.1.1.asciidoc deleted file mode 100644 index 9186a90b3..000000000 --- a/docs/versioned-plugins/outputs/elasticsearch-v8.1.1.asciidoc +++ /dev/null @@ -1,664 +0,0 @@ -:plugin: elasticsearch -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v8.1.1 -:release_date: 2017-08-21 -:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v8.1.1/CHANGELOG.md -:include_path: ../include -:logstash-ref: http://www.elastic.co/guide/en/logstash/6.0 -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Elasticsearch output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -.Compatibility Note -[NOTE] -================================================================================ -Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting] -called `http.content_type.required`. If this option is set to `true`, and you -are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output -plugin to version 6.2.5 or higher. - -================================================================================ - -This plugin is the recommended method of storing logs in Elasticsearch. -If you plan on using the Kibana web interface, you'll want to use this output. - -This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0. -We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower, -yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having -to upgrade Logstash in lock-step. - -You can learn more about Elasticsearch at - -==== Template management for Elasticsearch 5.x -Index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0. -Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default -behavior. - -** Users installing ES 5.x and LS 5.x ** -This change will not affect you and you will continue to use the ES defaults. - -** Users upgrading from LS 2.x to LS 5.x with ES 5.x ** -LS will not force upgrade the template, if `logstash` template already exists. This means you will still use -`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after -the new template is installed. - -==== Retry Policy - -The retry policy has changed significantly in the 8.1.1 release. -This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience -either partial or total failures. The bulk API sends batches of requests to an HTTP endpoint. Error codes for the HTTP -request are handled differently than error codes for individual documents. - -HTTP requests to the bulk API are expected to return a 200 response code. 
All other response codes are retried indefinitely.
-
-Document-level errors are handled as follows:
-- 400 and 404 errors are sent to the DLQ, if enabled. If a DLQ is not enabled, a log message will be emitted and the event will be dropped.
-- 409 errors (conflict) are logged as a warning and dropped.
-
-Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
-It is more performant for Elasticsearch to retry these exceptions than for this plugin to retry them.
-
-==== Batch Sizes
-This plugin attempts to send batches of events as a single request. However, if
-a request exceeds 20MB, we will break it up into multiple batch requests. If a single document exceeds 20MB, it will be sent as a single request.
-
-==== DNS Caching
-
-This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
-a global setting for the JVM.
-
-As an example, to set your DNS TTL to 1 second you would set
-the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
-
-Keep in mind that a connection with keepalive enabled will
-not reevaluate its DNS value while the keepalive is in effect.
-
-==== HTTP Compression
-
-This plugin supports request and response compression. Response compression is enabled by default and
-for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for
-it to send back compressed responses. For versions before 5.0, `http.compression` must be set to `true` in
-https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
-
-For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
-setting in their Logstash config file.
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Elasticsearch Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No -| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No -| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No -| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No -| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No -| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No -| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-script_lang>> 
|{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No -| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No -| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No -| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No -| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-action"] -===== `action` - - * Value type is {logstash-ref}/configuration-file-structure.html#string[string] - * Default value is `"index"` - -Protocol agnostic (i.e. non-http, non-java specific) configs go here -Protocol agnostic methods -The Elasticsearch action to perform. Valid actions are: - -- index: indexes a document (an event from Logstash). -- delete: deletes a document by id (An id is required for this action) -- create: indexes a document, fails if a document by that id already exists in the index. -- update: updates a document by id. Update has a special case where you can upsert -- update a - document if not already present. See the `upsert` option. NOTE: This does not work and is not supported - in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash! 
-[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
-===== `bulk_path`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * There is no default value for this setting.
-
-HTTP Path to perform the _bulk requests to.
-This defaults to a concatenation of the `path` parameter and "_bulk".
-
-[id="{version}-plugins-{type}s-{plugin}-cacert"]
-===== `cacert`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
- * There is no default value for this setting.
-
-The .cer or .pem file to validate the server's certificate.
-
-[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
-===== `doc_as_upsert`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
- * Default value is `false`
-
-Enable `doc_as_upsert` for update mode.
-Create a new document with source if `document_id` doesn't exist in Elasticsearch.
-
-[id="{version}-plugins-{type}s-{plugin}-document_id"]
-===== `document_id`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * There is no default value for this setting.
-
-The document ID for the index. Useful for overwriting existing entries in
-Elasticsearch with the same ID.
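-
-As a minimal sketch of update mode combining these options (the `[sku]` field is
-hypothetical), each event updates the document whose `_id` matches, creating it
-first when it is missing:
-
-[source,ruby]
-----
-output {
-  elasticsearch {
-    hosts         => ["127.0.0.1:9200"]
-    action        => "update"
-    document_id   => "%{[sku]}"   # derive the document _id from an event field
-    doc_as_upsert => true         # index the event as a new document if that _id is absent
-  }
-}
-----
-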
- `"127.0.0.1"` - `["127.0.0.1:9200","127.0.0.2:9200"]` - `["http://127.0.0.1"]` - `["https://127.0.0.1:9200"]` - `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath) -It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list -to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch. - -Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance. - -[id="{version}-plugins-{type}s-{plugin}-http_compression"] -===== `http_compression` - - * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] - * Default value is `false` - -Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond - -[id="{version}-plugins-{type}s-{plugin}-index"] -===== `index` - - * Value type is {logstash-ref}/configuration-file-structure.html#string[string] - * Default value is `"logstash-%{+YYYY.MM.dd}"` - -The index to write events to. This can be dynamic using the `%{foo}` syntax. -The default value will partition your indices by day so you can more easily -delete old data or only search specific date ranges. -Indexes may not contain uppercase characters. -For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}. -LS uses Joda to format the index pattern from event timestamp. -Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here]. - -[id="{version}-plugins-{type}s-{plugin}-keystore"] -===== `keystore` - - * Value type is {logstash-ref}/configuration-file-structure.html#path[path] - * There is no default value for this setting. - -The keystore used to present a certificate to the server. -It can be either .jks or .p12 - -[id="{version}-plugins-{type}s-{plugin}-keystore_password"] -===== `keystore_password` - - * Value type is {logstash-ref}/configuration-file-structure.html#password[password] - * There is no default value for this setting. - -Set the keystore password - -[id="{version}-plugins-{type}s-{plugin}-manage_template"] -===== `manage_template` - - * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] - * Default value is `true` - -From Logstash 1.3 onwards, a template is applied to Elasticsearch during -Logstash's startup if one with the name `template_name` does not already exist. -By default, the contents of this template is the default template for -`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern -`logstash-*`. Should you require support for other index names, or would like -to change the mappings in the template in general, a custom template can be -specified by setting `template` to the path of a template file. - -Setting `manage_template` to false disables this feature. If you require more -control over template creation, (e.g. creating indices dynamically based on -field names) you should set `manage_template` to false and use the REST -API to apply your templates manually. - -[id="{version}-plugins-{type}s-{plugin}-parameters"] -===== `parameters` - - * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] - * There is no default value for this setting. - -Pass a set of key value pairs as the URL query string. This query string is added -to every host listed in the 'hosts' configuration. 
-[id="{version}-plugins-{type}s-{plugin}-http_compression"]
-===== `http_compression`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
- * Default value is `false`
-
-Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond.
-
-[id="{version}-plugins-{type}s-{plugin}-index"]
-===== `index`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * Default value is `"logstash-%{+YYYY.MM.dd}"`
-
-The index to write events to. This can be dynamic using the `%{foo}` syntax.
-The default value will partition your indices by day so you can more easily
-delete old data or only search specific date ranges.
-Indexes may not contain uppercase characters.
-For weekly indexes ISO 8601 format is recommended, eg. `logstash-%{+xxxx.ww}`.
-LS uses Joda to format the index pattern from the event timestamp.
-Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
-
-[id="{version}-plugins-{type}s-{plugin}-keystore"]
-===== `keystore`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
- * There is no default value for this setting.
-
-The keystore used to present a certificate to the server.
-It can be either .jks or .p12.
-
-[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
-===== `keystore_password`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
- * There is no default value for this setting.
-
-Set the keystore password.
-
-[id="{version}-plugins-{type}s-{plugin}-manage_template"]
-===== `manage_template`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
- * Default value is `true`
-
-From Logstash 1.3 onwards, a template is applied to Elasticsearch during
-Logstash's startup if one with the name `template_name` does not already exist.
-By default, the contents of this template is the default template for
-`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern
-`logstash-*`. Should you require support for other index names, or would like
-to change the mappings in the template in general, a custom template can be
-specified by setting `template` to the path of a template file.
-
-Setting `manage_template` to false disables this feature. If you require more
-control over template creation (e.g. creating indices dynamically based on
-field names), you should set `manage_template` to false and use the REST
-API to apply your templates manually.
-
-[id="{version}-plugins-{type}s-{plugin}-parameters"]
-===== `parameters`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
- * There is no default value for this setting.
-
-Pass a set of key value pairs as the URL query string. This query string is added
-to every host listed in the 'hosts' configuration. If the 'hosts' list contains
-URLs that already have query strings, the one specified here will be appended.
-
-[id="{version}-plugins-{type}s-{plugin}-parent"]
-===== `parent`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * Default value is `nil`
-
-For child documents, ID of the associated parent.
-This can be dynamic using the `%{foo}` syntax.
-
-[id="{version}-plugins-{type}s-{plugin}-password"]
-===== `password`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
- * There is no default value for this setting.
-
-Password to authenticate to a secure Elasticsearch cluster.
-
-[id="{version}-plugins-{type}s-{plugin}-path"]
-===== `path`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * There is no default value for this setting.
-
-HTTP Path at which the Elasticsearch server lives. Use this if you must run
-Elasticsearch behind a proxy that remaps the root path of the Elasticsearch HTTP API.
-Note that if you use paths as components of URLs in the 'hosts' field you may
-not also set this field. That will raise an error at startup.
-
-[id="{version}-plugins-{type}s-{plugin}-pipeline"]
-===== `pipeline`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * Default value is `nil`
-
-Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
-here, like `pipeline => "%{INGEST_PIPELINE}"`.
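-
-A minimal sketch of choosing an ingest pipeline per event (the `[@metadata][pipeline]`
-field is hypothetical and must be set by an upstream filter on every event):
-
-[source,ruby]
-----
-output {
-  elasticsearch {
-    hosts    => ["127.0.0.1:9200"]
-    pipeline => "%{[@metadata][pipeline]}"   # resolves to a different pipeline per event
-  }
-}
-----
-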
-[id="{version}-plugins-{type}s-{plugin}-pool_max"]
-===== `pool_max`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
- * Default value is `1000`
-
-While the output tries to reuse connections efficiently, we have a maximum.
-This sets the maximum number of open connections the output will create.
-Setting this too low may mean frequently closing / opening connections,
-which is bad.
-
-[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
-===== `pool_max_per_route`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
- * Default value is `100`
-
-While the output tries to reuse connections efficiently, we have a maximum per endpoint.
-This sets the maximum number of open connections per endpoint the output will create.
-Setting this too low may mean frequently closing / opening connections,
-which is bad.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy"]
-===== `proxy`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri]
- * There is no default value for this setting.
-
-Set the address of a forward HTTP proxy.
-This used to accept hashes as arguments, but now only accepts
-arguments of the URI type to prevent leaking credentials.
-
-[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
-===== `resurrect_delay`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
- * Default value is `5`
-
-How frequently, in seconds, to wait between resurrection attempts.
-Resurrection is the process by which backend endpoints marked 'down' are checked
-to see if they have come back to life.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
-===== `retry_initial_interval`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
- * Default value is `2`
-
-Set the initial interval, in seconds, between bulk retries.
-Doubled on each retry up to `retry_max_interval`.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
-===== `retry_max_interval`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
- * Default value is `64`
-
-Set the maximum interval, in seconds, between bulk retries.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
-===== `retry_on_conflict`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
- * Default value is `1`
-
-The number of times Elasticsearch should internally retry an update/upserted document.
-See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates documentation]
-for more info.
-
-[id="{version}-plugins-{type}s-{plugin}-routing"]
-===== `routing`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * There is no default value for this setting.
-
-A routing override to be applied to all processed events.
-This can be dynamic using the `%{foo}` syntax.
-
-[id="{version}-plugins-{type}s-{plugin}-script"]
-===== `script`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * Default value is `""`
-
-Set the script name for scripted update mode.
-
-[id="{version}-plugins-{type}s-{plugin}-script_lang"]
-===== `script_lang`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * Default value is `"painless"`
-
-Set the language of the script used. If not set, this defaults to painless in ES 5.0.
-
-[id="{version}-plugins-{type}s-{plugin}-script_type"]
-===== `script_type`
-
- * Value can be any of: `inline`, `indexed`, `file`
- * Default value is `["inline"]`
-
-Define the type of script referenced by the "script" variable:
-
-- inline: "script" contains an inline script.
-- indexed: "script" contains the name of a script directly indexed in Elasticsearch.
-- file: "script" contains the name of a script stored in Elasticsearch's config directory.
-
-[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
-===== `script_var_name`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * Default value is `"event"`
-
-Set the variable name passed to the script (scripted update).
-
-[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
-===== `scripted_upsert`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
- * Default value is `false`
-
-If enabled, the script is in charge of creating a non-existent document (scripted update).
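-
-Taken together, a hedged sketch of a scripted update (the `[id]` and `[count]` fields and
-the script body are illustrative, and assume an ES 5.x cluster where painless is available):
-
-[source,ruby]
-----
-output {
-  elasticsearch {
-    hosts       => ["127.0.0.1:9200"]
-    action      => "update"
-    document_id => "%{[id]}"
-    script_type => "inline"
-    script_lang => "painless"
-    # The event is exposed to the script under the name set by script_var_name ("event").
-    script      => "ctx._source.count = params.event.count"
-  }
-}
-----
-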
- -[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] -===== `sniffing_delay` - - * Value type is {logstash-ref}/configuration-file-structure.html#number[number] - * Default value is `5` - -How long to wait, in seconds, between sniffing attempts - -[id="{version}-plugins-{type}s-{plugin}-sniffing_path"] -===== `sniffing_path` - - * Value type is {logstash-ref}/configuration-file-structure.html#string[string] - * There is no default value for this setting. - -HTTP Path to be used for the sniffing requests -the default value is computed by concatenating the path value and "_nodes/http" -if sniffing_path is set it will be used as an absolute path -do not use full URL here, only paths, e.g. "/sniff/_nodes/http" - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` - - * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] - * There is no default value for this setting. - -Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme -is specified in the URLs listed in 'hosts'. If no explicit protocol is specified plain HTTP will be used. -If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS URL is given in 'hosts' - -[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] -===== `ssl_certificate_verification` - - * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] - * Default value is `true` - -Option to validate the server's certificate. Disabling this severely compromises security. -For more information on disabling certificate verification please read -https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf - -[id="{version}-plugins-{type}s-{plugin}-template"] -===== `template` - - * Value type is {logstash-ref}/configuration-file-structure.html#path[path] - * There is no default value for this setting. - -You can set the path to your own template here, if you so desire. -If not set, the included template will be used. - -[id="{version}-plugins-{type}s-{plugin}-template_name"] -===== `template_name` - - * Value type is {logstash-ref}/configuration-file-structure.html#string[string] - * Default value is `"logstash"` - -This configuration option defines how the template is named inside Elasticsearch. -Note that if you have used the template management features and subsequently -change this, you will need to prune the old template manually, e.g. - -`curl -XDELETE ` - -where `OldTemplateName` is whatever the former setting was. - -[id="{version}-plugins-{type}s-{plugin}-template_overwrite"] -===== `template_overwrite` - - * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] - * Default value is `false` - -The template_overwrite option will always overwrite the indicated template -in Elasticsearch with either the one indicated by template or the included one. -This option is set to false by default. If you always want to stay up to date -with the template provided by Logstash, this option could be very useful to you. -Likewise, if you have your own template file managed by puppet, for example, and -you wanted to be able to update it regularly, this option could help there as well. - -Please note that if you are using your own customized version of the Logstash -template (logstash), setting this to true will make Logstash to overwrite -the "logstash" template (i.e. 
-[id="{version}-plugins-{type}s-{plugin}-timeout"]
-===== `timeout`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
- * Default value is `60`
-
-Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
-a timeout occurs, the request will be retried.
-
-[id="{version}-plugins-{type}s-{plugin}-truststore"]
-===== `truststore`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
- * There is no default value for this setting.
-
-The JKS truststore to validate the server's certificate.
-Use either `:truststore` or `:cacert`.
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
-===== `truststore_password`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
- * There is no default value for this setting.
-
-Set the truststore password.
-
-[id="{version}-plugins-{type}s-{plugin}-upsert"]
-===== `upsert`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * Default value is `""`
-
-Set upsert content for update mode.
-Create a new document with this parameter as a json string if `document_id` doesn't exist.
-
-[id="{version}-plugins-{type}s-{plugin}-user"]
-===== `user`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * There is no default value for this setting.
-
-Username to authenticate to a secure Elasticsearch cluster.
-
-[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
-===== `validate_after_inactivity`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
- * Default value is `10000`
-
-How long to wait before checking if the connection is stale before executing a request on a connection using keepalive.
-You may want to set this lower if you get connection errors regularly.
-Quoting the Apache commons docs (this client is based on Apache Commons):
-'Defines period of inactivity in milliseconds after which persistent connections must
-be re-validated prior to being leased to the consumer. Non-positive value passed to
-this method disables connection validation. This check helps detect connections that
-have become stale (half-closed) while kept inactive in the pool.'
-See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
-
-[id="{version}-plugins-{type}s-{plugin}-version"]
-===== `version`
-
- * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
- * There is no default value for this setting.
-
-The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
-See https://www.elastic.co/blog/elasticsearch-versioning-support.
-
-[id="{version}-plugins-{type}s-{plugin}-version_type"]
-===== `version_type`
-
- * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
- * There is no default value for this setting.
-
-The version_type to use for indexing.
-See https://www.elastic.co/blog/elasticsearch-versioning-support.
-See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
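-
-As a minimal sketch of external versioning (the `[id]` and `[doc_version]` fields are
-hypothetical; `[doc_version]` must resolve to a number):
-
-[source,ruby]
-----
-output {
-  elasticsearch {
-    hosts        => ["127.0.0.1:9200"]
-    document_id  => "%{[id]}"
-    version      => "%{[doc_version]}"
-    version_type => "external"   # only writes with a higher version than the stored one win
-  }
-}
-----
-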
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]

From 1190795ab3ddc81271940a0ef75d6a057257110a Mon Sep 17 00:00:00 2001
From: Joao Duarte
Date: Thu, 11 Jan 2018 17:16:44 +0000
Subject: [PATCH 3/5] initial attempt at versioned plugin docs

---
 docs/versioned-plugins/codecs-index.asciidoc | 36 + .../codecs/avro-index.asciidoc | 18 + .../codecs/avro-v3.2.0.asciidoc | 103 +++ .../codecs/avro-v3.2.1.asciidoc | 96 +++ .../codecs/avro-v3.2.2.asciidoc | 96 +++ .../codecs/avro-v3.2.3.asciidoc | 96 +++ .../codecs/cef-index.asciidoc | 20 + .../codecs/cef-v4.1.3.asciidoc | 164 ++++ .../codecs/cef-v4.1.4.asciidoc | 164 ++++ .../codecs/cef-v5.0.0.asciidoc | 153 ++++ .../codecs/cef-v5.0.1.asciidoc | 153 ++++ .../codecs/cef-v5.0.2.asciidoc | 153 ++++ .../codecs/cloudfront-index.asciidoc | 16 + .../codecs/cloudfront-v3.0.1.asciidoc | 52 ++ .../codecs/cloudfront-v3.0.2.asciidoc | 52 ++ .../codecs/cloudfront-v3.0.3.asciidoc | 52 ++ .../codecs/cloudtrail-index.asciidoc | 14 + .../codecs/cloudtrail-v3.0.2.asciidoc | 44 ++ .../codecs/cloudtrail-v3.0.3.asciidoc | 44 ++ .../codecs/collectd-index.asciidoc | 20 + .../codecs/collectd-v3.0.4.asciidoc | 147 ++++ .../codecs/collectd-v3.0.5.asciidoc | 140 ++++ .../codecs/collectd-v3.0.6.asciidoc | 140 ++++ .../codecs/collectd-v3.0.7.asciidoc | 140 ++++ .../codecs/collectd-v3.0.8.asciidoc | 140 ++++ .../codecs/compress_spooler-index.asciidoc | 14 + .../codecs/compress_spooler-v2.0.5.asciidoc | 64 ++ .../codecs/compress_spooler-v2.0.6.asciidoc | 64 ++ .../codecs/csv-index.asciidoc | 14 + .../codecs/csv-v0.1.3.asciidoc | 132 ++++ .../codecs/csv-v0.1.4.asciidoc | 132 ++++ .../codecs/dots-index.asciidoc | 16 + .../codecs/dots-v3.0.3.asciidoc | 31 + .../codecs/dots-v3.0.5.asciidoc | 23 + .../codecs/dots-v3.0.6.asciidoc | 23 + .../codecs/edn-index.asciidoc | 16 + .../codecs/edn-v3.0.3.asciidoc | 33 + .../codecs/edn-v3.0.5.asciidoc | 25 + .../codecs/edn-v3.0.6.asciidoc | 25 + .../codecs/edn_lines-index.asciidoc | 16 + .../codecs/edn_lines-v3.0.3.asciidoc | 31 + .../codecs/edn_lines-v3.0.5.asciidoc | 23 + .../codecs/edn_lines-v3.0.6.asciidoc | 23 + .../codecs/es_bulk-index.asciidoc | 16 + .../codecs/es_bulk-v3.0.4.asciidoc | 35 + .../codecs/es_bulk-v3.0.5.asciidoc | 28 + .../codecs/es_bulk-v3.0.6.asciidoc | 28 + .../codecs/example-index.asciidoc | 10 + .../codecs/fluent-index.asciidoc | 18 + .../codecs/fluent-v3.1.2.asciidoc | 51 ++ .../codecs/fluent-v3.1.3.asciidoc | 43 ++ .../codecs/fluent-v3.1.4.asciidoc | 43 ++ .../codecs/fluent-v3.1.5.asciidoc | 43 ++ .../codecs/graphite-index.asciidoc | 16 + .../codecs/graphite-v3.0.3.asciidoc | 93 +++ .../codecs/graphite-v3.0.4.asciidoc | 93 +++ .../codecs/graphite-v3.0.5.asciidoc | 93 +++ .../codecs/gzip_lines-index.asciidoc | 18 + .../codecs/gzip_lines-v3.0.0.asciidoc | 59 ++ .../codecs/gzip_lines-v3.0.1.asciidoc | 52 ++ .../codecs/gzip_lines-v3.0.2.asciidoc | 52 ++ .../codecs/gzip_lines-v3.0.3.asciidoc | 52 ++ .../codecs/json-index.asciidoc | 16 + .../codecs/json-v3.0.3.asciidoc | 62 ++ .../codecs/json-v3.0.4.asciidoc | 62 ++ .../codecs/json-v3.0.5.asciidoc | 62 ++ .../codecs/json_lines-index.asciidoc | 16 + .../codecs/json_lines-v3.0.3.asciidoc | 67 ++ .../codecs/json_lines-v3.0.4.asciidoc | 67 ++ .../codecs/json_lines-v3.0.5.asciidoc | 67 ++ .../codecs/json_pretty-index.asciidoc | 10 + .../codecs/line-index.asciidoc | 22 + .../codecs/line-v3.0.3.asciidoc | 72 ++
.../codecs/line-v3.0.4.asciidoc | 72 ++ .../codecs/line-v3.0.5.asciidoc | 72 ++ .../codecs/line-v3.0.6.asciidoc | 72 ++ .../codecs/line-v3.0.7.asciidoc | 72 ++ .../codecs/line-v3.0.8.asciidoc | 72 ++ .../codecs/msgpack-index.asciidoc | 18 + .../codecs/msgpack-v3.0.3.asciidoc | 44 ++ .../codecs/msgpack-v3.0.5.asciidoc | 44 ++ .../codecs/msgpack-v3.0.6.asciidoc | 44 ++ .../codecs/msgpack-v3.0.7.asciidoc | 44 ++ .../codecs/multiline-index.asciidoc | 18 + .../codecs/multiline-v3.0.5.asciidoc | 222 ++++++ .../codecs/multiline-v3.0.6.asciidoc | 222 ++++++ .../codecs/multiline-v3.0.7.asciidoc | 222 ++++++ .../codecs/multiline-v3.0.8.asciidoc | 222 ++++++ .../codecs/netflow-index.asciidoc | 34 + .../codecs/netflow-v3.10.0.asciidoc | 210 ++++++ .../codecs/netflow-v3.4.1.asciidoc | 192 +++++ .../codecs/netflow-v3.5.0.asciidoc | 192 +++++ .../codecs/netflow-v3.5.1.asciidoc | 187 +++++ .../codecs/netflow-v3.5.2.asciidoc | 206 ++++++ .../codecs/netflow-v3.6.0.asciidoc | 205 +++++ .../codecs/netflow-v3.7.0.asciidoc | 205 +++++ .../codecs/netflow-v3.7.1.asciidoc | 205 +++++ .../codecs/netflow-v3.8.0.asciidoc | 205 +++++ .../codecs/netflow-v3.8.1.asciidoc | 207 ++++++ .../codecs/netflow-v3.8.3.asciidoc | 207 ++++++ .../codecs/netflow-v3.9.0.asciidoc | 209 ++++++ .../codecs/netflow-v3.9.1.asciidoc | 209 ++++++ .../codecs/nmap-index.asciidoc | 16 + .../codecs/nmap-v0.0.19.asciidoc | 81 ++ .../codecs/nmap-v0.0.20.asciidoc | 81 ++ .../codecs/nmap-v0.0.21.asciidoc | 81 ++ .../codecs/oldlogstashjson-index.asciidoc | 14 + .../codecs/oldlogstashjson-v2.0.5.asciidoc | 31 + .../codecs/oldlogstashjson-v2.0.7.asciidoc | 26 + .../codecs/plain-index.asciidoc | 18 + .../codecs/plain-v3.0.3.asciidoc | 65 ++ .../codecs/plain-v3.0.4.asciidoc | 65 ++ .../codecs/plain-v3.0.5.asciidoc | 65 ++ .../codecs/plain-v3.0.6.asciidoc | 65 ++ .../codecs/pretty-index.asciidoc | 10 + .../codecs/protobuf-index.asciidoc | 14 + .../codecs/protobuf-v1.0.2.asciidoc | 106 +++ .../codecs/protobuf-v1.0.3.asciidoc | 106 +++ .../codecs/rubydebug-index.asciidoc | 16 + .../codecs/rubydebug-v3.0.3.asciidoc | 46 ++ .../codecs/rubydebug-v3.0.4.asciidoc | 46 ++ .../codecs/rubydebug-v3.0.5.asciidoc | 46 ++ .../codecs/s3plain-index.asciidoc | 16 + .../codecs/s3plain-v2.0.5.asciidoc | 32 + .../codecs/s3plain-v2.0.6.asciidoc | 32 + .../codecs/s3plain-v2.0.7.asciidoc | 32 + .../codecs/sflow-index.asciidoc | 10 + docs/versioned-plugins/filters-index.asciidoc | 69 ++ .../filters/age-index.asciidoc | 14 + .../filters/age-v1.0.1.asciidoc | 66 ++ .../filters/age-v1.0.2.asciidoc | 66 ++ .../filters/aggregate-index.asciidoc | 24 + .../filters/aggregate-v2.6.0.asciidoc | 552 ++++++++++++++ .../filters/aggregate-v2.6.1.asciidoc | 553 ++++++++++++++ .../filters/aggregate-v2.6.3.asciidoc | 542 ++++++++++++++ .../filters/aggregate-v2.6.4.asciidoc | 542 ++++++++++++++ .../filters/aggregate-v2.7.0.asciidoc | 555 ++++++++++++++ .../filters/aggregate-v2.7.1.asciidoc | 555 ++++++++++++++ .../filters/aggregate-v2.7.2.asciidoc | 555 ++++++++++++++ .../filters/alter-index.asciidoc | 16 + .../filters/alter-v3.0.1.asciidoc | 111 +++ .../filters/alter-v3.0.2.asciidoc | 111 +++ .../filters/alter-v3.0.3.asciidoc | 111 +++ .../filters/anonymize-index.asciidoc | 16 + .../filters/anonymize-v3.0.4.asciidoc | 77 ++ .../filters/anonymize-v3.0.5.asciidoc | 77 ++ .../filters/anonymize-v3.0.6.asciidoc | 77 ++ .../filters/bytesize-index.asciidoc | 10 + .../filters/checksum-index.asciidoc | 14 + .../filters/checksum-v3.0.3.asciidoc | 69 ++ .../filters/checksum-v3.0.4.asciidoc | 69 ++ 
.../filters/cidr-index.asciidoc | 16 + .../filters/cidr-v3.0.1.asciidoc | 80 ++ .../filters/cidr-v3.1.1.asciidoc | 114 +++ .../filters/cidr-v3.1.2.asciidoc | 114 +++ .../filters/cipher-index.asciidoc | 18 + .../filters/cipher-v2.0.6.asciidoc | 243 ++++++ .../filters/cipher-v2.0.7.asciidoc | 243 ++++++ .../filters/cipher-v3.0.0.asciidoc | 220 ++++++ .../filters/cipher-v3.0.1.asciidoc | 220 ++++++ .../filters/clone-index.asciidoc | 16 + .../filters/clone-v3.0.3.asciidoc | 57 ++ .../filters/clone-v3.0.4.asciidoc | 57 ++ .../filters/clone-v3.0.5.asciidoc | 57 ++ .../filters/cloudfoundry-index.asciidoc | 10 + .../filters/collate-index.asciidoc | 14 + .../filters/collate-v2.0.5.asciidoc | 84 +++ .../filters/collate-v2.0.6.asciidoc | 84 +++ .../filters/csv-index.asciidoc | 20 + .../filters/csv-v3.0.3.asciidoc | 152 ++++ .../filters/csv-v3.0.4.asciidoc | 153 ++++ .../filters/csv-v3.0.5.asciidoc | 153 ++++ .../filters/csv-v3.0.6.asciidoc | 153 ++++ .../filters/csv-v3.0.7.asciidoc | 153 ++++ .../filters/date-index.asciidoc | 16 + .../filters/date-v3.1.7.asciidoc | 215 ++++++ .../filters/date-v3.1.8.asciidoc | 215 ++++++ .../filters/date-v3.1.9.asciidoc | 215 ++++++ .../filters/de_dot-index.asciidoc | 16 + .../filters/de_dot-v1.0.1.asciidoc | 82 ++ .../filters/de_dot-v1.0.2.asciidoc | 82 ++ .../filters/de_dot-v1.0.3.asciidoc | 82 ++ .../filters/debug-index.asciidoc | 10 + .../filters/dissect-index.asciidoc | 16 + .../filters/dissect-v1.0.9.asciidoc | 213 ++++++ .../filters/dissect-v1.1.1.asciidoc | 283 +++++++ .../filters/dissect-v1.1.2.asciidoc | 283 +++++++ .../filters/dns-index.asciidoc | 18 + .../filters/dns-v3.0.4.asciidoc | 161 ++++ .../filters/dns-v3.0.5.asciidoc | 161 ++++ .../filters/dns-v3.0.6.asciidoc | 161 ++++ .../filters/dns-v3.0.7.asciidoc | 161 ++++ .../filters/drop-index.asciidoc | 16 + .../filters/drop-v3.0.3.asciidoc | 77 ++ .../filters/drop-v3.0.4.asciidoc | 77 ++ .../filters/drop-v3.0.5.asciidoc | 77 ++ .../filters/elapsed-index.asciidoc | 16 + .../filters/elapsed-v4.0.2.asciidoc | 168 +++++ .../filters/elapsed-v4.0.3.asciidoc | 168 +++++ .../filters/elapsed-v4.0.4.asciidoc | 168 +++++ .../filters/elasticsearch-index.asciidoc | 22 + .../filters/elasticsearch-v3.1.3.asciidoc | 236 ++++++ .../filters/elasticsearch-v3.1.4.asciidoc | 237 ++++++ .../filters/elasticsearch-v3.1.5.asciidoc | 237 ++++++ .../filters/elasticsearch-v3.1.6.asciidoc | 237 ++++++ .../filters/elasticsearch-v3.2.0.asciidoc | 238 ++++++ .../filters/elasticsearch-v3.2.1.asciidoc | 238 ++++++ .../filters/emoji-index.asciidoc | 14 + .../filters/emoji-v1.0.1.asciidoc | 176 +++++ .../filters/emoji-v1.0.2.asciidoc | 176 +++++ .../filters/environment-index.asciidoc | 16 + .../filters/environment-v3.0.1.asciidoc | 83 +++ .../filters/environment-v3.0.2.asciidoc | 83 +++ .../filters/environment-v3.0.3.asciidoc | 83 +++ .../filters/example-index.asciidoc | 10 + .../filters/extractnumbers-index.asciidoc | 16 + .../filters/extractnumbers-v3.0.1.asciidoc | 61 ++ .../filters/extractnumbers-v3.0.2.asciidoc | 61 ++ .../filters/extractnumbers-v3.0.3.asciidoc | 61 ++ .../filters/fingerprint-index.asciidoc | 18 + .../filters/fingerprint-v3.0.4.asciidoc | 139 ++++ .../filters/fingerprint-v3.1.0.asciidoc | 153 ++++ .../filters/fingerprint-v3.1.1.asciidoc | 153 ++++ .../filters/fingerprint-v3.1.2.asciidoc | 153 ++++ .../filters/geoip-index.asciidoc | 28 + .../filters/geoip-v4.1.1.asciidoc | 180 +++++ .../filters/geoip-v4.2.0.asciidoc | 195 +++++ .../filters/geoip-v4.2.1.asciidoc | 196 +++++ .../filters/geoip-v4.3.0.asciidoc | 206 ++++++ 
.../filters/geoip-v4.3.1.asciidoc | 206 ++++++ .../filters/geoip-v5.0.0.asciidoc | 184 +++++ .../filters/geoip-v5.0.1.asciidoc | 184 +++++ .../filters/geoip-v5.0.2.asciidoc | 184 +++++ .../filters/geoip-v5.0.3.asciidoc | 184 +++++ .../filters/grok-index.asciidoc | 22 + .../filters/grok-v3.4.1.asciidoc | 332 +++++++++ .../filters/grok-v3.4.2.asciidoc | 333 +++++++++ .../filters/grok-v3.4.3.asciidoc | 332 +++++++++ .../filters/grok-v3.4.4.asciidoc | 332 +++++++++ .../filters/grok-v4.0.0.asciidoc | 332 +++++++++ .../filters/grok-v4.0.1.asciidoc | 332 +++++++++ .../filters/hashid-index.asciidoc | 14 + .../filters/hashid-v0.1.2.asciidoc | 110 +++ .../filters/hashid-v0.1.3.asciidoc | 110 +++ .../filters/i18n-index.asciidoc | 16 + .../filters/i18n-v3.0.1.asciidoc | 62 ++ .../filters/i18n-v3.0.2.asciidoc | 62 ++ .../filters/i18n-v3.0.3.asciidoc | 62 ++ .../filters/jdbc_static-index.asciidoc | 10 + .../filters/jdbc_streaming-index.asciidoc | 16 + .../filters/jdbc_streaming-v1.0.1.asciidoc | 226 ++++++ .../filters/jdbc_streaming-v1.0.2.asciidoc | 226 ++++++ .../filters/jdbc_streaming-v1.0.3.asciidoc | 226 ++++++ .../filters/json-index.asciidoc | 16 + .../filters/json-v3.0.3.asciidoc | 121 +++ .../filters/json-v3.0.4.asciidoc | 121 +++ .../filters/json-v3.0.5.asciidoc | 121 +++ .../filters/json_encode-index.asciidoc | 16 + .../filters/json_encode-v3.0.1.asciidoc | 76 ++ .../filters/json_encode-v3.0.2.asciidoc | 76 ++ .../filters/json_encode-v3.0.3.asciidoc | 76 ++ .../kubernetes_metadata-index.asciidoc | 10 + .../filters/kv-index.asciidoc | 16 + .../filters/kv-v4.0.1.asciidoc | 409 ++++++++++ .../filters/kv-v4.0.2.asciidoc | 409 ++++++++++ .../filters/kv-v4.0.3.asciidoc | 409 ++++++++++ .../filters/language-index.asciidoc | 10 + .../filters/lookup-index.asciidoc | 10 + .../filters/math-index.asciidoc | 10 + .../filters/metaevent-index.asciidoc | 14 + .../filters/metaevent-v2.0.5.asciidoc | 62 ++ .../filters/metaevent-v2.0.7.asciidoc | 62 ++ .../filters/metricize-index.asciidoc | 16 + .../filters/metricize-v3.0.1.asciidoc | 109 +++ .../filters/metricize-v3.0.2.asciidoc | 109 +++ .../filters/metricize-v3.0.3.asciidoc | 109 +++ .../filters/metrics-index.asciidoc | 16 + .../filters/metrics-v4.0.3.asciidoc | 228 ++++++ .../filters/metrics-v4.0.4.asciidoc | 228 ++++++ .../filters/metrics-v4.0.5.asciidoc | 228 ++++++ .../filters/multiline-index.asciidoc | 14 + .../filters/multiline-v3.0.3.asciidoc | 194 +++++ .../filters/multiline-v3.0.4.asciidoc | 194 +++++ .../filters/mutate-index.asciidoc | 18 + .../filters/mutate-v3.1.5.asciidoc | 282 +++++++ .../filters/mutate-v3.1.6.asciidoc | 283 +++++++ .../filters/mutate-v3.1.7.asciidoc | 283 +++++++ .../filters/mutate-v3.2.0.asciidoc | 287 +++++++ .../filters/oui-index.asciidoc | 14 + .../filters/oui-v3.0.1.asciidoc | 70 ++ .../filters/oui-v3.0.2.asciidoc | 70 ++ .../filters/prune-index.asciidoc | 16 + .../filters/prune-v3.0.1.asciidoc | 154 ++++ .../filters/prune-v3.0.2.asciidoc | 154 ++++ .../filters/prune-v3.0.3.asciidoc | 154 ++++ .../filters/punct-index.asciidoc | 14 + .../filters/punct-v2.0.5.asciidoc | 62 ++ .../filters/punct-v2.0.6.asciidoc | 62 ++ .../filters/range-index.asciidoc | 16 + .../filters/range-v3.0.1.asciidoc | 89 +++ .../filters/range-v3.0.2.asciidoc | 89 +++ .../filters/range-v3.0.3.asciidoc | 89 +++ .../filters/ruby-index.asciidoc | 22 + .../filters/ruby-v3.0.3.asciidoc | 82 ++ .../filters/ruby-v3.0.4.asciidoc | 82 ++ .../filters/ruby-v3.1.0.asciidoc | 192 +++++ .../filters/ruby-v3.1.1.asciidoc | 192 +++++ .../filters/ruby-v3.1.2.asciidoc | 
192 +++++ .../filters/ruby-v3.1.3.asciidoc | 201 +++++ .../filters/script-index.asciidoc | 10 + .../filters/sleep-index.asciidoc | 16 + .../filters/sleep-v3.0.4.asciidoc | 119 +++ .../filters/sleep-v3.0.5.asciidoc | 119 +++ .../filters/sleep-v3.0.6.asciidoc | 119 +++ .../filters/split-index.asciidoc | 20 + .../filters/split-v3.1.2.asciidoc | 111 +++ .../filters/split-v3.1.3.asciidoc | 111 +++ .../filters/split-v3.1.4.asciidoc | 111 +++ .../filters/split-v3.1.5.asciidoc | 111 +++ .../filters/split-v3.1.6.asciidoc | 111 +++ .../filters/syslog_pri-index.asciidoc | 16 + .../filters/syslog_pri-v3.0.3.asciidoc | 85 +++ .../filters/syslog_pri-v3.0.4.asciidoc | 85 +++ .../filters/syslog_pri-v3.0.5.asciidoc | 85 +++ .../filters/throttle-index.asciidoc | 16 + .../filters/throttle-v4.0.2.asciidoc | 252 +++++++ .../filters/throttle-v4.0.3.asciidoc | 252 +++++++ .../filters/throttle-v4.0.4.asciidoc | 252 +++++++ .../filters/tld-index.asciidoc | 16 + .../filters/tld-v3.0.1.asciidoc | 73 ++ .../filters/tld-v3.0.2.asciidoc | 73 ++ .../filters/tld-v3.0.3.asciidoc | 73 ++ .../filters/translate-index.asciidoc | 16 + .../filters/translate-v3.0.2.asciidoc | 211 ++++++ .../filters/translate-v3.0.3.asciidoc | 211 ++++++ .../filters/translate-v3.0.4.asciidoc | 211 ++++++ .../filters/truncate-index.asciidoc | 16 + .../filters/truncate-v1.0.2.asciidoc | 84 +++ .../filters/truncate-v1.0.3.asciidoc | 84 +++ .../filters/truncate-v1.0.4.asciidoc | 84 +++ .../filters/unique-index.asciidoc | 16 + .../filters/unique-v2.0.5.asciidoc | 53 ++ .../filters/unique-v2.0.6.asciidoc | 53 ++ .../filters/unique-v3.0.0.asciidoc | 53 ++ .../filters/urldecode-index.asciidoc | 16 + .../filters/urldecode-v3.0.4.asciidoc | 83 +++ .../filters/urldecode-v3.0.5.asciidoc | 83 +++ .../filters/urldecode-v3.0.6.asciidoc | 83 +++ .../filters/useragent-index.asciidoc | 22 + .../filters/useragent-v3.1.0.asciidoc | 117 +++ .../filters/useragent-v3.1.1.asciidoc | 117 +++ .../filters/useragent-v3.1.3.asciidoc | 118 +++ .../filters/useragent-v3.2.0.asciidoc | 118 +++ .../filters/useragent-v3.2.1.asciidoc | 118 +++ .../filters/useragent-v3.2.2.asciidoc | 118 +++ .../filters/uuid-index.asciidoc | 16 + .../filters/uuid-v3.0.3.asciidoc | 95 +++ .../filters/uuid-v3.0.4.asciidoc | 95 +++ .../filters/uuid-v3.0.5.asciidoc | 95 +++ .../filters/xml-index.asciidoc | 16 + .../filters/xml-v4.0.3.asciidoc | 187 +++++ .../filters/xml-v4.0.4.asciidoc | 187 +++++ .../filters/xml-v4.0.5.asciidoc | 187 +++++ .../filters/yaml-index.asciidoc | 12 + .../filters/yaml-v0.1.1.asciidoc | 103 +++ .../filters/zeromq-index.asciidoc | 14 + .../filters/zeromq-v3.0.1.asciidoc | 148 ++++ .../filters/zeromq-v3.0.2.asciidoc | 148 ++++ docs/versioned-plugins/inputs-index.asciidoc | 74 ++ .../inputs/beats-index.asciidoc | 34 + .../inputs/beats-v4.0.1.asciidoc | 240 ++++++ .../inputs/beats-v4.0.2.asciidoc | 240 ++++++ .../inputs/beats-v4.0.3.asciidoc | 240 ++++++ .../inputs/beats-v4.0.4.asciidoc | 241 ++++++ .../inputs/beats-v4.0.5.asciidoc | 241 ++++++ .../inputs/beats-v5.0.0.asciidoc | 222 ++++++ .../inputs/beats-v5.0.1.asciidoc | 222 ++++++ .../inputs/beats-v5.0.2.asciidoc | 222 ++++++ .../inputs/beats-v5.0.3.asciidoc | 222 ++++++ .../inputs/beats-v5.0.4.asciidoc | 222 ++++++ .../inputs/beats-v5.0.5.asciidoc | 222 ++++++ .../inputs/beats-v5.0.6.asciidoc | 222 ++++++ .../inputs/cloudwatch-index.asciidoc | 16 + .../inputs/cloudwatch-v2.0.1.asciidoc | 266 +++++++ .../inputs/cloudwatch-v2.0.2.asciidoc | 266 +++++++ .../inputs/cloudwatch-v2.0.3.asciidoc | 266 +++++++ 
.../inputs/cloudwatch_logs-index.asciidoc | 10 + .../inputs/couchdb_changes-index.asciidoc | 16 + .../inputs/couchdb_changes-v3.1.2.asciidoc | 220 ++++++ .../inputs/couchdb_changes-v3.1.3.asciidoc | 220 ++++++ .../inputs/couchdb_changes-v3.1.4.asciidoc | 220 ++++++ .../inputs/dead_letter_queue-index.asciidoc | 24 + .../inputs/dead_letter_queue-v1.0.3.asciidoc | 108 +++ .../inputs/dead_letter_queue-v1.0.4.asciidoc | 109 +++ .../inputs/dead_letter_queue-v1.0.5.asciidoc | 109 +++ .../inputs/dead_letter_queue-v1.0.6.asciidoc | 109 +++ .../inputs/dead_letter_queue-v1.1.0.asciidoc | 109 +++ .../inputs/dead_letter_queue-v1.1.1.asciidoc | 112 +++ .../inputs/dead_letter_queue-v1.1.2.asciidoc | 112 +++ .../inputs/drupal_dblog-index.asciidoc | 14 + .../inputs/drupal_dblog-v2.0.5.asciidoc | 102 +++ .../inputs/drupal_dblog-v2.0.6.asciidoc | 102 +++ .../inputs/dynamodb-index.asciidoc | 10 + .../inputs/elasticsearch-index.asciidoc | 20 + .../inputs/elasticsearch-v4.0.4.asciidoc | 220 ++++++ .../inputs/elasticsearch-v4.0.5.asciidoc | 230 ++++++ .../inputs/elasticsearch-v4.0.6.asciidoc | 230 ++++++ .../inputs/elasticsearch-v4.1.0.asciidoc | 230 ++++++ .../inputs/elasticsearch-v4.1.1.asciidoc | 230 ++++++ .../inputs/eventlog-index.asciidoc | 14 + .../inputs/eventlog-v4.1.1.asciidoc | 74 ++ .../inputs/eventlog-v4.1.2.asciidoc | 74 ++ .../inputs/example-index.asciidoc | 10 + .../inputs/exec-index.asciidoc | 16 + .../inputs/exec-v3.1.3.asciidoc | 69 ++ .../inputs/exec-v3.1.4.asciidoc | 69 ++ .../inputs/exec-v3.1.5.asciidoc | 69 ++ .../inputs/file-index.asciidoc | 14 + .../inputs/file-v4.0.2.asciidoc | 256 +++++++ .../inputs/file-v4.0.3.asciidoc | 256 +++++++ .../inputs/fluentd-index.asciidoc | 10 + .../inputs/ganglia-index.asciidoc | 16 + .../inputs/ganglia-v3.1.1.asciidoc | 63 ++ .../inputs/ganglia-v3.1.2.asciidoc | 63 ++ .../inputs/ganglia-v3.1.3.asciidoc | 63 ++ .../inputs/gelf-index.asciidoc | 18 + .../inputs/gelf-v3.0.4.asciidoc | 105 +++ .../inputs/gelf-v3.0.5.asciidoc | 105 +++ .../inputs/gelf-v3.0.6.asciidoc | 105 +++ .../inputs/gelf-v3.0.7.asciidoc | 105 +++ .../inputs/gemfire-index.asciidoc | 14 + .../inputs/gemfire-v2.0.5.asciidoc | 132 ++++ .../inputs/gemfire-v2.0.6.asciidoc | 132 ++++ .../inputs/generator-index.asciidoc | 16 + .../inputs/generator-v3.0.3.asciidoc | 107 +++ .../inputs/generator-v3.0.4.asciidoc | 107 +++ .../inputs/generator-v3.0.5.asciidoc | 107 +++ .../inputs/github-index.asciidoc | 16 + .../inputs/github-v3.0.3.asciidoc | 81 ++ .../inputs/github-v3.0.4.asciidoc | 81 ++ .../inputs/github-v3.0.5.asciidoc | 81 ++ .../inputs/google_pubsub-index.asciidoc | 16 + .../inputs/google_pubsub-v1.0.1.asciidoc | 213 ++++++ .../inputs/google_pubsub-v1.0.3.asciidoc | 213 ++++++ .../inputs/google_pubsub-v1.0.4.asciidoc | 213 ++++++ .../inputs/googleanalytics-index.asciidoc | 10 + .../inputs/graphite-index.asciidoc | 14 + .../inputs/graphite-v3.0.3.asciidoc | 175 +++++ .../inputs/graphite-v3.0.4.asciidoc | 175 +++++ .../inputs/heartbeat-index.asciidoc | 16 + .../inputs/heartbeat-v3.0.3.asciidoc | 97 +++ .../inputs/heartbeat-v3.0.4.asciidoc | 97 +++ .../inputs/heartbeat-v3.0.5.asciidoc | 97 +++ .../inputs/heroku-index.asciidoc | 14 + .../inputs/heroku-v3.0.1.asciidoc | 66 ++ .../inputs/heroku-v3.0.2.asciidoc | 66 ++ .../inputs/http-index.asciidoc | 18 + .../inputs/http-v3.0.5.asciidoc | 168 +++++ .../inputs/http-v3.0.6.asciidoc | 168 +++++ .../inputs/http-v3.0.7.asciidoc | 165 +++++ .../inputs/http-v3.0.8.asciidoc | 178 +++++ .../inputs/http_poller-index.asciidoc | 30 + 
.../inputs/http_poller-v3.3.0.asciidoc | 400 ++++++++++ .../inputs/http_poller-v3.3.1.asciidoc | 401 ++++++++++ .../inputs/http_poller-v3.3.2.asciidoc | 401 ++++++++++ .../inputs/http_poller-v3.3.3.asciidoc | 401 ++++++++++ .../inputs/http_poller-v3.3.4.asciidoc | 391 ++++++++++ .../inputs/http_poller-v4.0.0.asciidoc | 380 ++++++++++ .../inputs/http_poller-v4.0.1.asciidoc | 380 ++++++++++ .../inputs/http_poller-v4.0.2.asciidoc | 380 ++++++++++ .../inputs/http_poller-v4.0.3.asciidoc | 379 ++++++++++ .../inputs/http_poller-v4.0.4.asciidoc | 379 ++++++++++ .../inputs/imap-index.asciidoc | 16 + .../inputs/imap-v3.0.3.asciidoc | 176 +++++ .../inputs/imap-v3.0.4.asciidoc | 176 +++++ .../inputs/imap-v3.0.5.asciidoc | 176 +++++ .../inputs/irc-index.asciidoc | 18 + .../inputs/irc-v3.0.3.asciidoc | 152 ++++ .../inputs/irc-v3.0.4.asciidoc | 152 ++++ .../inputs/irc-v3.0.5.asciidoc | 152 ++++ .../inputs/irc-v3.0.6.asciidoc | 152 ++++ .../inputs/jdbc-index.asciidoc | 26 + .../inputs/jdbc-v4.2.1.asciidoc | 486 ++++++++++++ .../inputs/jdbc-v4.2.2.asciidoc | 486 ++++++++++++ .../inputs/jdbc-v4.2.3.asciidoc | 486 ++++++++++++ .../inputs/jdbc-v4.2.4.asciidoc | 486 ++++++++++++ .../inputs/jdbc-v4.3.0.asciidoc | 486 ++++++++++++ .../inputs/jdbc-v4.3.1.asciidoc | 486 ++++++++++++ .../inputs/jdbc-v4.3.2.asciidoc | 486 ++++++++++++ .../inputs/jdbc-v4.3.3.asciidoc | 486 ++++++++++++ .../inputs/jms-index.asciidoc | 16 + .../inputs/jms-v3.0.2.asciidoc | 259 +++++++ .../inputs/jms-v3.0.3.asciidoc | 259 +++++++ .../inputs/jms-v3.0.4.asciidoc | 259 +++++++ .../inputs/jmx-index.asciidoc | 16 + .../inputs/jmx-pipe-index.asciidoc | 10 + .../inputs/jmx-v3.0.2.asciidoc | 157 ++++ .../inputs/jmx-v3.0.3.asciidoc | 157 ++++ .../inputs/jmx-v3.0.4.asciidoc | 157 ++++ .../inputs/journald-index.asciidoc | 12 + .../inputs/journald-v2.0.1.asciidoc | 152 ++++ .../inputs/kafka-index.asciidoc | 26 + .../inputs/kafka-v6.3.0.asciidoc | 551 ++++++++++++++ .../inputs/kafka-v6.3.2.asciidoc | 552 ++++++++++++++ .../inputs/kafka-v6.3.3.asciidoc | 553 ++++++++++++++ .../inputs/kafka-v6.3.4.asciidoc | 553 ++++++++++++++ .../inputs/kafka-v7.0.0.asciidoc | 566 ++++++++++++++ .../inputs/kafka-v8.0.0.asciidoc | 557 ++++++++++++++ .../inputs/kafka-v8.0.2.asciidoc | 557 ++++++++++++++ .../inputs/kafka-v8.0.4.asciidoc | 542 ++++++++++++++ .../inputs/kinesis-index.asciidoc | 18 + .../inputs/kinesis-v2.0.4.asciidoc | 105 +++ .../inputs/kinesis-v2.0.5.asciidoc | 112 +++ .../inputs/kinesis-v2.0.6.asciidoc | 112 +++ .../inputs/kinesis-v2.0.7.asciidoc | 112 +++ .../inputs/log4j-index.asciidoc | 18 + .../inputs/log4j-v3.0.6.asciidoc | 171 +++++ .../inputs/log4j-v3.1.0.asciidoc | 169 +++++ .../inputs/log4j-v3.1.1.asciidoc | 169 +++++ .../inputs/log4j-v3.1.2.asciidoc | 169 +++++ .../inputs/log4j2-index.asciidoc | 10 + .../inputs/lumberjack-index.asciidoc | 16 + .../inputs/lumberjack-v3.1.2.asciidoc | 112 +++ .../inputs/lumberjack-v3.1.3.asciidoc | 112 +++ .../inputs/lumberjack-v3.1.4.asciidoc | 112 +++ .../inputs/meetup-index.asciidoc | 16 + .../inputs/meetup-v3.0.1.asciidoc | 102 +++ .../inputs/meetup-v3.0.2.asciidoc | 102 +++ .../inputs/meetup-v3.0.3.asciidoc | 102 +++ .../inputs/mongodb-index.asciidoc | 10 + .../inputs/neo4j-index.asciidoc | 14 + .../inputs/neo4j-v2.0.5.asciidoc | 93 +++ .../inputs/neo4j-v2.0.6.asciidoc | 93 +++ .../inputs/netflow-index.asciidoc | 10 + .../inputs/perfmon-index.asciidoc | 10 + .../inputs/pipe-index.asciidoc | 16 + .../inputs/pipe-v3.0.4.asciidoc | 61 ++ .../inputs/pipe-v3.0.5.asciidoc | 61 ++ .../inputs/pipe-v3.0.6.asciidoc 
| 61 ++ .../inputs/puppet_facter-index.asciidoc | 16 + .../inputs/puppet_facter-v3.0.1.asciidoc | 106 +++ .../inputs/puppet_facter-v3.0.2.asciidoc | 106 +++ .../inputs/puppet_facter-v3.0.3.asciidoc | 106 +++ .../inputs/rabbitmq-index.asciidoc | 20 + .../inputs/rabbitmq-v5.2.4.asciidoc | 415 +++++++++++ .../inputs/rabbitmq-v5.2.5.asciidoc | 415 +++++++++++ .../inputs/rabbitmq-v6.0.0.asciidoc | 395 ++++++++++ .../inputs/rabbitmq-v6.0.1.asciidoc | 395 ++++++++++ .../inputs/rabbitmq-v6.0.2.asciidoc | 395 ++++++++++ .../inputs/rackspace-index.asciidoc | 12 + .../inputs/rackspace-v3.0.1.asciidoc | 102 +++ .../inputs/redis-index.asciidoc | 18 + .../inputs/redis-v3.1.3.asciidoc | 139 ++++ .../inputs/redis-v3.1.4.asciidoc | 139 ++++ .../inputs/redis-v3.1.5.asciidoc | 139 ++++ .../inputs/redis-v3.1.6.asciidoc | 139 ++++ .../inputs/relp-index.asciidoc | 16 + .../inputs/relp-v3.0.1.asciidoc | 126 ++++ .../inputs/relp-v3.0.2.asciidoc | 126 ++++ .../inputs/relp-v3.0.3.asciidoc | 126 ++++ .../inputs/rss-index.asciidoc | 16 + .../inputs/rss-v3.0.2.asciidoc | 70 ++ .../inputs/rss-v3.0.3.asciidoc | 70 ++ .../inputs/rss-v3.0.4.asciidoc | 70 ++ .../inputs/s3-index.asciidoc | 22 + .../inputs/s3-v3.1.5.asciidoc | 214 ++++++ .../inputs/s3-v3.1.6.asciidoc | 214 ++++++ .../inputs/s3-v3.1.7.asciidoc | 214 ++++++ .../inputs/s3-v3.1.8.asciidoc | 214 ++++++ .../inputs/s3-v3.1.9.asciidoc | 215 ++++++ .../inputs/s3-v3.2.0.asciidoc | 215 ++++++ .../inputs/s3sqs-index.asciidoc | 10 + .../inputs/salesforce-index.asciidoc | 14 + .../inputs/salesforce-v3.0.2.asciidoc | 199 +++++ .../inputs/salesforce-v3.0.3.asciidoc | 199 +++++ .../inputs/snmptrap-index.asciidoc | 16 + .../inputs/snmptrap-v3.0.3.asciidoc | 88 +++ .../inputs/snmptrap-v3.0.4.asciidoc | 88 +++ .../inputs/snmptrap-v3.0.5.asciidoc | 88 +++ .../inputs/sqlite-index.asciidoc | 16 + .../inputs/sqlite-v3.0.1.asciidoc | 124 ++++ .../inputs/sqlite-v3.0.2.asciidoc | 124 ++++ .../inputs/sqlite-v3.0.3.asciidoc | 124 ++++ .../inputs/sqs-index.asciidoc | 18 + .../inputs/sqs-v3.0.3.asciidoc | 219 ++++++ .../inputs/sqs-v3.0.4.asciidoc | 220 ++++++ .../inputs/sqs-v3.0.5.asciidoc | 220 ++++++ .../inputs/sqs-v3.0.6.asciidoc | 220 ++++++ .../inputs/stdin-index.asciidoc | 16 + .../inputs/stdin-v3.2.3.asciidoc | 39 + .../inputs/stdin-v3.2.4.asciidoc | 35 + .../inputs/stdin-v3.2.5.asciidoc | 35 + .../inputs/stomp-index.asciidoc | 18 + .../inputs/stomp-v3.0.4.asciidoc | 119 +++ .../inputs/stomp-v3.0.5.asciidoc | 119 +++ .../inputs/stomp-v3.0.6.asciidoc | 119 +++ .../inputs/stomp-v3.0.7.asciidoc | 119 +++ .../inputs/syslog-index.asciidoc | 18 + .../inputs/syslog-v3.2.1.asciidoc | 144 ++++ .../inputs/syslog-v3.2.2.asciidoc | 144 ++++ .../inputs/syslog-v3.2.3.asciidoc | 144 ++++ .../inputs/syslog-v3.2.4.asciidoc | 144 ++++ .../inputs/tcp-index.asciidoc | 26 + .../inputs/tcp-v4.1.2.asciidoc | 205 +++++ .../inputs/tcp-v4.2.2.asciidoc | 205 +++++ .../inputs/tcp-v4.2.3.asciidoc | 205 +++++ .../inputs/tcp-v4.2.4.asciidoc | 205 +++++ .../inputs/tcp-v5.0.0.asciidoc | 187 +++++ .../inputs/tcp-v5.0.1.asciidoc | 187 +++++ .../inputs/tcp-v5.0.2.asciidoc | 187 +++++ .../inputs/tcp-v5.0.3.asciidoc | 187 +++++ .../inputs/twitter-index.asciidoc | 18 + .../inputs/twitter-v3.0.4.asciidoc | 225 ++++++ .../inputs/twitter-v3.0.5.asciidoc | 226 ++++++ .../inputs/twitter-v3.0.6.asciidoc | 226 ++++++ .../inputs/twitter-v3.0.7.asciidoc | 226 ++++++ .../inputs/udp-index.asciidoc | 20 + .../inputs/udp-v3.1.1.asciidoc | 106 +++ .../inputs/udp-v3.1.2.asciidoc | 106 +++ .../inputs/udp-v3.1.3.asciidoc | 106 +++ 
.../inputs/udp-v3.2.0.asciidoc | 106 +++ .../inputs/udp-v3.2.1.asciidoc | 106 +++ .../inputs/unix-index.asciidoc | 16 + .../inputs/unix-v3.0.4.asciidoc | 103 +++ .../inputs/unix-v3.0.5.asciidoc | 103 +++ .../inputs/unix-v3.0.6.asciidoc | 103 +++ .../inputs/varnishlog-index.asciidoc | 16 + .../inputs/varnishlog-v3.0.1.asciidoc | 52 ++ .../inputs/varnishlog-v3.0.2.asciidoc | 52 ++ .../inputs/varnishlog-v3.0.3.asciidoc | 52 ++ .../inputs/websocket-index.asciidoc | 16 + .../inputs/websocket-v4.0.1.asciidoc | 64 ++ .../inputs/websocket-v4.0.2.asciidoc | 64 ++ .../inputs/websocket-v4.0.3.asciidoc | 64 ++ .../inputs/wmi-index.asciidoc | 16 + .../inputs/wmi-v3.0.1.asciidoc | 119 +++ .../inputs/wmi-v3.0.2.asciidoc | 119 +++ .../inputs/wmi-v3.0.3.asciidoc | 119 +++ .../inputs/xmpp-index.asciidoc | 18 + .../inputs/xmpp-v3.1.3.asciidoc | 87 +++ .../inputs/xmpp-v3.1.4.asciidoc | 87 +++ .../inputs/xmpp-v3.1.5.asciidoc | 87 +++ .../inputs/xmpp-v3.1.6.asciidoc | 87 +++ .../inputs/zenoss-index.asciidoc | 14 + .../inputs/zenoss-v2.0.5.asciidoc | 398 ++++++++++ .../inputs/zenoss-v2.0.6.asciidoc | 398 ++++++++++ .../inputs/zeromq-index.asciidoc | 12 + .../inputs/zeromq-v3.0.3.asciidoc | 159 ++++ docs/versioned-plugins/outputs-index.asciidoc | 76 ++ .../outputs/beats-index.asciidoc | 10 + .../outputs/boundary-index.asciidoc | 16 + .../outputs/boundary-v3.0.2.asciidoc | 139 ++++ .../outputs/boundary-v3.0.3.asciidoc | 139 ++++ .../outputs/boundary-v3.0.4.asciidoc | 139 ++++ .../outputs/circonus-index.asciidoc | 16 + .../outputs/circonus-v3.0.1.asciidoc | 93 +++ .../outputs/circonus-v3.0.3.asciidoc | 91 +++ .../outputs/circonus-v3.0.4.asciidoc | 91 +++ .../outputs/cloudwatch-index.asciidoc | 16 + .../outputs/cloudwatch-v3.0.5.asciidoc | 317 ++++++++ .../outputs/cloudwatch-v3.0.6.asciidoc | 317 ++++++++ .../outputs/cloudwatch-v3.0.7.asciidoc | 317 ++++++++ .../outputs/csv-index.asciidoc | 16 + .../outputs/csv-v3.0.4.asciidoc | 175 +++++ .../outputs/csv-v3.0.5.asciidoc | 175 +++++ .../outputs/csv-v3.0.6.asciidoc | 175 +++++ .../outputs/datadog-index.asciidoc | 16 + .../outputs/datadog-v3.0.1.asciidoc | 124 ++++ .../outputs/datadog-v3.0.3.asciidoc | 122 +++ .../outputs/datadog-v3.0.4.asciidoc | 122 +++ .../outputs/datadog_metrics-index.asciidoc | 14 + .../outputs/datadog_metrics-v3.0.1.asciidoc | 130 ++++ .../outputs/datadog_metrics-v3.0.2.asciidoc | 130 ++++ .../outputs/elasticsearch-index.asciidoc | 44 ++ .../outputs/elasticsearch-v7.3.2.asciidoc | 679 +++++++++++++++++ .../outputs/elasticsearch-v7.3.3.asciidoc | 679 +++++++++++++++++ .../outputs/elasticsearch-v7.3.4.asciidoc | 679 +++++++++++++++++ .../outputs/elasticsearch-v7.3.5.asciidoc | 679 +++++++++++++++++ .../outputs/elasticsearch-v7.3.6.asciidoc | 680 +++++++++++++++++ .../outputs/elasticsearch-v7.3.7.asciidoc | 680 +++++++++++++++++ .../outputs/elasticsearch-v7.3.8.asciidoc | 680 +++++++++++++++++ .../outputs/elasticsearch-v7.4.0.asciidoc | 681 +++++++++++++++++ .../outputs/elasticsearch-v7.4.1.asciidoc | 681 +++++++++++++++++ .../outputs/elasticsearch-v7.4.2.asciidoc | 698 ++++++++++++++++++ .../outputs/elasticsearch-v8.0.0.asciidoc | 662 +++++++++++++++++ .../outputs/elasticsearch-v8.0.1.asciidoc | 662 +++++++++++++++++ .../outputs/elasticsearch-v8.1.1.asciidoc | 663 +++++++++++++++++ .../outputs/elasticsearch-v8.2.0.asciidoc | 663 +++++++++++++++++ .../outputs/elasticsearch-v8.2.2.asciidoc | 680 +++++++++++++++++ .../outputs/elasticsearch-v9.0.0.asciidoc | 684 +++++++++++++++++ .../outputs/elasticsearch-v9.0.2.asciidoc | 686 +++++++++++++++++ 
.../outputs/elasticsearch_java-index.asciidoc | 12 + .../elasticsearch_java-v2.1.4.asciidoc | 491 ++++++++++++ .../outputs/email-index.asciidoc | 14 + .../outputs/email-v4.0.4.asciidoc | 233 ++++++ .../outputs/email-v4.0.6.asciidoc | 235 ++++++ .../outputs/example-index.asciidoc | 10 + .../outputs/exec-index.asciidoc | 16 + .../outputs/exec-v3.1.1.asciidoc | 86 +++ .../outputs/exec-v3.1.2.asciidoc | 86 +++ .../outputs/exec-v3.1.3.asciidoc | 86 +++ .../outputs/file-index.asciidoc | 22 + .../outputs/file-v4.0.2.asciidoc | 136 ++++ .../outputs/file-v4.1.0.asciidoc | 136 ++++ .../outputs/file-v4.1.1.asciidoc | 136 ++++ .../outputs/file-v4.1.2.asciidoc | 136 ++++ .../outputs/file-v4.2.0.asciidoc | 136 ++++ .../outputs/file-v4.2.1.asciidoc | 136 ++++ .../outputs/firehose-index.asciidoc | 10 + .../outputs/ganglia-index.asciidoc | 16 + .../outputs/ganglia-v3.0.3.asciidoc | 139 ++++ .../outputs/ganglia-v3.0.4.asciidoc | 139 ++++ .../outputs/ganglia-v3.0.5.asciidoc | 139 ++++ .../outputs/gelf-index.asciidoc | 14 + .../outputs/gelf-v3.1.3.asciidoc | 167 +++++ .../outputs/gelf-v3.1.4.asciidoc | 167 +++++ .../outputs/gemfire-index.asciidoc | 14 + .../outputs/gemfire-v2.0.5.asciidoc | 100 +++ .../outputs/gemfire-v2.0.6.asciidoc | 100 +++ .../outputs/google_bigquery-index.asciidoc | 16 + .../outputs/google_bigquery-v3.2.1.asciidoc | 302 ++++++++ .../outputs/google_bigquery-v3.2.2.asciidoc | 302 ++++++++ .../outputs/google_bigquery-v3.2.3.asciidoc | 302 ++++++++ .../google_cloud_storage-index.asciidoc | 14 + .../google_cloud_storage-v3.0.3.asciidoc | 206 ++++++ .../google_cloud_storage-v3.0.4.asciidoc | 206 ++++++ .../outputs/graphite-index.asciidoc | 16 + .../outputs/graphite-v3.1.2.asciidoc | 173 +++++ .../outputs/graphite-v3.1.3.asciidoc | 173 +++++ .../outputs/graphite-v3.1.4.asciidoc | 173 +++++ .../outputs/graphtastic-index.asciidoc | 16 + .../outputs/graphtastic-v3.0.1.asciidoc | 148 ++++ .../outputs/graphtastic-v3.0.2.asciidoc | 148 ++++ .../outputs/graphtastic-v3.0.3.asciidoc | 148 ++++ .../outputs/hipchat-index.asciidoc | 12 + .../outputs/hipchat-v4.0.3.asciidoc | 122 +++ .../outputs/http-index.asciidoc | 30 + .../outputs/http-v4.3.0.asciidoc | 380 ++++++++++ .../outputs/http-v4.3.1.asciidoc | 381 ++++++++++ .../outputs/http-v4.3.2.asciidoc | 377 ++++++++++ .../outputs/http-v4.3.4.asciidoc | 379 ++++++++++ .../outputs/http-v4.4.0.asciidoc | 389 ++++++++++ .../outputs/http-v5.0.0.asciidoc | 369 +++++++++ .../outputs/http-v5.0.1.asciidoc | 369 +++++++++ .../outputs/http-v5.1.0.asciidoc | 379 ++++++++++ .../outputs/http-v5.1.1.asciidoc | 379 ++++++++++ .../outputs/http-v5.1.2.asciidoc | 379 ++++++++++ .../outputs/icinga-index.asciidoc | 10 + .../outputs/influxdb-index.asciidoc | 16 + .../outputs/influxdb-v5.0.1.asciidoc | 270 +++++++ .../outputs/influxdb-v5.0.2.asciidoc | 270 +++++++ .../outputs/influxdb-v5.0.3.asciidoc | 270 +++++++ .../outputs/irc-index.asciidoc | 16 + .../outputs/irc-v3.0.3.asciidoc | 157 ++++ .../outputs/irc-v3.0.4.asciidoc | 157 ++++ .../outputs/irc-v3.0.5.asciidoc | 157 ++++ .../outputs/jira-index.asciidoc | 14 + .../outputs/jira-v3.0.2.asciidoc | 195 +++++ .../outputs/jira-v3.0.3.asciidoc | 195 +++++ .../outputs/jms-index.asciidoc | 12 + .../outputs/jms-v3.0.1.asciidoc | 175 +++++ .../outputs/juggernaut-index.asciidoc | 16 + .../outputs/juggernaut-v3.0.3.asciidoc | 115 +++ .../outputs/juggernaut-v3.0.4.asciidoc | 115 +++ .../outputs/juggernaut-v3.0.5.asciidoc | 115 +++ .../outputs/kafka-index.asciidoc | 26 + .../outputs/kafka-v6.2.0.asciidoc | 449 +++++++++++ 
.../outputs/kafka-v6.2.1.asciidoc | 450 +++++++++++ .../outputs/kafka-v6.2.2.asciidoc | 451 +++++++++++ .../outputs/kafka-v7.0.0.asciidoc | 418 +++++++++++ .../outputs/kafka-v7.0.1.asciidoc | 418 +++++++++++ .../outputs/kafka-v7.0.3.asciidoc | 425 +++++++++++ .../outputs/kafka-v7.0.4.asciidoc | 425 +++++++++++ .../outputs/kafka-v7.0.6.asciidoc | 425 +++++++++++ .../outputs/librato-index.asciidoc | 16 + .../outputs/librato-v3.0.2.asciidoc | 162 ++++ .../outputs/librato-v3.0.4.asciidoc | 162 ++++ .../outputs/librato-v3.0.5.asciidoc | 162 ++++ .../outputs/logentries-index.asciidoc | 10 + .../outputs/loggly-index.asciidoc | 16 + .../outputs/loggly-v3.0.1.asciidoc | 164 ++++ .../outputs/loggly-v3.0.2.asciidoc | 164 ++++ .../outputs/loggly-v3.0.3.asciidoc | 164 ++++ .../outputs/lumberjack-index.asciidoc | 14 + .../outputs/lumberjack-v3.1.3.asciidoc | 101 +++ .../outputs/lumberjack-v3.1.5.asciidoc | 102 +++ .../outputs/metriccatcher-index.asciidoc | 16 + .../outputs/metriccatcher-v3.0.1.asciidoc | 164 ++++ .../outputs/metriccatcher-v3.0.2.asciidoc | 164 ++++ .../outputs/metriccatcher-v3.0.3.asciidoc | 164 ++++ .../outputs/monasca_log_api-index.asciidoc | 12 + .../outputs/monasca_log_api-v1.0.2.asciidoc | 179 +++++ .../outputs/mongodb-index.asciidoc | 16 + .../outputs/mongodb-v3.1.1.asciidoc | 134 ++++ .../outputs/mongodb-v3.1.2.asciidoc | 134 ++++ .../outputs/mongodb-v3.1.3.asciidoc | 134 ++++ .../outputs/nagios-index.asciidoc | 16 + .../outputs/nagios-v3.0.3.asciidoc | 90 +++ .../outputs/nagios-v3.0.4.asciidoc | 90 +++ .../outputs/nagios-v3.0.5.asciidoc | 90 +++ .../outputs/nagios_nsca-index.asciidoc | 16 + .../outputs/nagios_nsca-v3.0.3.asciidoc | 141 ++++ .../outputs/nagios_nsca-v3.0.4.asciidoc | 141 ++++ .../outputs/nagios_nsca-v3.0.5.asciidoc | 141 ++++ .../outputs/neo4j-index.asciidoc | 12 + .../outputs/neo4j-v2.0.5.asciidoc | 53 ++ .../outputs/newrelic-index.asciidoc | 10 + .../outputs/null-index.asciidoc | 14 + .../outputs/null-v3.0.3.asciidoc | 37 + .../outputs/null-v3.0.4.asciidoc | 37 + .../outputs/opentsdb-index.asciidoc | 16 + .../outputs/opentsdb-v3.1.2.asciidoc | 87 +++ .../outputs/opentsdb-v3.1.3.asciidoc | 87 +++ .../outputs/opentsdb-v3.1.4.asciidoc | 87 +++ .../outputs/pagerduty-index.asciidoc | 16 + .../outputs/pagerduty-v3.0.4.asciidoc | 105 +++ .../outputs/pagerduty-v3.0.5.asciidoc | 105 +++ .../outputs/pagerduty-v3.0.6.asciidoc | 105 +++ .../outputs/pipe-index.asciidoc | 16 + .../outputs/pipe-v3.0.3.asciidoc | 80 ++ .../outputs/pipe-v3.0.4.asciidoc | 80 ++ .../outputs/pipe-v3.0.5.asciidoc | 80 ++ .../outputs/rabbitmq-index.asciidoc | 24 + .../outputs/rabbitmq-v4.0.8.asciidoc | 279 +++++++ .../outputs/rabbitmq-v4.0.9.asciidoc | 293 ++++++++ .../outputs/rabbitmq-v5.0.0.asciidoc | 266 +++++++ .../outputs/rabbitmq-v5.0.1.asciidoc | 266 +++++++ .../outputs/rabbitmq-v5.0.2.asciidoc | 266 +++++++ .../outputs/rabbitmq-v5.0.3.asciidoc | 266 +++++++ .../outputs/rabbitmq-v5.1.0.asciidoc | 266 +++++++ .../outputs/rackspace-index.asciidoc | 14 + .../outputs/rackspace-v2.0.5.asciidoc | 91 +++ .../outputs/rackspace-v2.0.7.asciidoc | 91 +++ .../outputs/rados-index.asciidoc | 10 + .../outputs/redis-index.asciidoc | 22 + .../outputs/redis-v3.0.4.asciidoc | 221 ++++++ .../outputs/redis-v3.0.5.asciidoc | 221 ++++++ .../outputs/redis-v4.0.0.asciidoc | 202 +++++ .../outputs/redis-v4.0.1.asciidoc | 202 +++++ .../outputs/redis-v4.0.2.asciidoc | 202 +++++ .../outputs/redis-v4.0.3.asciidoc | 202 +++++ .../outputs/redmine-index.asciidoc | 16 + .../outputs/redmine-v3.0.1.asciidoc | 192 +++++ 
.../outputs/redmine-v3.0.2.asciidoc | 192 +++++
.../outputs/redmine-v3.0.3.asciidoc | 192 +++++
.../outputs/riak-index.asciidoc | 16 +
.../outputs/riak-v3.0.1.asciidoc | 177 +++++
.../outputs/riak-v3.0.2.asciidoc | 177 +++++
.../outputs/riak-v3.0.3.asciidoc | 177 +++++
.../outputs/riemann-index.asciidoc | 16 +
.../outputs/riemann-v3.0.1.asciidoc | 178 +++++
.../outputs/riemann-v3.0.2.asciidoc | 178 +++++
.../outputs/riemann-v3.0.3.asciidoc | 178 +++++
.../outputs/s3-index.asciidoc | 14 +
.../outputs/s3-v4.0.8.asciidoc | 327 ++++++++
.../outputs/s3-v4.0.9.asciidoc | 327 ++++++++
.../outputs/slack-index.asciidoc | 14 +
.../outputs/slack-v2.0.3.asciidoc | 107 +++
.../outputs/slack-v2.1.0.asciidoc | 107 +++
.../outputs/sns-index.asciidoc | 16 +
.../outputs/sns-v4.0.4.asciidoc | 162 ++++
.../outputs/sns-v4.0.5.asciidoc | 162 ++++
.../outputs/sns-v4.0.6.asciidoc | 162 ++++
.../outputs/solr_http-index.asciidoc | 16 +
.../outputs/solr_http-v3.0.2.asciidoc | 92 +++
.../outputs/solr_http-v3.0.3.asciidoc | 92 +++
.../outputs/solr_http-v3.0.4.asciidoc | 92 +++
.../outputs/sqs-index.asciidoc | 20 +
.../outputs/sqs-v4.0.2.asciidoc | 218 ++++++
.../outputs/sqs-v4.0.3.asciidoc | 218 ++++++
.../outputs/sqs-v5.0.0.asciidoc | 197 +++++
.../outputs/sqs-v5.0.1.asciidoc | 197 +++++
.../outputs/sqs-v5.0.2.asciidoc | 197 +++++
.../outputs/statsd-index.asciidoc | 16 +
.../outputs/statsd-v3.1.2.asciidoc | 193 +++++
.../outputs/statsd-v3.1.3.asciidoc | 193 +++++
.../outputs/statsd-v3.1.4.asciidoc | 193 +++++
.../outputs/stdout-index.asciidoc | 16 +
.../outputs/stdout-v3.1.1.asciidoc | 64 ++
.../outputs/stdout-v3.1.2.asciidoc | 60 ++
.../outputs/stdout-v3.1.3.asciidoc | 60 ++
.../outputs/stomp-index.asciidoc | 16 +
.../outputs/stomp-v3.0.5.asciidoc | 123 +++
.../outputs/stomp-v3.0.7.asciidoc | 123 +++
.../outputs/stomp-v3.0.8.asciidoc | 123 +++
.../outputs/syslog-index.asciidoc | 16 +
.../outputs/syslog-v3.0.2.asciidoc | 239 ++++++
.../outputs/syslog-v3.0.3.asciidoc | 239 ++++++
.../outputs/syslog-v3.0.4.asciidoc | 239 ++++++
.../outputs/tcp-index.asciidoc | 20 +
.../outputs/tcp-v4.0.1.asciidoc | 158 ++++
.../outputs/tcp-v4.0.2.asciidoc | 158 ++++
.../outputs/tcp-v5.0.0.asciidoc | 144 ++++
.../outputs/tcp-v5.0.1.asciidoc | 144 ++++
.../outputs/tcp-v5.0.2.asciidoc | 144 ++++
.../outputs/timber-index.asciidoc | 12 +
.../outputs/timber-v1.0.3.asciidoc | 228 ++++++
.../outputs/udp-index.asciidoc | 16 +
.../outputs/udp-v3.0.3.asciidoc | 65 ++
.../outputs/udp-v3.0.4.asciidoc | 65 ++
.../outputs/udp-v3.0.5.asciidoc | 65 ++
.../outputs/webhdfs-index.asciidoc | 16 +
.../outputs/webhdfs-v3.0.3.asciidoc | 293 ++++++++
.../outputs/webhdfs-v3.0.4.asciidoc | 293 ++++++++
.../outputs/webhdfs-v3.0.5.asciidoc | 293 ++++++++
.../outputs/websocket-index.asciidoc | 16 +
.../outputs/websocket-v3.0.2.asciidoc | 66 ++
.../outputs/websocket-v3.0.3.asciidoc | 66 ++
.../outputs/websocket-v3.0.4.asciidoc | 66 ++
.../outputs/xmpp-index.asciidoc | 20 +
.../outputs/xmpp-v3.0.3.asciidoc | 104 +++
.../outputs/xmpp-v3.0.4.asciidoc | 105 +++
.../outputs/xmpp-v3.0.5.asciidoc | 105 +++
.../outputs/xmpp-v3.0.6.asciidoc | 105 +++
.../outputs/xmpp-v3.0.7.asciidoc | 105 +++
.../outputs/zabbix-index.asciidoc | 16 +
.../outputs/zabbix-v3.0.2.asciidoc | 160 ++++
.../outputs/zabbix-v3.0.3.asciidoc | 160 ++++
.../outputs/zabbix-v3.0.4.asciidoc | 160 ++++
.../outputs/zeromq-index.asciidoc | 14 +
.../outputs/zeromq-v3.1.1.asciidoc | 126 ++++
.../outputs/zeromq-v3.1.2.asciidoc | 126 ++++
.../outputs/zookeeper-index.asciidoc | 10 +
923 files changed, 129323 insertions(+)
create mode 100644 docs/versioned-plugins/codecs-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/avro-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/avro-v3.2.0.asciidoc
create mode 100644 docs/versioned-plugins/codecs/avro-v3.2.1.asciidoc
create mode 100644 docs/versioned-plugins/codecs/avro-v3.2.2.asciidoc
create mode 100644 docs/versioned-plugins/codecs/avro-v3.2.3.asciidoc
create mode 100644 docs/versioned-plugins/codecs/cef-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/cef-v4.1.3.asciidoc
create mode 100644 docs/versioned-plugins/codecs/cef-v4.1.4.asciidoc
create mode 100644 docs/versioned-plugins/codecs/cef-v5.0.0.asciidoc
create mode 100644 docs/versioned-plugins/codecs/cef-v5.0.1.asciidoc
create mode 100644 docs/versioned-plugins/codecs/cef-v5.0.2.asciidoc
create mode 100644 docs/versioned-plugins/codecs/cloudfront-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/cloudfront-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/codecs/cloudfront-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/codecs/cloudfront-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/codecs/cloudtrail-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/cloudtrail-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/codecs/cloudtrail-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/codecs/collectd-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/collectd-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/codecs/collectd-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/codecs/collectd-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/codecs/collectd-v3.0.7.asciidoc
create mode 100644 docs/versioned-plugins/codecs/collectd-v3.0.8.asciidoc
create mode 100644 docs/versioned-plugins/codecs/compress_spooler-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/compress_spooler-v2.0.5.asciidoc
create mode 100644 docs/versioned-plugins/codecs/compress_spooler-v2.0.6.asciidoc
create mode 100644 docs/versioned-plugins/codecs/csv-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/csv-v0.1.3.asciidoc
create mode 100644 docs/versioned-plugins/codecs/csv-v0.1.4.asciidoc
create mode 100644 docs/versioned-plugins/codecs/dots-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/dots-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/codecs/dots-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/codecs/dots-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/codecs/edn-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/edn-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/codecs/edn-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/codecs/edn-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/codecs/edn_lines-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/edn_lines-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/codecs/edn_lines-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/codecs/edn_lines-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/codecs/es_bulk-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/es_bulk-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/codecs/es_bulk-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/codecs/es_bulk-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/codecs/example-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/fluent-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/fluent-v3.1.2.asciidoc
create mode 100644 docs/versioned-plugins/codecs/fluent-v3.1.3.asciidoc
create mode 100644 docs/versioned-plugins/codecs/fluent-v3.1.4.asciidoc
create mode 100644 docs/versioned-plugins/codecs/fluent-v3.1.5.asciidoc
create mode 100644 docs/versioned-plugins/codecs/graphite-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/graphite-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/codecs/graphite-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/codecs/graphite-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/codecs/gzip_lines-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/gzip_lines-v3.0.0.asciidoc
create mode 100644 docs/versioned-plugins/codecs/gzip_lines-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/codecs/gzip_lines-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/codecs/gzip_lines-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/codecs/json-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/json-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/codecs/json-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/codecs/json-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/codecs/json_lines-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/json_lines-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/codecs/json_lines-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/codecs/json_lines-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/codecs/json_pretty-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/line-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/line-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/codecs/line-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/codecs/line-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/codecs/line-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/codecs/line-v3.0.7.asciidoc
create mode 100644 docs/versioned-plugins/codecs/line-v3.0.8.asciidoc
create mode 100644 docs/versioned-plugins/codecs/msgpack-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/msgpack-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/codecs/msgpack-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/codecs/msgpack-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/codecs/msgpack-v3.0.7.asciidoc
create mode 100644 docs/versioned-plugins/codecs/multiline-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/multiline-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/codecs/multiline-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/codecs/multiline-v3.0.7.asciidoc
create mode 100644 docs/versioned-plugins/codecs/multiline-v3.0.8.asciidoc
create mode 100644 docs/versioned-plugins/codecs/netflow-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/netflow-v3.10.0.asciidoc
create mode 100644 docs/versioned-plugins/codecs/netflow-v3.4.1.asciidoc
create mode 100644 docs/versioned-plugins/codecs/netflow-v3.5.0.asciidoc
create mode 100644 docs/versioned-plugins/codecs/netflow-v3.5.1.asciidoc
create mode 100644 docs/versioned-plugins/codecs/netflow-v3.5.2.asciidoc
create mode 100644 docs/versioned-plugins/codecs/netflow-v3.6.0.asciidoc
create mode 100644 docs/versioned-plugins/codecs/netflow-v3.7.0.asciidoc
create mode 100644 docs/versioned-plugins/codecs/netflow-v3.7.1.asciidoc
create mode 100644 docs/versioned-plugins/codecs/netflow-v3.8.0.asciidoc
create mode 100644 docs/versioned-plugins/codecs/netflow-v3.8.1.asciidoc
create mode 100644 docs/versioned-plugins/codecs/netflow-v3.8.3.asciidoc
create mode 100644 docs/versioned-plugins/codecs/netflow-v3.9.0.asciidoc
create mode 100644 docs/versioned-plugins/codecs/netflow-v3.9.1.asciidoc
create mode 100644 docs/versioned-plugins/codecs/nmap-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/nmap-v0.0.19.asciidoc
create mode 100644 docs/versioned-plugins/codecs/nmap-v0.0.20.asciidoc
create mode 100644 docs/versioned-plugins/codecs/nmap-v0.0.21.asciidoc
create mode 100644 docs/versioned-plugins/codecs/oldlogstashjson-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/oldlogstashjson-v2.0.5.asciidoc
create mode 100644 docs/versioned-plugins/codecs/oldlogstashjson-v2.0.7.asciidoc
create mode 100644 docs/versioned-plugins/codecs/plain-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/plain-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/codecs/plain-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/codecs/plain-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/codecs/plain-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/codecs/pretty-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/protobuf-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/protobuf-v1.0.2.asciidoc
create mode 100644 docs/versioned-plugins/codecs/protobuf-v1.0.3.asciidoc
create mode 100644 docs/versioned-plugins/codecs/rubydebug-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/rubydebug-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/codecs/rubydebug-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/codecs/rubydebug-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/codecs/s3plain-index.asciidoc
create mode 100644 docs/versioned-plugins/codecs/s3plain-v2.0.5.asciidoc
create mode 100644 docs/versioned-plugins/codecs/s3plain-v2.0.6.asciidoc
create mode 100644 docs/versioned-plugins/codecs/s3plain-v2.0.7.asciidoc
create mode 100644 docs/versioned-plugins/codecs/sflow-index.asciidoc
create mode 100644 docs/versioned-plugins/filters-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/age-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/age-v1.0.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/age-v1.0.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/aggregate-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/aggregate-v2.6.0.asciidoc
create mode 100644 docs/versioned-plugins/filters/aggregate-v2.6.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/aggregate-v2.6.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/aggregate-v2.6.4.asciidoc
create mode 100644 docs/versioned-plugins/filters/aggregate-v2.7.0.asciidoc
create mode 100644 docs/versioned-plugins/filters/aggregate-v2.7.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/aggregate-v2.7.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/alter-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/alter-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/alter-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/alter-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/anonymize-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/anonymize-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/filters/anonymize-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/filters/anonymize-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/filters/bytesize-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/checksum-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/checksum-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/checksum-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/filters/cidr-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/cidr-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/cidr-v3.1.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/cidr-v3.1.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/cipher-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/cipher-v2.0.6.asciidoc
create mode 100644 docs/versioned-plugins/filters/cipher-v2.0.7.asciidoc
create mode 100644 docs/versioned-plugins/filters/cipher-v3.0.0.asciidoc
create mode 100644 docs/versioned-plugins/filters/cipher-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/clone-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/clone-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/clone-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/filters/clone-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/filters/cloudfoundry-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/collate-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/collate-v2.0.5.asciidoc
create mode 100644 docs/versioned-plugins/filters/collate-v2.0.6.asciidoc
create mode 100644 docs/versioned-plugins/filters/csv-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/csv-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/csv-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/filters/csv-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/filters/csv-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/filters/csv-v3.0.7.asciidoc
create mode 100644 docs/versioned-plugins/filters/date-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/date-v3.1.7.asciidoc
create mode 100644 docs/versioned-plugins/filters/date-v3.1.8.asciidoc
create mode 100644 docs/versioned-plugins/filters/date-v3.1.9.asciidoc
create mode 100644 docs/versioned-plugins/filters/de_dot-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/de_dot-v1.0.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/de_dot-v1.0.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/de_dot-v1.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/debug-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/dissect-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/dissect-v1.0.9.asciidoc
create mode 100644 docs/versioned-plugins/filters/dissect-v1.1.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/dissect-v1.1.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/dns-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/dns-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/filters/dns-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/filters/dns-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/filters/dns-v3.0.7.asciidoc
create mode 100644 docs/versioned-plugins/filters/drop-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/drop-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/drop-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/filters/drop-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/filters/elapsed-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/elapsed-v4.0.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/elapsed-v4.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/elapsed-v4.0.4.asciidoc
create mode 100644 docs/versioned-plugins/filters/elasticsearch-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/elasticsearch-v3.1.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/elasticsearch-v3.1.4.asciidoc
create mode 100644 docs/versioned-plugins/filters/elasticsearch-v3.1.5.asciidoc
create mode 100644 docs/versioned-plugins/filters/elasticsearch-v3.1.6.asciidoc
create mode 100644 docs/versioned-plugins/filters/elasticsearch-v3.2.0.asciidoc
create mode 100644 docs/versioned-plugins/filters/elasticsearch-v3.2.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/emoji-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/emoji-v1.0.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/emoji-v1.0.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/environment-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/environment-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/environment-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/environment-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/example-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/extractnumbers-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/extractnumbers-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/extractnumbers-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/extractnumbers-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/fingerprint-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/fingerprint-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/filters/fingerprint-v3.1.0.asciidoc
create mode 100644 docs/versioned-plugins/filters/fingerprint-v3.1.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/fingerprint-v3.1.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/geoip-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/geoip-v4.1.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/geoip-v4.2.0.asciidoc
create mode 100644 docs/versioned-plugins/filters/geoip-v4.2.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/geoip-v4.3.0.asciidoc
create mode 100644 docs/versioned-plugins/filters/geoip-v4.3.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/geoip-v5.0.0.asciidoc
create mode 100644 docs/versioned-plugins/filters/geoip-v5.0.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/geoip-v5.0.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/geoip-v5.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/grok-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/grok-v3.4.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/grok-v3.4.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/grok-v3.4.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/grok-v3.4.4.asciidoc
create mode 100644 docs/versioned-plugins/filters/grok-v4.0.0.asciidoc
create mode 100644 docs/versioned-plugins/filters/grok-v4.0.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/hashid-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/hashid-v0.1.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/hashid-v0.1.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/i18n-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/i18n-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/i18n-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/i18n-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/jdbc_static-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/jdbc_streaming-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/jdbc_streaming-v1.0.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/jdbc_streaming-v1.0.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/jdbc_streaming-v1.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/json-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/json-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/json-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/filters/json-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/filters/json_encode-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/json_encode-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/json_encode-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/json_encode-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/kubernetes_metadata-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/kv-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/kv-v4.0.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/kv-v4.0.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/kv-v4.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/language-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/lookup-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/math-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/metaevent-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/metaevent-v2.0.5.asciidoc
create mode 100644 docs/versioned-plugins/filters/metaevent-v2.0.7.asciidoc
create mode 100644 docs/versioned-plugins/filters/metricize-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/metricize-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/metricize-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/metricize-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/metrics-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/metrics-v4.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/metrics-v4.0.4.asciidoc
create mode 100644 docs/versioned-plugins/filters/metrics-v4.0.5.asciidoc
create mode 100644 docs/versioned-plugins/filters/multiline-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/multiline-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/multiline-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/filters/mutate-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/mutate-v3.1.5.asciidoc
create mode 100644 docs/versioned-plugins/filters/mutate-v3.1.6.asciidoc
create mode 100644 docs/versioned-plugins/filters/mutate-v3.1.7.asciidoc
create mode 100644 docs/versioned-plugins/filters/mutate-v3.2.0.asciidoc
create mode 100644 docs/versioned-plugins/filters/oui-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/oui-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/oui-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/prune-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/prune-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/prune-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/prune-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/punct-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/punct-v2.0.5.asciidoc
create mode 100644 docs/versioned-plugins/filters/punct-v2.0.6.asciidoc
create mode 100644 docs/versioned-plugins/filters/range-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/range-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/range-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/range-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/ruby-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/ruby-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/ruby-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/filters/ruby-v3.1.0.asciidoc
create mode 100644 docs/versioned-plugins/filters/ruby-v3.1.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/ruby-v3.1.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/ruby-v3.1.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/script-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/sleep-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/sleep-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/filters/sleep-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/filters/sleep-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/filters/split-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/split-v3.1.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/split-v3.1.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/split-v3.1.4.asciidoc
create mode 100644 docs/versioned-plugins/filters/split-v3.1.5.asciidoc
create mode 100644 docs/versioned-plugins/filters/split-v3.1.6.asciidoc
create mode 100644 docs/versioned-plugins/filters/syslog_pri-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/syslog_pri-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/syslog_pri-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/filters/syslog_pri-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/filters/throttle-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/throttle-v4.0.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/throttle-v4.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/throttle-v4.0.4.asciidoc
create mode 100644 docs/versioned-plugins/filters/tld-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/tld-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/tld-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/tld-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/translate-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/translate-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/translate-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/translate-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/filters/truncate-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/truncate-v1.0.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/truncate-v1.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/truncate-v1.0.4.asciidoc
create mode 100644 docs/versioned-plugins/filters/unique-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/unique-v2.0.5.asciidoc
create mode 100644 docs/versioned-plugins/filters/unique-v2.0.6.asciidoc
create mode 100644 docs/versioned-plugins/filters/unique-v3.0.0.asciidoc
create mode 100644 docs/versioned-plugins/filters/urldecode-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/urldecode-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/filters/urldecode-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/filters/urldecode-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/filters/useragent-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/useragent-v3.1.0.asciidoc
create mode 100644 docs/versioned-plugins/filters/useragent-v3.1.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/useragent-v3.1.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/useragent-v3.2.0.asciidoc
create mode 100644 docs/versioned-plugins/filters/useragent-v3.2.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/useragent-v3.2.2.asciidoc
create mode 100644 docs/versioned-plugins/filters/uuid-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/uuid-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/uuid-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/filters/uuid-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/filters/xml-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/xml-v4.0.3.asciidoc
create mode 100644 docs/versioned-plugins/filters/xml-v4.0.4.asciidoc
create mode 100644 docs/versioned-plugins/filters/xml-v4.0.5.asciidoc
create mode 100644 docs/versioned-plugins/filters/yaml-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/yaml-v0.1.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/zeromq-index.asciidoc
create mode 100644 docs/versioned-plugins/filters/zeromq-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/filters/zeromq-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/beats-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/beats-v4.0.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/beats-v4.0.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/beats-v4.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/beats-v4.0.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/beats-v4.0.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/beats-v5.0.0.asciidoc
create mode 100644 docs/versioned-plugins/inputs/beats-v5.0.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/beats-v5.0.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/beats-v5.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/beats-v5.0.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/beats-v5.0.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/beats-v5.0.6.asciidoc
create mode 100644 docs/versioned-plugins/inputs/cloudwatch-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/cloudwatch-v2.0.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/cloudwatch-v2.0.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/cloudwatch-v2.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/cloudwatch_logs-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/couchdb_changes-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/couchdb_changes-v3.1.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/couchdb_changes-v3.1.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/couchdb_changes-v3.1.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/dead_letter_queue-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/dead_letter_queue-v1.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/dead_letter_queue-v1.0.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/dead_letter_queue-v1.0.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/dead_letter_queue-v1.0.6.asciidoc
create mode 100644 docs/versioned-plugins/inputs/dead_letter_queue-v1.1.0.asciidoc
create mode 100644 docs/versioned-plugins/inputs/dead_letter_queue-v1.1.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/dead_letter_queue-v1.1.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/drupal_dblog-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/drupal_dblog-v2.0.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/drupal_dblog-v2.0.6.asciidoc
create mode 100644 docs/versioned-plugins/inputs/dynamodb-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/elasticsearch-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/elasticsearch-v4.0.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/elasticsearch-v4.0.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/elasticsearch-v4.0.6.asciidoc
create mode 100644 docs/versioned-plugins/inputs/elasticsearch-v4.1.0.asciidoc
create mode 100644 docs/versioned-plugins/inputs/elasticsearch-v4.1.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/eventlog-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/eventlog-v4.1.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/eventlog-v4.1.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/example-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/exec-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/exec-v3.1.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/exec-v3.1.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/exec-v3.1.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/file-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/file-v4.0.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/file-v4.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/fluentd-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/ganglia-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/ganglia-v3.1.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/ganglia-v3.1.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/ganglia-v3.1.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/gelf-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/gelf-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/gelf-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/gelf-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/inputs/gelf-v3.0.7.asciidoc
create mode 100644 docs/versioned-plugins/inputs/gemfire-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/gemfire-v2.0.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/gemfire-v2.0.6.asciidoc
create mode 100644 docs/versioned-plugins/inputs/generator-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/generator-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/generator-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/generator-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/github-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/github-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/github-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/github-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/google_pubsub-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/google_pubsub-v1.0.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/google_pubsub-v1.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/google_pubsub-v1.0.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/googleanalytics-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/graphite-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/graphite-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/graphite-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/heartbeat-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/heartbeat-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/heartbeat-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/heartbeat-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/heroku-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/heroku-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/heroku-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/http-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/http-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/http-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/inputs/http-v3.0.7.asciidoc
create mode 100644 docs/versioned-plugins/inputs/http-v3.0.8.asciidoc
create mode 100644 docs/versioned-plugins/inputs/http_poller-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/http_poller-v3.3.0.asciidoc
create mode 100644 docs/versioned-plugins/inputs/http_poller-v3.3.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/http_poller-v3.3.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/http_poller-v3.3.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/http_poller-v3.3.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/http_poller-v4.0.0.asciidoc
create mode 100644 docs/versioned-plugins/inputs/http_poller-v4.0.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/http_poller-v4.0.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/http_poller-v4.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/http_poller-v4.0.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/imap-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/imap-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/imap-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/imap-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/irc-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/irc-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/irc-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/irc-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/irc-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/inputs/jdbc-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/jdbc-v4.2.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/jdbc-v4.2.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/jdbc-v4.2.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/jdbc-v4.2.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/jdbc-v4.3.0.asciidoc
create mode 100644 docs/versioned-plugins/inputs/jdbc-v4.3.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/jdbc-v4.3.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/jdbc-v4.3.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/jms-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/jms-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/jms-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/jms-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/jmx-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/jmx-pipe-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/jmx-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/jmx-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/jmx-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/journald-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/journald-v2.0.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/kafka-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/kafka-v6.3.0.asciidoc
create mode 100644 docs/versioned-plugins/inputs/kafka-v6.3.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/kafka-v6.3.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/kafka-v6.3.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/kafka-v7.0.0.asciidoc
create mode 100644 docs/versioned-plugins/inputs/kafka-v8.0.0.asciidoc
create mode 100644 docs/versioned-plugins/inputs/kafka-v8.0.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/kafka-v8.0.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/kinesis-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/kinesis-v2.0.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/kinesis-v2.0.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/kinesis-v2.0.6.asciidoc
create mode 100644 docs/versioned-plugins/inputs/kinesis-v2.0.7.asciidoc
create mode 100644 docs/versioned-plugins/inputs/log4j-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/log4j-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/inputs/log4j-v3.1.0.asciidoc
create mode 100644 docs/versioned-plugins/inputs/log4j-v3.1.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/log4j-v3.1.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/log4j2-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/lumberjack-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/lumberjack-v3.1.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/lumberjack-v3.1.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/lumberjack-v3.1.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/meetup-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/meetup-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/meetup-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/meetup-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/mongodb-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/neo4j-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/neo4j-v2.0.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/neo4j-v2.0.6.asciidoc
create mode 100644 docs/versioned-plugins/inputs/netflow-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/perfmon-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/pipe-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/pipe-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/pipe-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/pipe-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/inputs/puppet_facter-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/puppet_facter-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/puppet_facter-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/puppet_facter-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/rabbitmq-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/rabbitmq-v5.2.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/rabbitmq-v5.2.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/rabbitmq-v6.0.0.asciidoc
create mode 100644 docs/versioned-plugins/inputs/rabbitmq-v6.0.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/rabbitmq-v6.0.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/rackspace-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/rackspace-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/redis-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/redis-v3.1.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/redis-v3.1.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/redis-v3.1.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/redis-v3.1.6.asciidoc
create mode 100644 docs/versioned-plugins/inputs/relp-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/relp-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/relp-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/relp-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/rss-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/rss-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/rss-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/rss-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/s3-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/s3-v3.1.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/s3-v3.1.6.asciidoc
create mode 100644 docs/versioned-plugins/inputs/s3-v3.1.7.asciidoc
create mode 100644 docs/versioned-plugins/inputs/s3-v3.1.8.asciidoc
create mode 100644 docs/versioned-plugins/inputs/s3-v3.1.9.asciidoc
create mode 100644 docs/versioned-plugins/inputs/s3-v3.2.0.asciidoc
create mode 100644 docs/versioned-plugins/inputs/s3sqs-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/salesforce-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/salesforce-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/salesforce-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/snmptrap-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/snmptrap-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/snmptrap-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/snmptrap-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/sqlite-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/sqlite-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/sqlite-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/sqlite-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/sqs-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/sqs-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/sqs-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/sqs-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/sqs-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/inputs/stdin-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/stdin-v3.2.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/stdin-v3.2.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/stdin-v3.2.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/stomp-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/stomp-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/stomp-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/stomp-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/inputs/stomp-v3.0.7.asciidoc
create mode 100644 docs/versioned-plugins/inputs/syslog-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/syslog-v3.2.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/syslog-v3.2.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/syslog-v3.2.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/syslog-v3.2.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/tcp-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/tcp-v4.1.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/tcp-v4.2.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/tcp-v4.2.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/tcp-v4.2.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/tcp-v5.0.0.asciidoc
create mode 100644 docs/versioned-plugins/inputs/tcp-v5.0.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/tcp-v5.0.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/tcp-v5.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/twitter-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/twitter-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/twitter-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/twitter-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/inputs/twitter-v3.0.7.asciidoc
create mode 100644 docs/versioned-plugins/inputs/udp-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/udp-v3.1.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/udp-v3.1.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/udp-v3.1.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/udp-v3.2.0.asciidoc
create mode 100644 docs/versioned-plugins/inputs/udp-v3.2.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/unix-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/unix-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/unix-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/unix-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/inputs/varnishlog-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/varnishlog-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/varnishlog-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/varnishlog-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/websocket-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/websocket-v4.0.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/websocket-v4.0.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/websocket-v4.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/wmi-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/wmi-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/inputs/wmi-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/inputs/wmi-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/xmpp-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/xmpp-v3.1.3.asciidoc
create mode 100644 docs/versioned-plugins/inputs/xmpp-v3.1.4.asciidoc
create mode 100644 docs/versioned-plugins/inputs/xmpp-v3.1.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/xmpp-v3.1.6.asciidoc
create mode 100644 docs/versioned-plugins/inputs/zenoss-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/zenoss-v2.0.5.asciidoc
create mode 100644 docs/versioned-plugins/inputs/zenoss-v2.0.6.asciidoc
create mode 100644 docs/versioned-plugins/inputs/zeromq-index.asciidoc
create mode 100644 docs/versioned-plugins/inputs/zeromq-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/beats-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/boundary-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/boundary-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/boundary-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/boundary-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/circonus-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/circonus-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/circonus-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/circonus-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/cloudwatch-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/cloudwatch-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/outputs/cloudwatch-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/outputs/cloudwatch-v3.0.7.asciidoc
create mode 100644 docs/versioned-plugins/outputs/csv-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/csv-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/csv-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/outputs/csv-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/outputs/datadog-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/datadog-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/datadog-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/datadog-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/datadog_metrics-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/datadog_metrics-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/datadog_metrics-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/elasticsearch-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/elasticsearch-v7.3.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/elasticsearch-v7.3.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/elasticsearch-v7.3.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/elasticsearch-v7.3.5.asciidoc
create mode 100644 docs/versioned-plugins/outputs/elasticsearch-v7.3.6.asciidoc
create mode 100644 docs/versioned-plugins/outputs/elasticsearch-v7.3.7.asciidoc
create mode 100644 docs/versioned-plugins/outputs/elasticsearch-v7.3.8.asciidoc
create mode 100644 docs/versioned-plugins/outputs/elasticsearch-v7.4.0.asciidoc
create mode 100644 docs/versioned-plugins/outputs/elasticsearch-v7.4.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/elasticsearch-v7.4.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/elasticsearch-v8.0.0.asciidoc
create mode 100644 docs/versioned-plugins/outputs/elasticsearch-v8.0.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/elasticsearch-v8.1.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/elasticsearch-v8.2.0.asciidoc
create mode 100644 docs/versioned-plugins/outputs/elasticsearch-v8.2.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/elasticsearch-v9.0.0.asciidoc
create mode 100644 docs/versioned-plugins/outputs/elasticsearch-v9.0.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/elasticsearch_java-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/elasticsearch_java-v2.1.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/email-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/email-v4.0.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/email-v4.0.6.asciidoc
create mode 100644 docs/versioned-plugins/outputs/example-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/exec-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/exec-v3.1.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/exec-v3.1.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/exec-v3.1.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/file-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/file-v4.0.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/file-v4.1.0.asciidoc
create mode 100644 docs/versioned-plugins/outputs/file-v4.1.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/file-v4.1.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/file-v4.2.0.asciidoc
create mode 100644 docs/versioned-plugins/outputs/file-v4.2.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/firehose-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/ganglia-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/ganglia-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/ganglia-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/ganglia-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/outputs/gelf-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/gelf-v3.1.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/gelf-v3.1.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/gemfire-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/gemfire-v2.0.5.asciidoc
create mode 100644 docs/versioned-plugins/outputs/gemfire-v2.0.6.asciidoc
create mode 100644 docs/versioned-plugins/outputs/google_bigquery-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/google_bigquery-v3.2.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/google_bigquery-v3.2.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/google_bigquery-v3.2.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/google_cloud_storage-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/google_cloud_storage-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/google_cloud_storage-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/graphite-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/graphite-v3.1.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/graphite-v3.1.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/graphite-v3.1.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/graphtastic-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/graphtastic-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/graphtastic-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/graphtastic-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/hipchat-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/hipchat-v4.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/http-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/http-v4.3.0.asciidoc
create mode 100644 docs/versioned-plugins/outputs/http-v4.3.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/http-v4.3.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/http-v4.3.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/http-v4.4.0.asciidoc
create mode 100644 docs/versioned-plugins/outputs/http-v5.0.0.asciidoc
create mode 100644 docs/versioned-plugins/outputs/http-v5.0.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/http-v5.1.0.asciidoc
create mode 100644 docs/versioned-plugins/outputs/http-v5.1.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/http-v5.1.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/icinga-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/influxdb-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/influxdb-v5.0.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/influxdb-v5.0.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/influxdb-v5.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/irc-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/irc-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/irc-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/irc-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/outputs/jira-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/jira-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/jira-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/jms-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/jms-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/juggernaut-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/juggernaut-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/juggernaut-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/juggernaut-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/outputs/kafka-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/kafka-v6.2.0.asciidoc
create mode 100644 docs/versioned-plugins/outputs/kafka-v6.2.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/kafka-v6.2.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/kafka-v7.0.0.asciidoc
create mode 100644 docs/versioned-plugins/outputs/kafka-v7.0.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/kafka-v7.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/kafka-v7.0.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/kafka-v7.0.6.asciidoc
create mode 100644 docs/versioned-plugins/outputs/librato-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/librato-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/librato-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/librato-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/outputs/logentries-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/loggly-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/loggly-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/loggly-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/loggly-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/lumberjack-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/lumberjack-v3.1.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/lumberjack-v3.1.5.asciidoc
create mode 100644 docs/versioned-plugins/outputs/metriccatcher-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/metriccatcher-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/metriccatcher-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/metriccatcher-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/monasca_log_api-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/monasca_log_api-v1.0.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/mongodb-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/mongodb-v3.1.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/mongodb-v3.1.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/mongodb-v3.1.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/nagios-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/nagios-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/nagios-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/nagios-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/outputs/nagios_nsca-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/nagios_nsca-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/nagios_nsca-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/nagios_nsca-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/outputs/neo4j-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/neo4j-v2.0.5.asciidoc
create mode 100644 docs/versioned-plugins/outputs/newrelic-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/null-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/null-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/null-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/opentsdb-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/opentsdb-v3.1.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/opentsdb-v3.1.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/opentsdb-v3.1.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/pagerduty-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/pagerduty-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/pagerduty-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/outputs/pagerduty-v3.0.6.asciidoc
create mode 100644 docs/versioned-plugins/outputs/pipe-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/pipe-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/pipe-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/pipe-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/outputs/rabbitmq-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/rabbitmq-v4.0.8.asciidoc
create mode 100644 docs/versioned-plugins/outputs/rabbitmq-v4.0.9.asciidoc
create mode 100644 docs/versioned-plugins/outputs/rabbitmq-v5.0.0.asciidoc
create mode 100644 docs/versioned-plugins/outputs/rabbitmq-v5.0.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/rabbitmq-v5.0.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/rabbitmq-v5.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/rabbitmq-v5.1.0.asciidoc
create mode 100644 docs/versioned-plugins/outputs/rackspace-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/rackspace-v2.0.5.asciidoc
create mode 100644 docs/versioned-plugins/outputs/rackspace-v2.0.7.asciidoc
create mode 100644 docs/versioned-plugins/outputs/rados-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/redis-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/redis-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/redis-v3.0.5.asciidoc
create mode 100644 docs/versioned-plugins/outputs/redis-v4.0.0.asciidoc
create mode 100644 docs/versioned-plugins/outputs/redis-v4.0.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/redis-v4.0.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/redis-v4.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/redmine-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/redmine-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/redmine-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/redmine-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/riak-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/riak-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/riak-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/riak-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/riemann-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/riemann-v3.0.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/riemann-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/riemann-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/s3-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/s3-v4.0.8.asciidoc
create mode 100644 docs/versioned-plugins/outputs/s3-v4.0.9.asciidoc
create mode 100644 docs/versioned-plugins/outputs/slack-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/slack-v2.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/slack-v2.1.0.asciidoc
create mode 100644 docs/versioned-plugins/outputs/sns-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/sns-v4.0.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/sns-v4.0.5.asciidoc
create mode 100644 docs/versioned-plugins/outputs/sns-v4.0.6.asciidoc
create mode 100644 docs/versioned-plugins/outputs/solr_http-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/solr_http-v3.0.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/solr_http-v3.0.3.asciidoc
create mode 100644 docs/versioned-plugins/outputs/solr_http-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/sqs-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/sqs-v4.0.2.asciidoc
create mode
100644 docs/versioned-plugins/outputs/sqs-v4.0.3.asciidoc create mode 100644 docs/versioned-plugins/outputs/sqs-v5.0.0.asciidoc create mode 100644 docs/versioned-plugins/outputs/sqs-v5.0.1.asciidoc create mode 100644 docs/versioned-plugins/outputs/sqs-v5.0.2.asciidoc create mode 100644 docs/versioned-plugins/outputs/statsd-index.asciidoc create mode 100644 docs/versioned-plugins/outputs/statsd-v3.1.2.asciidoc create mode 100644 docs/versioned-plugins/outputs/statsd-v3.1.3.asciidoc create mode 100644 docs/versioned-plugins/outputs/statsd-v3.1.4.asciidoc create mode 100644 docs/versioned-plugins/outputs/stdout-index.asciidoc create mode 100644 docs/versioned-plugins/outputs/stdout-v3.1.1.asciidoc create mode 100644 docs/versioned-plugins/outputs/stdout-v3.1.2.asciidoc create mode 100644 docs/versioned-plugins/outputs/stdout-v3.1.3.asciidoc create mode 100644 docs/versioned-plugins/outputs/stomp-index.asciidoc create mode 100644 docs/versioned-plugins/outputs/stomp-v3.0.5.asciidoc create mode 100644 docs/versioned-plugins/outputs/stomp-v3.0.7.asciidoc create mode 100644 docs/versioned-plugins/outputs/stomp-v3.0.8.asciidoc create mode 100644 docs/versioned-plugins/outputs/syslog-index.asciidoc create mode 100644 docs/versioned-plugins/outputs/syslog-v3.0.2.asciidoc create mode 100644 docs/versioned-plugins/outputs/syslog-v3.0.3.asciidoc create mode 100644 docs/versioned-plugins/outputs/syslog-v3.0.4.asciidoc create mode 100644 docs/versioned-plugins/outputs/tcp-index.asciidoc create mode 100644 docs/versioned-plugins/outputs/tcp-v4.0.1.asciidoc create mode 100644 docs/versioned-plugins/outputs/tcp-v4.0.2.asciidoc create mode 100644 docs/versioned-plugins/outputs/tcp-v5.0.0.asciidoc create mode 100644 docs/versioned-plugins/outputs/tcp-v5.0.1.asciidoc create mode 100644 docs/versioned-plugins/outputs/tcp-v5.0.2.asciidoc create mode 100644 docs/versioned-plugins/outputs/timber-index.asciidoc create mode 100644 docs/versioned-plugins/outputs/timber-v1.0.3.asciidoc create mode 100644 docs/versioned-plugins/outputs/udp-index.asciidoc create mode 100644 docs/versioned-plugins/outputs/udp-v3.0.3.asciidoc create mode 100644 docs/versioned-plugins/outputs/udp-v3.0.4.asciidoc create mode 100644 docs/versioned-plugins/outputs/udp-v3.0.5.asciidoc create mode 100644 docs/versioned-plugins/outputs/webhdfs-index.asciidoc create mode 100644 docs/versioned-plugins/outputs/webhdfs-v3.0.3.asciidoc create mode 100644 docs/versioned-plugins/outputs/webhdfs-v3.0.4.asciidoc create mode 100644 docs/versioned-plugins/outputs/webhdfs-v3.0.5.asciidoc create mode 100644 docs/versioned-plugins/outputs/websocket-index.asciidoc create mode 100644 docs/versioned-plugins/outputs/websocket-v3.0.2.asciidoc create mode 100644 docs/versioned-plugins/outputs/websocket-v3.0.3.asciidoc create mode 100644 docs/versioned-plugins/outputs/websocket-v3.0.4.asciidoc create mode 100644 docs/versioned-plugins/outputs/xmpp-index.asciidoc create mode 100644 docs/versioned-plugins/outputs/xmpp-v3.0.3.asciidoc create mode 100644 docs/versioned-plugins/outputs/xmpp-v3.0.4.asciidoc create mode 100644 docs/versioned-plugins/outputs/xmpp-v3.0.5.asciidoc create mode 100644 docs/versioned-plugins/outputs/xmpp-v3.0.6.asciidoc create mode 100644 docs/versioned-plugins/outputs/xmpp-v3.0.7.asciidoc create mode 100644 docs/versioned-plugins/outputs/zabbix-index.asciidoc create mode 100644 docs/versioned-plugins/outputs/zabbix-v3.0.2.asciidoc create mode 100644 docs/versioned-plugins/outputs/zabbix-v3.0.3.asciidoc create mode 100644 
docs/versioned-plugins/outputs/zabbix-v3.0.4.asciidoc
create mode 100644 docs/versioned-plugins/outputs/zeromq-index.asciidoc
create mode 100644 docs/versioned-plugins/outputs/zeromq-v3.1.1.asciidoc
create mode 100644 docs/versioned-plugins/outputs/zeromq-v3.1.2.asciidoc
create mode 100644 docs/versioned-plugins/outputs/zookeeper-index.asciidoc
diff --git a/docs/versioned-plugins/codecs-index.asciidoc b/docs/versioned-plugins/codecs-index.asciidoc
new file mode 100644
index 000000000..8efd1e6ee
--- /dev/null
+++ b/docs/versioned-plugins/codecs-index.asciidoc
@@ -0,0 +1,36 @@
+:type: codec
+:type_uc: Codec
+
+include::include/plugin-intro.asciidoc[]
+
+include::codecs/avro-index.asciidoc[]
+include::codecs/cef-index.asciidoc[]
+include::codecs/cloudfront-index.asciidoc[]
+include::codecs/cloudtrail-index.asciidoc[]
+include::codecs/collectd-index.asciidoc[]
+include::codecs/compress_spooler-index.asciidoc[]
+include::codecs/csv-index.asciidoc[]
+include::codecs/dots-index.asciidoc[]
+include::codecs/edn-index.asciidoc[]
+include::codecs/edn_lines-index.asciidoc[]
+include::codecs/es_bulk-index.asciidoc[]
+include::codecs/example-index.asciidoc[]
+include::codecs/fluent-index.asciidoc[]
+include::codecs/graphite-index.asciidoc[]
+include::codecs/gzip_lines-index.asciidoc[]
+include::codecs/json-index.asciidoc[]
+include::codecs/json_lines-index.asciidoc[]
+include::codecs/json_pretty-index.asciidoc[]
+include::codecs/line-index.asciidoc[]
+include::codecs/msgpack-index.asciidoc[]
+include::codecs/multiline-index.asciidoc[]
+include::codecs/netflow-index.asciidoc[]
+include::codecs/nmap-index.asciidoc[]
+include::codecs/oldlogstashjson-index.asciidoc[]
+include::codecs/plain-index.asciidoc[]
+include::codecs/pretty-index.asciidoc[]
+include::codecs/protobuf-index.asciidoc[]
+include::codecs/rubydebug-index.asciidoc[]
+include::codecs/s3plain-index.asciidoc[]
+include::codecs/sflow-index.asciidoc[]
+
diff --git a/docs/versioned-plugins/codecs/avro-index.asciidoc b/docs/versioned-plugins/codecs/avro-index.asciidoc
new file mode 100644
index 000000000..5278efd13
--- /dev/null
+++ b/docs/versioned-plugins/codecs/avro-index.asciidoc
@@ -0,0 +1,18 @@
+:plugin: avro
+:type: codec
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+| <> | 2017-05-19
+|=======================================================================
+
+include::avro-v3.2.3.asciidoc[]
+include::avro-v3.2.2.asciidoc[]
+include::avro-v3.2.1.asciidoc[]
+include::avro-v3.2.0.asciidoc[]
+
diff --git a/docs/versioned-plugins/codecs/avro-v3.2.0.asciidoc b/docs/versioned-plugins/codecs/avro-v3.2.0.asciidoc
new file mode 100644
index 000000000..1ebdfe7c6
--- /dev/null
+++ b/docs/versioned-plugins/codecs/avro-v3.2.0.asciidoc
@@ -0,0 +1,103 @@
+:plugin: avro
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.2.0
+:release_date: 2017-05-19
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-avro/blob/v3.2.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Avro codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read serialized Avro records as Logstash events.
+
+This plugin is used to serialize Logstash events as
+Avro datums, as well as to deserialize Avro datums into
+Logstash events.
+
+==== Encoding
+
+This codec is for serializing individual Logstash events
+as Avro datums that are Avro binary blobs. It does not encode
+Logstash events into an Avro file.
+
+
+==== Decoding
+
+This codec is for deserializing individual Avro records. It is not for reading
+Avro files. Avro files have a unique format that must be handled upon input.
+
+
+==== Usage
+Example usage with Kafka input.
+
+[source,ruby]
+----------------------------------
+input {
+  kafka {
+    codec => avro {
+      schema_uri => "/tmp/schema.avsc"
+    }
+  }
+}
+filter {
+  ...
+}
+output {
+  ...
+}
+----------------------------------
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Avro Codec Configuration Options
+
+This plugin supports the following configuration options plus the <> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-schema_uri>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
+|=======================================================================
+
+Also see <> for a list of options supported by all
+codec plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-schema_uri"]
+===== `schema_uri`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The path to fetch the schema from.
+This can be an `http` or `file` scheme URI.
+For example:
+
+* http - `http://example.com/schema.avsc`
+* file - `/path/to/schema.avsc`
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
+===== `tag_on_failure`
+
+ * Value type is <>
+ * Default value is `false`
+
+Tag events with `_avroparsefailure` when decoding fails.
+
+
+
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/codecs/avro-v3.2.1.asciidoc b/docs/versioned-plugins/codecs/avro-v3.2.1.asciidoc
new file mode 100644
index 000000000..b698baa97
--- /dev/null
+++ b/docs/versioned-plugins/codecs/avro-v3.2.1.asciidoc
@@ -0,0 +1,96 @@
+:plugin: avro
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.2.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-avro/blob/v3.2.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Avro codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read serialized Avro records as Logstash events.
+
+This plugin is used to serialize Logstash events as
+Avro datums, as well as to deserialize Avro datums into
+Logstash events.
+
+==== Encoding
+
+This codec is for serializing individual Logstash events
+as Avro datums that are Avro binary blobs. It does not encode
+Logstash events into an Avro file.
+
+
+==== Decoding
+
+This codec is for deserializing individual Avro records. It is not for reading
+Avro files.
Avro files have a unique format that must be handled upon input.
+
+
+==== Usage
+Example usage with Kafka input.
+
+[source,ruby]
+----------------------------------
+input {
+  kafka {
+    codec => avro {
+      schema_uri => "/tmp/schema.avsc"
+    }
+  }
+}
+filter {
+  ...
+}
+output {
+  ...
+}
+----------------------------------
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Avro Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-schema_uri>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
+|=======================================================================
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-schema_uri"]
+===== `schema_uri`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The path to fetch the schema from.
+This can be an `http` or `file` scheme URI.
+For example:
+
+* http - `http://example.com/schema.avsc`
+* file - `/path/to/schema.avsc`
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
+===== `tag_on_failure`
+
+ * Value type is <>
+ * Default value is `false`
+
+Tag events with `_avroparsefailure` when decoding fails.
+
+
diff --git a/docs/versioned-plugins/codecs/avro-v3.2.2.asciidoc b/docs/versioned-plugins/codecs/avro-v3.2.2.asciidoc
new file mode 100644
index 000000000..3deabc7a8
--- /dev/null
+++ b/docs/versioned-plugins/codecs/avro-v3.2.2.asciidoc
@@ -0,0 +1,96 @@
+:plugin: avro
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.2.2
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-avro/blob/v3.2.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Avro codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read serialized Avro records as Logstash events.
+
+This plugin is used to serialize Logstash events as
+Avro datums, as well as to deserialize Avro datums into
+Logstash events.
+
+==== Encoding
+
+This codec is for serializing individual Logstash events
+as Avro datums that are Avro binary blobs. It does not encode
+Logstash events into an Avro file.
+
+
+==== Decoding
+
+This codec is for deserializing individual Avro records. It is not for reading
+Avro files. Avro files have a unique format that must be handled upon input.
+
+
+==== Usage
+Example usage with Kafka input.
+
+[source,ruby]
+----------------------------------
+input {
+  kafka {
+    codec => avro {
+      schema_uri => "/tmp/schema.avsc"
+    }
+  }
+}
+filter {
+  ...
+}
+output {
+  ...
+}
+----------------------------------
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Avro Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-schema_uri>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
+|=======================================================================
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-schema_uri"]
+===== `schema_uri`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The path to fetch the schema from.
+This can be an `http` or `file` scheme URI.
+For example:
+
+* http - `http://example.com/schema.avsc`
+* file - `/path/to/schema.avsc`
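+
+For instance, a minimal sketch that loads the schema from a (hypothetical)
+HTTP endpoint instead of a local file:
+
+[source,ruby]
+----------------------------------
+input {
+  stdin {
+    codec => avro {
+      # any http or file scheme URI works here
+      schema_uri => "http://example.com/schema.avsc"
+    }
+  }
+}
+----------------------------------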
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
+===== `tag_on_failure`
+
+ * Value type is <>
+ * Default value is `false`
+
+Tag events with `_avroparsefailure` when decoding fails.
+
+
diff --git a/docs/versioned-plugins/codecs/avro-v3.2.3.asciidoc b/docs/versioned-plugins/codecs/avro-v3.2.3.asciidoc
new file mode 100644
index 000000000..6af43c511
--- /dev/null
+++ b/docs/versioned-plugins/codecs/avro-v3.2.3.asciidoc
@@ -0,0 +1,96 @@
+:plugin: avro
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.2.3
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-avro/blob/v3.2.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Avro codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read serialized Avro records as Logstash events.
+
+This plugin is used to serialize Logstash events as
+Avro datums, as well as to deserialize Avro datums into
+Logstash events.
+
+==== Encoding
+
+This codec is for serializing individual Logstash events
+as Avro datums that are Avro binary blobs. It does not encode
+Logstash events into an Avro file.
+
+
+==== Decoding
+
+This codec is for deserializing individual Avro records. It is not for reading
+Avro files. Avro files have a unique format that must be handled upon input.
+
+
+==== Usage
+Example usage with Kafka input.
+
+[source,ruby]
+----------------------------------
+input {
+  kafka {
+    codec => avro {
+      schema_uri => "/tmp/schema.avsc"
+    }
+  }
+}
+filter {
+  ...
+}
+output {
+  ...
+}
+----------------------------------
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Avro Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-schema_uri>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
+|=======================================================================
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-schema_uri"]
+===== `schema_uri`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The path to fetch the schema from.
+This can be an `http` or `file` scheme URI.
+For example:
+
+* http - `http://example.com/schema.avsc`
+* file - `/path/to/schema.avsc`
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
+===== `tag_on_failure`
+
+ * Value type is <>
+ * Default value is `false`
+
+Tag events with `_avroparsefailure` when decoding fails.
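+
+As a sketch of how the failure tag can be used downstream (the output choice
+is illustrative only):
+
+[source,ruby]
+----------------------------------
+input {
+  kafka {
+    codec => avro {
+      schema_uri => "/tmp/schema.avsc"
+      tag_on_failure => true
+    }
+  }
+}
+output {
+  if "_avroparsefailure" in [tags] {
+    # inspect payloads that failed to decode
+    stdout { codec => rubydebug }
+  }
+}
+----------------------------------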
+
+
diff --git a/docs/versioned-plugins/codecs/cef-index.asciidoc b/docs/versioned-plugins/codecs/cef-index.asciidoc
new file mode 100644
index 000000000..4f2fbc35e
--- /dev/null
+++ b/docs/versioned-plugins/codecs/cef-index.asciidoc
@@ -0,0 +1,20 @@
+:plugin: cef
+:type: codec
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-08-01
+| <> | 2017-08-18
+| <> | 2017-06-23
+|=======================================================================
+
+include::cef-v5.0.2.asciidoc[]
+include::cef-v5.0.1.asciidoc[]
+include::cef-v5.0.0.asciidoc[]
+include::cef-v4.1.4.asciidoc[]
+include::cef-v4.1.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/codecs/cef-v4.1.3.asciidoc b/docs/versioned-plugins/codecs/cef-v4.1.3.asciidoc
new file mode 100644
index 000000000..7c1216983
--- /dev/null
+++ b/docs/versioned-plugins/codecs/cef-v4.1.3.asciidoc
@@ -0,0 +1,164 @@
+:plugin: cef
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.1.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-cef/blob/v4.1.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Cef codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Implementation of a Logstash codec for the ArcSight Common Event Format (CEF),
+based on Revision 20 of Implementing ArcSight CEF, dated June 05, 2013:
+https://community.saas.hpe.com/dcvta86296/attachments/dcvta86296/connector-documentation/1116/1/CommonEventFormatv23.pdf
+
+If this codec receives a payload from an input that is not a valid CEF message, then it will
+produce an event with the payload as the 'message' field and a '_cefparsefailure' tag.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Cef Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-product>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-severity>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-signature>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-vendor>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No
+|=======================================================================
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-delimiter"]
+===== `delimiter`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If your input puts a delimiter between each CEF event, you'll want to set
+this to be that delimiter.
+
+For example, with the TCP input, you probably want to put this:
+
+    input {
+      tcp {
+        codec => cef { delimiter => "\r\n" }
+        # ...
+      }
+    }
+
+This setting allows the following character sequences to have special meaning:
+
+* `\\r` (backslash "r") - means carriage return (ASCII 0x0D)
+* `\\n` (backslash "n") - means newline (ASCII 0x0A)
+
+[id="{version}-plugins-{type}s-{plugin}-deprecated_v1_fields"]
+===== `deprecated_v1_fields` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set this flag if you want to have both v1 and v2 fields indexed at the same time.
+Note that this option will increase the index size and the data stored in outputs like Elasticsearch.
+This option is available to ease the transition to the new schema.
+
+[id="{version}-plugins-{type}s-{plugin}-fields"]
+===== `fields`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Fields to be included in the CEF extension part as key/value pairs.
+
+[id="{version}-plugins-{type}s-{plugin}-name"]
+===== `name`
+
+ * Value type is <>
+ * Default value is `"Logstash"`
+
+Name field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-product"]
+===== `product`
+
+ * Value type is <>
+ * Default value is `"Logstash"`
+
+Device product field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-sev"]
+===== `sev` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Deprecated severity field for CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+This field is used only if `severity` is left unchanged at its default value.
+
+Defined as a field of type string to allow sprintf. The value will be validated
+to be an integer in the range from 0 to 10 (inclusive).
+All invalid values will be mapped to the default of 6.
+
+[id="{version}-plugins-{type}s-{plugin}-severity"]
+===== `severity`
+
+ * Value type is <>
+ * Default value is `"6"`
+
+Severity field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+Defined as a field of type string to allow sprintf. The value will be validated
+to be an integer in the range from 0 to 10 (inclusive).
+All invalid values will be mapped to the default of 6.
+
+[id="{version}-plugins-{type}s-{plugin}-signature"]
+===== `signature`
+
+ * Value type is <>
+ * Default value is `"Logstash"`
+
+Signature ID field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-vendor"]
+===== `vendor`
+
+ * Value type is <>
+ * Default value is `"Elasticsearch"`
+
+Device vendor field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is <>
+ * Default value is `"1.0"`
+
+Device version field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
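+
+For instance, a minimal encoding sketch that fills the CEF header from event
+data when writing to a (hypothetical) TCP receiver; the field names are
+illustrative:
+
+[source,ruby]
+----------------------------------
+output {
+  tcp {
+    host => "siem.example.com"  # hypothetical receiver
+    port => 5514
+    codec => cef {
+      vendor   => "Example"
+      product  => "%{[program]}"          # sprintf from the event
+      severity => "%{[syslog_severity]}"  # invalid values fall back to 6
+    }
+  }
+}
+----------------------------------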
+
+
diff --git a/docs/versioned-plugins/codecs/cef-v4.1.4.asciidoc b/docs/versioned-plugins/codecs/cef-v4.1.4.asciidoc
new file mode 100644
index 000000000..da54ce679
--- /dev/null
+++ b/docs/versioned-plugins/codecs/cef-v4.1.4.asciidoc
@@ -0,0 +1,164 @@
+:plugin: cef
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.1.4
+:release_date: 2017-08-18
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-cef/blob/v4.1.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Cef codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Implementation of a Logstash codec for the ArcSight Common Event Format (CEF),
+based on Revision 20 of Implementing ArcSight CEF, dated June 05, 2013:
+https://community.saas.hpe.com/dcvta86296/attachments/dcvta86296/connector-documentation/1116/1/CommonEventFormatv23.pdf
+
+If this codec receives a payload from an input that is not a valid CEF message, then it will
+produce an event with the payload as the 'message' field and a '_cefparsefailure' tag.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Cef Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-product>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-severity>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-signature>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-vendor>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No
+|=======================================================================
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-delimiter"]
+===== `delimiter`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If your input puts a delimiter between each CEF event, you'll want to set
+this to be that delimiter.
+
+For example, with the TCP input, you probably want to put this:
+
+    input {
+      tcp {
+        codec => cef { delimiter => "\r\n" }
+        # ...
+      }
+    }
+
+This setting allows the following character sequences to have special meaning:
+
+* `\\r` (backslash "r") - means carriage return (ASCII 0x0D)
+* `\\n` (backslash "n") - means newline (ASCII 0x0A)
+
+[id="{version}-plugins-{type}s-{plugin}-deprecated_v1_fields"]
+===== `deprecated_v1_fields` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set this flag if you want to have both v1 and v2 fields indexed at the same time.
+Note that this option will increase the index size and the data stored in outputs like Elasticsearch.
+This option is available to ease the transition to the new schema.
+
+[id="{version}-plugins-{type}s-{plugin}-fields"]
+===== `fields`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Fields to be included in the CEF extension part as key/value pairs.
+
+[id="{version}-plugins-{type}s-{plugin}-name"]
+===== `name`
+
+ * Value type is <>
+ * Default value is `"Logstash"`
+
+Name field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-product"]
+===== `product`
+
+ * Value type is <>
+ * Default value is `"Logstash"`
+
+Device product field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-sev"]
+===== `sev` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Deprecated severity field for CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+This field is used only if `severity` is left unchanged at its default value.
+
+Defined as a field of type string to allow sprintf. The value will be validated
+to be an integer in the range from 0 to 10 (inclusive).
+All invalid values will be mapped to the default of 6.
+
+[id="{version}-plugins-{type}s-{plugin}-severity"]
+===== `severity`
+
+ * Value type is <>
+ * Default value is `"6"`
+
+Severity field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+Defined as a field of type string to allow sprintf. The value will be validated
+to be an integer in the range from 0 to 10 (inclusive).
+All invalid values will be mapped to the default of 6.
+
+[id="{version}-plugins-{type}s-{plugin}-signature"]
+===== `signature`
+
+ * Value type is <>
+ * Default value is `"Logstash"`
+
+Signature ID field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-vendor"]
+===== `vendor`
+
+ * Value type is <>
+ * Default value is `"Elasticsearch"`
+
+Device vendor field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is <>
+ * Default value is `"1.0"`
+
+Device version field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+
diff --git a/docs/versioned-plugins/codecs/cef-v5.0.0.asciidoc b/docs/versioned-plugins/codecs/cef-v5.0.0.asciidoc
new file mode 100644
index 000000000..0740425c8
--- /dev/null
+++ b/docs/versioned-plugins/codecs/cef-v5.0.0.asciidoc
@@ -0,0 +1,153 @@
+:plugin: cef
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.0
+:release_date: 2017-08-01
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-cef/blob/v5.0.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Cef codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Implementation of a Logstash codec for the ArcSight Common Event Format (CEF),
+based on Revision 20 of Implementing ArcSight CEF, dated June 05, 2013:
+https://community.saas.hpe.com/dcvta86296/attachments/dcvta86296/connector-documentation/1116/1/CommonEventFormatv23.pdf
+
+If this codec receives a payload from an input that is not a valid CEF message, then it will
+produce an event with the payload as the 'message' field and a '_cefparsefailure' tag.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Cef Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-product>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-severity>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-signature>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-vendor>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No
+|=======================================================================
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-delimiter"]
+===== `delimiter`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If your input puts a delimiter between each CEF event, you'll want to set
+this to be that delimiter.
+
+For example, with the TCP input, you probably want to put this:
+
+    input {
+      tcp {
+        codec => cef { delimiter => "\r\n" }
+        # ...
+      }
+    }
+
+This setting allows the following character sequences to have special meaning:
+
+* `\\r` (backslash "r") - means carriage return (ASCII 0x0D)
+* `\\n` (backslash "n") - means newline (ASCII 0x0A)
+
+[id="{version}-plugins-{type}s-{plugin}-deprecated_v1_fields"]
+===== `deprecated_v1_fields` (OBSOLETE)
+
+ * OBSOLETE WARNING: This configuration item is obsolete and will prevent the pipeline from starting if used.
+ * Value type is <>
+ * There is no default value for this setting.
+
+[id="{version}-plugins-{type}s-{plugin}-fields"]
+===== `fields`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Fields to be included in the CEF extension part as key/value pairs.
+
+[id="{version}-plugins-{type}s-{plugin}-name"]
+===== `name`
+
+ * Value type is <>
+ * Default value is `"Logstash"`
+
+Name field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-product"]
+===== `product`
+
+ * Value type is <>
+ * Default value is `"Logstash"`
+
+Device product field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-sev"]
+===== `sev` (OBSOLETE)
+
+ * OBSOLETE WARNING: This configuration item is obsolete and will prevent the pipeline from starting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Obsolete severity field for the CEF header; use `severity` instead.
+
+[id="{version}-plugins-{type}s-{plugin}-severity"]
+===== `severity`
+
+ * Value type is <>
+ * Default value is `"6"`
+
+Severity field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+Defined as a field of type string to allow sprintf. The value will be validated
+to be an integer in the range from 0 to 10 (inclusive).
+All invalid values will be mapped to the default of 6.
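+
+A brief sketch of `severity` built via sprintf (the event field and the file
+output are illustrative):
+
+[source,ruby]
+----------------------------------
+output {
+  file {
+    path  => "/var/log/events.cef"  # hypothetical sink
+    codec => cef { severity => "%{[event][severity]}" }
+  }
+}
+----------------------------------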
+
+[id="{version}-plugins-{type}s-{plugin}-signature"]
+===== `signature`
+
+ * Value type is <>
+ * Default value is `"Logstash"`
+
+Signature ID field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-vendor"]
+===== `vendor`
+
+ * Value type is <>
+ * Default value is `"Elasticsearch"`
+
+Device vendor field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is <>
+ * Default value is `"1.0"`
+
+Device version field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+
diff --git a/docs/versioned-plugins/codecs/cef-v5.0.1.asciidoc b/docs/versioned-plugins/codecs/cef-v5.0.1.asciidoc
new file mode 100644
index 000000000..224361280
--- /dev/null
+++ b/docs/versioned-plugins/codecs/cef-v5.0.1.asciidoc
@@ -0,0 +1,153 @@
+:plugin: cef
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.1
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-cef/blob/v5.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Cef codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Implementation of a Logstash codec for the ArcSight Common Event Format (CEF),
+based on Revision 20 of Implementing ArcSight CEF, dated June 05, 2013:
+https://community.saas.hpe.com/dcvta86296/attachments/dcvta86296/connector-documentation/1116/1/CommonEventFormatv23.pdf
+
+If this codec receives a payload from an input that is not a valid CEF message, then it will
+produce an event with the payload as the 'message' field and a '_cefparsefailure' tag.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Cef Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-product>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-severity>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-signature>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-vendor>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No
+|=======================================================================
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-delimiter"]
+===== `delimiter`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If your input puts a delimiter between each CEF event, you'll want to set
+this to be that delimiter.
+
+For example, with the TCP input, you probably want to put this:
+
+    input {
+      tcp {
+        codec => cef { delimiter => "\r\n" }
+        # ...
+      }
+    }
+
+This setting allows the following character sequences to have special meaning:
+
+* `\\r` (backslash "r") - means carriage return (ASCII 0x0D)
+* `\\n` (backslash "n") - means newline (ASCII 0x0A)
+
+[id="{version}-plugins-{type}s-{plugin}-deprecated_v1_fields"]
+===== `deprecated_v1_fields` (OBSOLETE)
+
+ * OBSOLETE WARNING: This configuration item is obsolete and will prevent the pipeline from starting if used.
+ * Value type is <>
+ * There is no default value for this setting.
+
+[id="{version}-plugins-{type}s-{plugin}-fields"]
+===== `fields`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Fields to be included in the CEF extension part as key/value pairs.
+
+[id="{version}-plugins-{type}s-{plugin}-name"]
+===== `name`
+
+ * Value type is <>
+ * Default value is `"Logstash"`
+
+Name field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-product"]
+===== `product`
+
+ * Value type is <>
+ * Default value is `"Logstash"`
+
+Device product field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-sev"]
+===== `sev` (OBSOLETE)
+
+ * OBSOLETE WARNING: This configuration item is obsolete and will prevent the pipeline from starting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Obsolete severity field for the CEF header; use `severity` instead.
+
+[id="{version}-plugins-{type}s-{plugin}-severity"]
+===== `severity`
+
+ * Value type is <>
+ * Default value is `"6"`
+
+Severity field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+Defined as a field of type string to allow sprintf. The value will be validated
+to be an integer in the range from 0 to 10 (inclusive).
+All invalid values will be mapped to the default of 6.
+
+[id="{version}-plugins-{type}s-{plugin}-signature"]
+===== `signature`
+
+ * Value type is <>
+ * Default value is `"Logstash"`
+
+Signature ID field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-vendor"]
+===== `vendor`
+
+ * Value type is <>
+ * Default value is `"Elasticsearch"`
+
+Device vendor field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is <>
+ * Default value is `"1.0"`
+
+Device version field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+
diff --git a/docs/versioned-plugins/codecs/cef-v5.0.2.asciidoc b/docs/versioned-plugins/codecs/cef-v5.0.2.asciidoc
new file mode 100644
index 000000000..039366cad
--- /dev/null
+++ b/docs/versioned-plugins/codecs/cef-v5.0.2.asciidoc
@@ -0,0 +1,153 @@
+:plugin: cef
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.2
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-cef/blob/v5.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Cef codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Implementation of a Logstash codec for the ArcSight Common Event Format (CEF),
+based on Revision 20 of Implementing ArcSight CEF, dated June 05, 2013:
+https://community.saas.hpe.com/dcvta86296/attachments/dcvta86296/connector-documentation/1116/1/CommonEventFormatv23.pdf
+
+If this codec receives a payload from an input that is not a valid CEF message, then it will
+produce an event with the payload as the 'message' field and a '_cefparsefailure' tag.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Cef Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-product>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-severity>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-signature>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-vendor>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No
+|=======================================================================
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-delimiter"]
+===== `delimiter`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If your input puts a delimiter between each CEF event, you'll want to set
+this to be that delimiter.
+
+For example, with the TCP input, you probably want to put this:
+
+    input {
+      tcp {
+        codec => cef { delimiter => "\r\n" }
+        # ...
+      }
+    }
+
+This setting allows the following character sequences to have special meaning:
+
+* `\\r` (backslash "r") - means carriage return (ASCII 0x0D)
+* `\\n` (backslash "n") - means newline (ASCII 0x0A)
+
+[id="{version}-plugins-{type}s-{plugin}-deprecated_v1_fields"]
+===== `deprecated_v1_fields` (OBSOLETE)
+
+ * OBSOLETE WARNING: This configuration item is obsolete and will prevent the pipeline from starting if used.
+ * Value type is <>
+ * There is no default value for this setting.
+
+[id="{version}-plugins-{type}s-{plugin}-fields"]
+===== `fields`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Fields to be included in the CEF extension part as key/value pairs.
+
+[id="{version}-plugins-{type}s-{plugin}-name"]
+===== `name`
+
+ * Value type is <>
+ * Default value is `"Logstash"`
+
+Name field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-product"]
+===== `product`
+
+ * Value type is <>
+ * Default value is `"Logstash"`
+
+Device product field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-sev"]
+===== `sev` (OBSOLETE)
+
+ * OBSOLETE WARNING: This configuration item is obsolete and will prevent the pipeline from starting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Obsolete severity field for the CEF header; use `severity` instead.
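+
+To migrate a 4.x configuration, move the value to `severity` (described next);
+a hypothetical before/after sketch:
+
+[source,ruby]
+----------------------------------
+output {
+  tcp {
+    host => "siem.example.com"  # hypothetical receiver
+    port => 5514
+    # before (4.x): codec => cef { sev => "%{[event][severity]}" }
+    codec => cef { severity => "%{[event][severity]}" }
+  }
+}
+----------------------------------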
+
+[id="{version}-plugins-{type}s-{plugin}-severity"]
+===== `severity`
+
+ * Value type is <>
+ * Default value is `"6"`
+
+Severity field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+Defined as a field of type string to allow sprintf. The value will be validated
+to be an integer in the range from 0 to 10 (inclusive).
+All invalid values will be mapped to the default of 6.
+
+[id="{version}-plugins-{type}s-{plugin}-signature"]
+===== `signature`
+
+ * Value type is <>
+ * Default value is `"Logstash"`
+
+Signature ID field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-vendor"]
+===== `vendor`
+
+ * Value type is <>
+ * Default value is `"Elasticsearch"`
+
+Device vendor field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is <>
+ * Default value is `"1.0"`
+
+Device version field in CEF header. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+
diff --git a/docs/versioned-plugins/codecs/cloudfront-index.asciidoc b/docs/versioned-plugins/codecs/cloudfront-index.asciidoc
new file mode 100644
index 000000000..197e7ac9d
--- /dev/null
+++ b/docs/versioned-plugins/codecs/cloudfront-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: cloudfront
+:type: codec
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::cloudfront-v3.0.3.asciidoc[]
+include::cloudfront-v3.0.2.asciidoc[]
+include::cloudfront-v3.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/codecs/cloudfront-v3.0.1.asciidoc b/docs/versioned-plugins/codecs/cloudfront-v3.0.1.asciidoc
new file mode 100644
index 000000000..736fae151
--- /dev/null
+++ b/docs/versioned-plugins/codecs/cloudfront-v3.0.1.asciidoc
@@ -0,0 +1,52 @@
+:plugin: cloudfront
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-cloudfront/blob/v3.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Cloudfront codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec will read CloudFront-encoded content.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Cloudfront Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No
+|=======================================================================
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-charset"]
+===== `charset`
+
+ * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`,
`macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` + * Default value is `"UTF-8"` + +The character encoding used in this codec. Examples include "UTF-8" and +"CP1252" + +JSON requires valid UTF-8 strings, but in some cases, software that +emits JSON does so in another encoding (nxlog, for example). In +weird cases like this, you can set the charset setting to the +actual encoding of the text and logstash will convert it for you. + +For nxlog users, you'll want to set this to "CP1252" + + diff --git a/docs/versioned-plugins/codecs/cloudfront-v3.0.2.asciidoc b/docs/versioned-plugins/codecs/cloudfront-v3.0.2.asciidoc new file mode 100644 index 000000000..ecf6dd755 --- /dev/null +++ b/docs/versioned-plugins/codecs/cloudfront-v3.0.2.asciidoc @@ -0,0 +1,52 @@ +:plugin: cloudfront +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.2 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-cloudfront/blob/v3.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Cloudfront codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This codec will read cloudfront encoded content + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Cloudfront Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, 
`macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` + * Default value is `"UTF-8"` + +The character encoding used in this codec. Examples include "UTF-8" and +"CP1252" + +JSON requires valid UTF-8 strings, but in some cases, software that +emits JSON does so in another encoding (nxlog, for example). In +weird cases like this, you can set the charset setting to the +actual encoding of the text and logstash will convert it for you. + +For nxlog users, you'll want to set this to "CP1252" + + diff --git a/docs/versioned-plugins/codecs/cloudfront-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/cloudfront-v3.0.3.asciidoc new file mode 100644 index 000000000..d65f2ae39 --- /dev/null +++ b/docs/versioned-plugins/codecs/cloudfront-v3.0.3.asciidoc @@ -0,0 +1,52 @@ +:plugin: cloudfront +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-cloudfront/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Cloudfront codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This codec will read cloudfront encoded content + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Cloudfront Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, 
`macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+ * Default value is `"UTF-8"`
+
+The character encoding used in this codec. Examples include "UTF-8" and
+"CP1252".
+
+JSON requires valid UTF-8 strings, but in some cases, software that
+emits JSON does so in another encoding (nxlog, for example). In
+weird cases like this, you can set the charset setting to the
+actual encoding of the text and Logstash will convert it for you.
+
+For nxlog users, you'll want to set this to "CP1252".
+
+
diff --git a/docs/versioned-plugins/codecs/cloudtrail-index.asciidoc b/docs/versioned-plugins/codecs/cloudtrail-index.asciidoc
new file mode 100644
index 000000000..67a3c393c
--- /dev/null
+++ b/docs/versioned-plugins/codecs/cloudtrail-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: cloudtrail
+:type: codec
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::cloudtrail-v3.0.3.asciidoc[]
+include::cloudtrail-v3.0.2.asciidoc[]
+
diff --git a/docs/versioned-plugins/codecs/cloudtrail-v3.0.2.asciidoc b/docs/versioned-plugins/codecs/cloudtrail-v3.0.2.asciidoc
new file mode 100644
index 000000000..de5d86860
--- /dev/null
+++ b/docs/versioned-plugins/codecs/cloudtrail-v3.0.2.asciidoc
@@ -0,0 +1,44 @@
+:plugin: cloudtrail
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-cloudtrail/blob/v3.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Cloudtrail codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec reads and parses AWS CloudTrail log files.
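+
+In practice this codec is typically attached to an input that retrieves the
+CloudTrail log files, such as the s3 input. The following is a minimal sketch,
+not a tested configuration; the bucket name and key prefix are placeholders:
+[source,ruby]
+    input {
+      s3 {
+        bucket => "example-cloudtrail-logs"   # placeholder bucket name
+        prefix => "AWSLogs/"                  # placeholder key prefix
+        codec => cloudtrail { }
+      }
+    }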
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Cloudtrail Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, 
`Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+ * Default value is `"UTF-8"`
+
+
+
+
diff --git a/docs/versioned-plugins/codecs/cloudtrail-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/cloudtrail-v3.0.3.asciidoc
new file mode 100644
index 000000000..e9be8a358
--- /dev/null
+++ b/docs/versioned-plugins/codecs/cloudtrail-v3.0.3.asciidoc
@@ -0,0 +1,44 @@
+:plugin: cloudtrail
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-cloudtrail/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Cloudtrail codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec reads and parses AWS CloudTrail log files.
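+
+In practice this codec is typically attached to an input that retrieves the
+CloudTrail log files, such as the s3 input. The following is a minimal sketch,
+not a tested configuration; the bucket name and key prefix are placeholders:
+[source,ruby]
+    input {
+      s3 {
+        bucket => "example-cloudtrail-logs"   # placeholder bucket name
+        prefix => "AWSLogs/"                  # placeholder key prefix
+        codec => cloudtrail { }
+      }
+    }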
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Cloudtrail Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, 
`Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+ * Default value is `"UTF-8"`
+
+
+
+
diff --git a/docs/versioned-plugins/codecs/collectd-index.asciidoc b/docs/versioned-plugins/codecs/collectd-index.asciidoc
new file mode 100644
index 000000000..237e60ab0
--- /dev/null
+++ b/docs/versioned-plugins/codecs/collectd-index.asciidoc
@@ -0,0 +1,20 @@
+:plugin: collectd
+:type: codec
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-30
+| <> | 2017-06-23
+| <> | 2017-06-06
+|=======================================================================
+
+include::collectd-v3.0.8.asciidoc[]
+include::collectd-v3.0.7.asciidoc[]
+include::collectd-v3.0.6.asciidoc[]
+include::collectd-v3.0.5.asciidoc[]
+include::collectd-v3.0.4.asciidoc[]
+
diff --git a/docs/versioned-plugins/codecs/collectd-v3.0.4.asciidoc b/docs/versioned-plugins/codecs/collectd-v3.0.4.asciidoc
new file mode 100644
index 000000000..075bf0b6b
--- /dev/null
+++ b/docs/versioned-plugins/codecs/collectd-v3.0.4.asciidoc
@@ -0,0 +1,147 @@
+:plugin: collectd
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-06-06
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-collectd/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Collectd codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events from the collectd binary protocol over the network via udp.
+See https://collectd.org/wiki/index.php/Binary_protocol
+
+Configuration in your Logstash configuration file can be as simple as:
+[source,ruby]
+    input {
+      udp {
+        port => 25826
+        buffer_size => 1452
+        codec => collectd { }
+      }
+    }
+
+A sample `collectd.conf` to send to Logstash might be:
+[source,xml]
+    Hostname    "host.example.com"
+    LoadPlugin interface
+    LoadPlugin load
+    LoadPlugin memory
+    LoadPlugin network
+    <Plugin interface>
+        Interface "eth0"
+        IgnoreSelected false
+    </Plugin>
+    <Plugin network>
+        <Server "10.0.0.1" "25826">
+        </Server>
+    </Plugin>
+
+Be sure to replace `10.0.0.1` with the IP of your Logstash instance.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Collectd Codec Configuration Options
+
+This plugin supports the following configuration options plus the <> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-authfile>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-nan_handling>> |<>, one of `["change_value", "warn", "drop"]`|No
+| <<{version}-plugins-{type}s-{plugin}-nan_tag>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-nan_value>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-prune_intervals>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-security_level>> |<>, one of `["None", "Sign", "Encrypt"]`|No
+| <<{version}-plugins-{type}s-{plugin}-typesdb>> |<>|No
+|=======================================================================
+
+Also see <> for a list of options supported by all
+codec plugins.
+
+  
+
+[id="{version}-plugins-{type}s-{plugin}-authfile"]
+===== `authfile`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path to the authentication file. This file should have the same format as
+the http://collectd.org/documentation/manpages/collectd.conf.5.shtml#authfile_filename[AuthFile]
+in collectd. You only need to set this option if the `security_level` is set to
+`Sign` or `Encrypt`.
+
+[id="{version}-plugins-{type}s-{plugin}-nan_handling"]
+===== `nan_handling`
+
+ * Value can be any of: `change_value`, `warn`, `drop`
+ * Default value is `"change_value"`
+
+What to do when a value in the event is `NaN` (Not a Number):
+
+- change_value (default): Change the `NaN` to the value of the nan_value option and add `nan_tag` as a tag
+- warn: Change the `NaN` to the value of the nan_value option, print a warning to the log, and add `nan_tag` as a tag
+- drop: Drop the event containing the `NaN` (this only drops the single event, not the whole packet)
+
+[id="{version}-plugins-{type}s-{plugin}-nan_tag"]
+===== `nan_tag`
+
+ * Value type is <>
+ * Default value is `"_collectdNaN"`
+
+The tag to add to the event if a `NaN` value was found.
+Set this to an empty string (`''`) if you don't want to tag.
+
+[id="{version}-plugins-{type}s-{plugin}-nan_value"]
+===== `nan_value`
+
+ * Value type is <>
+ * Default value is `0`
+
+Only relevant when `nan_handling` is set to `change_value`.
+Change the `NaN` to this configured value.
+
+[id="{version}-plugins-{type}s-{plugin}-prune_intervals"]
+===== `prune_intervals`
+
+ * Value type is <>
+ * Default value is `true`
+
+Prune interval records. Defaults to `true`.
+
+[id="{version}-plugins-{type}s-{plugin}-security_level"]
+===== `security_level`
+
+ * Value can be any of: `None`, `Sign`, `Encrypt`
+ * Default value is `"None"`
+
+Security Level. Default is `None`. This setting mirrors the setting from the
+collectd https://collectd.org/wiki/index.php/Plugin:Network[Network plugin].
+
+[id="{version}-plugins-{type}s-{plugin}-typesdb"]
+===== `typesdb`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+File path(s) to collectd `types.db` to use.
+The last matching pattern wins if you have identical pattern names in multiple files.
+If no types.db is provided the included `types.db` will be used (currently 5.4.0).
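+
+As a sketch of how these options combine (not a tested configuration; the
+auth file path is a placeholder), a pipeline that verifies signed collectd
+traffic and drops NaN values might look like:
+[source,ruby]
+    input {
+      udp {
+        port => 25826
+        buffer_size => 1452
+        codec => collectd {
+          security_level => "Sign"
+          authfile => "/etc/logstash/collectd.auth"   # placeholder path
+          nan_handling => "drop"
+        }
+      }
+    }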
+
+
+
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/codecs/collectd-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/collectd-v3.0.5.asciidoc
new file mode 100644
index 000000000..4ab88c8d9
--- /dev/null
+++ b/docs/versioned-plugins/codecs/collectd-v3.0.5.asciidoc
@@ -0,0 +1,140 @@
+:plugin: collectd
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-collectd/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Collectd codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events from the collectd binary protocol over the network via udp.
+See https://collectd.org/wiki/index.php/Binary_protocol
+
+Configuration in your Logstash configuration file can be as simple as:
+[source,ruby]
+    input {
+      udp {
+        port => 25826
+        buffer_size => 1452
+        codec => collectd { }
+      }
+    }
+
+A sample `collectd.conf` to send to Logstash might be:
+[source,xml]
+    Hostname    "host.example.com"
+    LoadPlugin interface
+    LoadPlugin load
+    LoadPlugin memory
+    LoadPlugin network
+    <Plugin interface>
+        Interface "eth0"
+        IgnoreSelected false
+    </Plugin>
+    <Plugin network>
+        <Server "10.0.0.1" "25826">
+        </Server>
+    </Plugin>
+
+Be sure to replace `10.0.0.1` with the IP of your Logstash instance.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Collectd Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-authfile>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-nan_handling>> |<>, one of `["change_value", "warn", "drop"]`|No
+| <<{version}-plugins-{type}s-{plugin}-nan_tag>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-nan_value>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-prune_intervals>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-security_level>> |<>, one of `["None", "Sign", "Encrypt"]`|No
+| <<{version}-plugins-{type}s-{plugin}-typesdb>> |<>|No
+|=======================================================================
+
+  
+
+[id="{version}-plugins-{type}s-{plugin}-authfile"]
+===== `authfile`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path to the authentication file. This file should have the same format as
+the http://collectd.org/documentation/manpages/collectd.conf.5.shtml#authfile_filename[AuthFile]
+in collectd. You only need to set this option if the `security_level` is set to
+`Sign` or `Encrypt`.
+
+[id="{version}-plugins-{type}s-{plugin}-nan_handling"]
+===== `nan_handling`
+
+ * Value can be any of: `change_value`, `warn`, `drop`
+ * Default value is `"change_value"`
+
+What to do when a value in the event is `NaN` (Not a Number):
+
+- change_value (default): Change the `NaN` to the value of the nan_value option and add `nan_tag` as a tag
+- warn: Change the `NaN` to the value of the nan_value option, print a warning to the log, and add `nan_tag` as a tag
+- drop: Drop the event containing the `NaN` (this only drops the single event, not the whole packet)
+
+[id="{version}-plugins-{type}s-{plugin}-nan_tag"]
+===== `nan_tag`
+
+ * Value type is <>
+ * Default value is `"_collectdNaN"`
+
+The tag to add to the event if a `NaN` value was found.
+Set this to an empty string (`''`) if you don't want to tag.
+
+[id="{version}-plugins-{type}s-{plugin}-nan_value"]
+===== `nan_value`
+
+ * Value type is <>
+ * Default value is `0`
+
+Only relevant when `nan_handling` is set to `change_value`.
+Change the `NaN` to this configured value.
+
+[id="{version}-plugins-{type}s-{plugin}-prune_intervals"]
+===== `prune_intervals`
+
+ * Value type is <>
+ * Default value is `true`
+
+Prune interval records. Defaults to `true`.
+
+[id="{version}-plugins-{type}s-{plugin}-security_level"]
+===== `security_level`
+
+ * Value can be any of: `None`, `Sign`, `Encrypt`
+ * Default value is `"None"`
+
+Security Level. Default is `None`. This setting mirrors the setting from the
+collectd https://collectd.org/wiki/index.php/Plugin:Network[Network plugin].
+
+[id="{version}-plugins-{type}s-{plugin}-typesdb"]
+===== `typesdb`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+File path(s) to collectd `types.db` to use.
+The last matching pattern wins if you have identical pattern names in multiple files.
+If no types.db is provided the included `types.db` will be used (currently 5.4.0).
+
+
diff --git a/docs/versioned-plugins/codecs/collectd-v3.0.6.asciidoc b/docs/versioned-plugins/codecs/collectd-v3.0.6.asciidoc
new file mode 100644
index 000000000..2369ccf42
--- /dev/null
+++ b/docs/versioned-plugins/codecs/collectd-v3.0.6.asciidoc
@@ -0,0 +1,140 @@
+:plugin: collectd
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.6
+:release_date: 2017-06-30
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-collectd/blob/v3.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Collectd codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events from the collectd binary protocol over the network via udp.
+See https://collectd.org/wiki/index.php/Binary_protocol
+
+Configuration in your Logstash configuration file can be as simple as:
+[source,ruby]
+    input {
+      udp {
+        port => 25826
+        buffer_size => 1452
+        codec => collectd { }
+      }
+    }
+
+A sample `collectd.conf` to send to Logstash might be:
+[source,xml]
+    Hostname    "host.example.com"
+    LoadPlugin interface
+    LoadPlugin load
+    LoadPlugin memory
+    LoadPlugin network
+    <Plugin interface>
+        Interface "eth0"
+        IgnoreSelected false
+    </Plugin>
+    <Plugin network>
+        <Server "10.0.0.1" "25826">
+        </Server>
+    </Plugin>
+
+Be sure to replace `10.0.0.1` with the IP of your Logstash instance.
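+
+The codec options documented below can also be set inline. For example, a
+sketch (not a tested configuration; the types.db path is a placeholder) that
+loads a custom types database and keeps interval records:
+[source,ruby]
+    input {
+      udp {
+        port => 25826
+        codec => collectd {
+          typesdb => ["/usr/share/collectd/types.db"]   # placeholder path
+          prune_intervals => false
+        }
+      }
+    }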
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Collectd Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-authfile>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-nan_handling>> |<>, one of `["change_value", "warn", "drop"]`|No
+| <<{version}-plugins-{type}s-{plugin}-nan_tag>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-nan_value>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-prune_intervals>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-security_level>> |<>, one of `["None", "Sign", "Encrypt"]`|No
+| <<{version}-plugins-{type}s-{plugin}-typesdb>> |<>|No
+|=======================================================================
+
+  
+
+[id="{version}-plugins-{type}s-{plugin}-authfile"]
+===== `authfile`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path to the authentication file. This file should have the same format as
+the http://collectd.org/documentation/manpages/collectd.conf.5.shtml#authfile_filename[AuthFile]
+in collectd. You only need to set this option if the `security_level` is set to
+`Sign` or `Encrypt`.
+
+[id="{version}-plugins-{type}s-{plugin}-nan_handling"]
+===== `nan_handling`
+
+ * Value can be any of: `change_value`, `warn`, `drop`
+ * Default value is `"change_value"`
+
+What to do when a value in the event is `NaN` (Not a Number):
+
+- change_value (default): Change the `NaN` to the value of the nan_value option and add `nan_tag` as a tag
+- warn: Change the `NaN` to the value of the nan_value option, print a warning to the log, and add `nan_tag` as a tag
+- drop: Drop the event containing the `NaN` (this only drops the single event, not the whole packet)
+
+[id="{version}-plugins-{type}s-{plugin}-nan_tag"]
+===== `nan_tag`
+
+ * Value type is <>
+ * Default value is `"_collectdNaN"`
+
+The tag to add to the event if a `NaN` value was found.
+Set this to an empty string (`''`) if you don't want to tag.
+
+[id="{version}-plugins-{type}s-{plugin}-nan_value"]
+===== `nan_value`
+
+ * Value type is <>
+ * Default value is `0`
+
+Only relevant when `nan_handling` is set to `change_value`.
+Change the `NaN` to this configured value.
+
+[id="{version}-plugins-{type}s-{plugin}-prune_intervals"]
+===== `prune_intervals`
+
+ * Value type is <>
+ * Default value is `true`
+
+Prune interval records. Defaults to `true`.
+
+[id="{version}-plugins-{type}s-{plugin}-security_level"]
+===== `security_level`
+
+ * Value can be any of: `None`, `Sign`, `Encrypt`
+ * Default value is `"None"`
+
+Security Level. Default is `None`. This setting mirrors the setting from the
+collectd https://collectd.org/wiki/index.php/Plugin:Network[Network plugin].
+
+[id="{version}-plugins-{type}s-{plugin}-typesdb"]
+===== `typesdb`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+File path(s) to collectd `types.db` to use.
+The last matching pattern wins if you have identical pattern names in multiple files.
+If no types.db is provided the included `types.db` will be used (currently 5.4.0).
+
+
diff --git a/docs/versioned-plugins/codecs/collectd-v3.0.7.asciidoc b/docs/versioned-plugins/codecs/collectd-v3.0.7.asciidoc
new file mode 100644
index 000000000..6e0a0e0f1
--- /dev/null
+++ b/docs/versioned-plugins/codecs/collectd-v3.0.7.asciidoc
@@ -0,0 +1,140 @@
+:plugin: collectd
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.7
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-collectd/blob/v3.0.7/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Collectd codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events from the collectd binary protocol over the network via udp.
+See https://collectd.org/wiki/index.php/Binary_protocol
+
+Configuration in your Logstash configuration file can be as simple as:
+[source,ruby]
+    input {
+      udp {
+        port => 25826
+        buffer_size => 1452
+        codec => collectd { }
+      }
+    }
+
+A sample `collectd.conf` to send to Logstash might be:
+[source,xml]
+    Hostname    "host.example.com"
+    LoadPlugin interface
+    LoadPlugin load
+    LoadPlugin memory
+    LoadPlugin network
+    <Plugin interface>
+        Interface "eth0"
+        IgnoreSelected false
+    </Plugin>
+    <Plugin network>
+        <Server "10.0.0.1" "25826">
+        </Server>
+    </Plugin>
+
+Be sure to replace `10.0.0.1` with the IP of your Logstash instance.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Collectd Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-authfile>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-nan_handling>> |<>, one of `["change_value", "warn", "drop"]`|No
+| <<{version}-plugins-{type}s-{plugin}-nan_tag>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-nan_value>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-prune_intervals>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-security_level>> |<>, one of `["None", "Sign", "Encrypt"]`|No
+| <<{version}-plugins-{type}s-{plugin}-typesdb>> |<>|No
+|=======================================================================
+
+  
+
+[id="{version}-plugins-{type}s-{plugin}-authfile"]
+===== `authfile`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path to the authentication file. This file should have the same format as
+the http://collectd.org/documentation/manpages/collectd.conf.5.shtml#authfile_filename[AuthFile]
+in collectd. You only need to set this option if the `security_level` is set to
+`Sign` or `Encrypt`.
+
+[id="{version}-plugins-{type}s-{plugin}-nan_handling"]
+===== `nan_handling`
+
+ * Value can be any of: `change_value`, `warn`, `drop`
+ * Default value is `"change_value"`
+
+What to do when a value in the event is `NaN` (Not a Number):
+
+- change_value (default): Change the `NaN` to the value of the nan_value option and add `nan_tag` as a tag
+- warn: Change the `NaN` to the value of the nan_value option, print a warning to the log, and add `nan_tag` as a tag
+- drop: Drop the event containing the `NaN` (this only drops the single event, not the whole packet)
+
+[id="{version}-plugins-{type}s-{plugin}-nan_tag"]
+===== `nan_tag`
+
+ * Value type is <>
+ * Default value is `"_collectdNaN"`
+
+The tag to add to the event if a `NaN` value was found.
+Set this to an empty string (`''`) if you don't want to tag.
+
+[id="{version}-plugins-{type}s-{plugin}-nan_value"]
+===== `nan_value`
+
+ * Value type is <>
+ * Default value is `0`
+
+Only relevant when `nan_handling` is set to `change_value`.
+Change the `NaN` to this configured value.
+
+[id="{version}-plugins-{type}s-{plugin}-prune_intervals"]
+===== `prune_intervals`
+
+ * Value type is <>
+ * Default value is `true`
+
+Prune interval records. Defaults to `true`.
+
+[id="{version}-plugins-{type}s-{plugin}-security_level"]
+===== `security_level`
+
+ * Value can be any of: `None`, `Sign`, `Encrypt`
+ * Default value is `"None"`
+
+Security Level. Default is `None`. This setting mirrors the setting from the
+collectd https://collectd.org/wiki/index.php/Plugin:Network[Network plugin].
+
+[id="{version}-plugins-{type}s-{plugin}-typesdb"]
+===== `typesdb`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+File path(s) to collectd `types.db` to use.
+The last matching pattern wins if you have identical pattern names in multiple files.
+If no types.db is provided the included `types.db` will be used (currently 5.4.0).
+
+
diff --git a/docs/versioned-plugins/codecs/collectd-v3.0.8.asciidoc b/docs/versioned-plugins/codecs/collectd-v3.0.8.asciidoc
new file mode 100644
index 000000000..0c4a98655
--- /dev/null
+++ b/docs/versioned-plugins/codecs/collectd-v3.0.8.asciidoc
@@ -0,0 +1,140 @@
+:plugin: collectd
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.8
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-collectd/blob/v3.0.8/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Collectd codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events from the collectd binary protocol over the network via udp.
+See https://collectd.org/wiki/index.php/Binary_protocol
+
+Configuration in your Logstash configuration file can be as simple as:
+[source,ruby]
+    input {
+      udp {
+        port => 25826
+        buffer_size => 1452
+        codec => collectd { }
+      }
+    }
+
+A sample `collectd.conf` to send to Logstash might be:
+[source,xml]
+    Hostname    "host.example.com"
+    LoadPlugin interface
+    LoadPlugin load
+    LoadPlugin memory
+    LoadPlugin network
+    <Plugin interface>
+        Interface "eth0"
+        IgnoreSelected false
+    </Plugin>
+    <Plugin network>
+        <Server "10.0.0.1" "25826">
+        </Server>
+    </Plugin>
+
+Be sure to replace `10.0.0.1` with the IP of your Logstash instance.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Collectd Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-authfile>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-nan_handling>> |<>, one of `["change_value", "warn", "drop"]`|No
+| <<{version}-plugins-{type}s-{plugin}-nan_tag>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-nan_value>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-prune_intervals>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-security_level>> |<>, one of `["None", "Sign", "Encrypt"]`|No
+| <<{version}-plugins-{type}s-{plugin}-typesdb>> |<>|No
+|=======================================================================
+
+  
+
+[id="{version}-plugins-{type}s-{plugin}-authfile"]
+===== `authfile`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path to the authentication file. This file should have the same format as
+the http://collectd.org/documentation/manpages/collectd.conf.5.shtml#authfile_filename[AuthFile]
+in collectd. You only need to set this option if the `security_level` is set to
+`Sign` or `Encrypt`.
+
+[id="{version}-plugins-{type}s-{plugin}-nan_handling"]
+===== `nan_handling`
+
+ * Value can be any of: `change_value`, `warn`, `drop`
+ * Default value is `"change_value"`
+
+What to do when a value in the event is `NaN` (Not a Number):
+
+- change_value (default): Change the `NaN` to the value of the nan_value option and add `nan_tag` as a tag
+- warn: Change the `NaN` to the value of the nan_value option, print a warning to the log, and add `nan_tag` as a tag
+- drop: Drop the event containing the `NaN` (this only drops the single event, not the whole packet)
+
+[id="{version}-plugins-{type}s-{plugin}-nan_tag"]
+===== `nan_tag`
+
+ * Value type is <>
+ * Default value is `"_collectdNaN"`
+
+The tag to add to the event if a `NaN` value was found.
+Set this to an empty string (`''`) if you don't want to tag.
+
+[id="{version}-plugins-{type}s-{plugin}-nan_value"]
+===== `nan_value`
+
+ * Value type is <>
+ * Default value is `0`
+
+Only relevant when `nan_handling` is set to `change_value`.
+Change the `NaN` to this configured value.
+
+[id="{version}-plugins-{type}s-{plugin}-prune_intervals"]
+===== `prune_intervals`
+
+ * Value type is <>
+ * Default value is `true`
+
+Prune interval records. Defaults to `true`.
+
+[id="{version}-plugins-{type}s-{plugin}-security_level"]
+===== `security_level`
+
+ * Value can be any of: `None`, `Sign`, `Encrypt`
+ * Default value is `"None"`
+
+Security Level. Default is `None`. This setting mirrors the setting from the
+collectd https://collectd.org/wiki/index.php/Plugin:Network[Network plugin].
+
+[id="{version}-plugins-{type}s-{plugin}-typesdb"]
+===== `typesdb`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+File path(s) to collectd `types.db` to use.
+The last matching pattern wins if you have identical pattern names in multiple files.
+If no types.db is provided the included `types.db` will be used (currently 5.4.0).
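+
+As an illustrative sketch (not a tested configuration; the tag name is a
+placeholder), the NaN-related options can be combined like this:
+[source,ruby]
+    input {
+      udp {
+        port => 25826
+        codec => collectd {
+          nan_handling => "warn"
+          nan_tag      => "_collectd_nan"   # placeholder tag name
+          nan_value    => -1
+        }
+      }
+    }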
+
+
+
diff --git a/docs/versioned-plugins/codecs/compress_spooler-index.asciidoc b/docs/versioned-plugins/codecs/compress_spooler-index.asciidoc
new file mode 100644
index 000000000..985b344f0
--- /dev/null
+++ b/docs/versioned-plugins/codecs/compress_spooler-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: compress_spooler
+:type: codec
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::compress_spooler-v2.0.6.asciidoc[]
+include::compress_spooler-v2.0.5.asciidoc[]
+
diff --git a/docs/versioned-plugins/codecs/compress_spooler-v2.0.5.asciidoc b/docs/versioned-plugins/codecs/compress_spooler-v2.0.5.asciidoc
new file mode 100644
index 000000000..1984e58f2
--- /dev/null
+++ b/docs/versioned-plugins/codecs/compress_spooler-v2.0.5.asciidoc
@@ -0,0 +1,64 @@
+:plugin: compress_spooler
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.5
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-compress_spooler/blob/v2.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Compress_spooler codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Compress_spooler Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-compress_level>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-min_flush_time>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-spool_size>> |<>|No
+|=======================================================================
+
+  
+
+[id="{version}-plugins-{type}s-{plugin}-compress_level"]
+===== `compress_level`
+
+ * Value type is <>
+ * Default value is `6`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-min_flush_time"]
+===== `min_flush_time`
+
+ * Value type is <>
+ * Default value is `0`
+
+The amount of time in seconds since the last flush before a flush is forced
+on the next event.
+Values smaller than 0 disable time-based flushing.
+
+[id="{version}-plugins-{type}s-{plugin}-spool_size"]
+===== `spool_size`
+
+ * Value type is <>
+ * Default value is `50`
+
+
+
+
diff --git a/docs/versioned-plugins/codecs/compress_spooler-v2.0.6.asciidoc b/docs/versioned-plugins/codecs/compress_spooler-v2.0.6.asciidoc
new file mode 100644
index 000000000..2ec27c0b8
--- /dev/null
+++ b/docs/versioned-plugins/codecs/compress_spooler-v2.0.6.asciidoc
@@ -0,0 +1,64 @@
+:plugin: compress_spooler
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.6
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-compress_spooler/blob/v2.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Compress_spooler codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Compress_spooler Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-compress_level>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-min_flush_time>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-spool_size>> |<>|No
+|=======================================================================
+
+  
+
+[id="{version}-plugins-{type}s-{plugin}-compress_level"]
+===== `compress_level`
+
+ * Value type is <>
+ * Default value is `6`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-min_flush_time"]
+===== `min_flush_time`
+
+ * Value type is <>
+ * Default value is `0`
+
+The amount of time in seconds since the last flush before a flush is forced
+on the next event.
+Values smaller than 0 disable time-based flushing.
+
+[id="{version}-plugins-{type}s-{plugin}-spool_size"]
+===== `spool_size`
+
+ * Value type is <>
+ * Default value is `50`
+
+
+
+
diff --git a/docs/versioned-plugins/codecs/csv-index.asciidoc b/docs/versioned-plugins/codecs/csv-index.asciidoc
new file mode 100644
index 000000000..431aa2609
--- /dev/null
+++ b/docs/versioned-plugins/codecs/csv-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: csv
+:type: codec
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::csv-v0.1.4.asciidoc[]
+include::csv-v0.1.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/codecs/csv-v0.1.3.asciidoc b/docs/versioned-plugins/codecs/csv-v0.1.3.asciidoc
new file mode 100644
index 000000000..16c9c2efd
--- /dev/null
+++ b/docs/versioned-plugins/codecs/csv-v0.1.3.asciidoc
@@ -0,0 +1,132 @@
+:plugin: csv
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v0.1.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-csv/blob/v0.1.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Csv codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Csv Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-autogenerate_column_names>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No
+| <<{version}-plugins-{type}s-{plugin}-columns>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-convert>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_headers>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-quote_char>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-separator>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-skip_empty_columns>> |<>|No
+|=======================================================================
+
+  
+
+[id="{version}-plugins-{type}s-{plugin}-autogenerate_column_names"]
+===== `autogenerate_column_names`
+
+ * Value type is <>
+ * Default value is `true`
+
+Define whether column names should be autogenerated or not.
+Defaults to true. If set to false, columns not having a header specified will not be parsed.
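+
+For example, a minimal sketch (not a tested configuration; the column names
+are placeholders) that disables autogeneration and names the columns explicitly:
+[source,ruby]
+    input {
+      stdin {
+        codec => csv {
+          autogenerate_column_names => false
+          columns => ["timestamp", "level", "message"]   # placeholder names
+        }
+      }
+    }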
+
+[id="{version}-plugins-{type}s-{plugin}-charset"]
+===== `charset`
+
+ * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+ * Default value is `"UTF-8"`
+
+The character encoding used in this codec. Examples include "UTF-8" and
+"CP1252".
+
+[id="{version}-plugins-{type}s-{plugin}-columns"]
+===== `columns`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Define a list of column names (in the order they appear in the CSV,
+as if it were a header line). If `columns` is not configured, or there
+are not enough columns specified, the default column names are
+"column1", "column2", etc. In the case that there are more columns
+in the data than specified in this column list, extra columns will be auto-numbered:
+(e.g. "user_defined_1", "user_defined_2", "column3", "column4", etc.)
+
+[id="{version}-plugins-{type}s-{plugin}-convert"]
+===== `convert`
+
+ * Value type is <>
+ * Default value is `{}`
+
+Define a set of datatype conversions to be applied to columns.
+Possible conversions are integer, float, date, date_time, and boolean.
+
+Example:
+[source,ruby]
+    input {
+      stdin {
+        codec => csv {
+          convert => { "column1" => "integer", "column2" => "boolean" }
+        }
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-include_headers"]
+===== `include_headers`
+
+ * Value type is <>
+ * Default value is `false`
+
+Treats the first line received as the header information; this information
+will be used to compose the field names in the generated events. Note that
+this information can be reset on demand, which is useful, for example, when
+dealing with new files in the file input or new requests in the http_poller.
+
+[id="{version}-plugins-{type}s-{plugin}-quote_char"]
+===== `quote_char`
+
+ * Value type is <>
+ * Default value is `"\""`
+
+Define the character used to quote CSV fields. If this is not specified,
+the default is a double quote `"`.
+Optional.
+
+[id="{version}-plugins-{type}s-{plugin}-separator"]
+===== `separator`
+
+ * Value type is <>
+ * Default value is `","`
+
+Define the column separator value. If this is not specified, the default
+is a comma `,`.
+Optional.
+
+[id="{version}-plugins-{type}s-{plugin}-skip_empty_columns"]
+===== `skip_empty_columns`
+
+ * Value type is <>
+ * Default value is `false`
+
+Define whether empty columns should be skipped.
+Defaults to false. If set to true, columns containing no value will not get set.
+
+
diff --git a/docs/versioned-plugins/codecs/csv-v0.1.4.asciidoc b/docs/versioned-plugins/codecs/csv-v0.1.4.asciidoc
new file mode 100644
index 000000000..ad1693a62
--- /dev/null
+++ b/docs/versioned-plugins/codecs/csv-v0.1.4.asciidoc
@@ -0,0 +1,132 @@
+:plugin: csv
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v0.1.4
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-csv/blob/v0.1.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Csv codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Takes CSV data, parses it, and passes it along.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Csv Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-autogenerate_column_names>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No
+| <<{version}-plugins-{type}s-{plugin}-columns>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-convert>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_headers>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-quote_char>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-separator>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-skip_empty_columns>> |<>|No
+|=======================================================================
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-autogenerate_column_names"]
+===== `autogenerate_column_names`
+
+ * Value type is <>
+ * Default value is `true`
+
+Define whether column names should be autogenerated or not.
+Defaults to true. If set to false, columns without a specified header will not be parsed.
+
+[id="{version}-plugins-{type}s-{plugin}-charset"]
+===== `charset`
+
+ * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+ * Default value is `"UTF-8"`
+
+The character encoding used in this codec. Examples include "UTF-8" and
+"CP1252".
+
+[id="{version}-plugins-{type}s-{plugin}-columns"]
+===== `columns`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Define a list of column names (in the order they appear in the CSV,
+as if it were a header line). If `columns` is not configured, or there
+are not enough columns specified, the default column names are
+"column1", "column2", etc. In the case that there are more columns
+in the data than specified in this column list, extra columns will be auto-numbered:
+(e.g. "user_defined_1", "user_defined_2", "column3", "column4", etc.)
+
+[id="{version}-plugins-{type}s-{plugin}-convert"]
+===== `convert`
+
+ * Value type is <>
+ * Default value is `{}`
+
+Define a set of datatype conversions to be applied to columns.
+Possible conversions are integer, float, date, date_time, and boolean.
+
+Example:
+[source,ruby]
+    input {
+      stdin {
+        codec => csv {
+          convert => { "column1" => "integer", "column2" => "boolean" }
+        }
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-include_headers"]
+===== `include_headers`
+
+ * Value type is <>
+ * Default value is `false`
+
+Treats the first line received as the header information; this information
+will be used to compose the field names in the generated events. Note that
+this information can be reset on demand, which is useful, for example, when
+dealing with new files in the file input or new requests in the http_poller.
+
+[id="{version}-plugins-{type}s-{plugin}-quote_char"]
+===== `quote_char`
+
+ * Value type is <>
+ * Default value is `"\""`
+
+Define the character used to quote CSV fields. If this is not specified,
+the default is a double quote `"`.
+Optional.
+
+[id="{version}-plugins-{type}s-{plugin}-separator"]
+===== `separator`
+
+ * Value type is <>
+ * Default value is `","`
+
+Define the column separator value. If this is not specified, the default
+is a comma `,`.
+Optional.
+
+[id="{version}-plugins-{type}s-{plugin}-skip_empty_columns"]
+===== `skip_empty_columns`
+
+ * Value type is <>
+ * Default value is `false`
+
+Define whether empty columns should be skipped.
+Defaults to false. If set to true, columns containing no value will not get set.
+
+
diff --git a/docs/versioned-plugins/codecs/dots-index.asciidoc b/docs/versioned-plugins/codecs/dots-index.asciidoc
new file mode 100644
index 000000000..8bffee66c
--- /dev/null
+++ b/docs/versioned-plugins/codecs/dots-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: dots
+:type: codec
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::dots-v3.0.6.asciidoc[]
+include::dots-v3.0.5.asciidoc[]
+include::dots-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/codecs/dots-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/dots-v3.0.3.asciidoc
new file mode 100644
index 000000000..06c3bdbae
--- /dev/null
+++ b/docs/versioned-plugins/codecs/dots-v3.0.3.asciidoc
@@ -0,0 +1,31 @@
+:plugin: dots
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-dots/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Dots codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec generates a dot (`.`) to represent each event it processes. This is typically used with the `stdout` output to provide feedback on the terminal. It is also used to measure Logstash's throughput with the `pv` command.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Dots Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+|=======================================================================
diff --git a/docs/versioned-plugins/codecs/dots-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/dots-v3.0.5.asciidoc
new file mode 100644
index 000000000..6e36ff916
--- /dev/null
+++ b/docs/versioned-plugins/codecs/dots-v3.0.5.asciidoc
@@ -0,0 +1,23 @@
+:plugin: dots
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-dots/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Dots codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec generates a dot (`.`) to represent each event it processes. This is typically used with the `stdout` output to provide feedback on the terminal. It is also used to measure Logstash's throughput with the `pv` command.
diff --git a/docs/versioned-plugins/codecs/dots-v3.0.6.asciidoc b/docs/versioned-plugins/codecs/dots-v3.0.6.asciidoc
new file mode 100644
index 000000000..f217f753f
--- /dev/null
+++ b/docs/versioned-plugins/codecs/dots-v3.0.6.asciidoc
@@ -0,0 +1,23 @@
+:plugin: dots
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.6
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-dots/blob/v3.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Dots codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec generates a dot (`.`) to represent each event it processes. This is typically used with the `stdout` output to provide feedback on the terminal. It is also used to measure Logstash's throughput with the `pv` command.
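+
+As an illustrative sketch (hypothetical: the `generator` input is just one
+way to drive events through the pipeline), the codec is typically attached
+to a `stdout` output:
+[source,ruby]
+    input { generator { count => 100000 } }
+    output { stdout { codec => dots } }
+
+Piping Logstash's standard output through `pv` (for example
+`bin/logstash -f dots.conf | pv > /dev/null`, where `dots.conf` is a
+hypothetical config file name) then gives a rough throughput reading,
+since the codec emits exactly one byte per event.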
diff --git a/docs/versioned-plugins/codecs/edn-index.asciidoc b/docs/versioned-plugins/codecs/edn-index.asciidoc
new file mode 100644
index 000000000..58f4d4369
--- /dev/null
+++ b/docs/versioned-plugins/codecs/edn-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: edn
+:type: codec
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::edn-v3.0.6.asciidoc[]
+include::edn-v3.0.5.asciidoc[]
+include::edn-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/codecs/edn-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/edn-v3.0.3.asciidoc
new file mode 100644
index 000000000..8c779c5d7
--- /dev/null
+++ b/docs/versioned-plugins/codecs/edn-v3.0.3.asciidoc
@@ -0,0 +1,33 @@
+:plugin: edn
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-edn/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Edn codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Reads and produces EDN format data.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Edn Codec Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+|=======================================================================
diff --git a/docs/versioned-plugins/codecs/edn-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/edn-v3.0.5.asciidoc
new file mode 100644
index 000000000..6afee1e83
--- /dev/null
+++ b/docs/versioned-plugins/codecs/edn-v3.0.5.asciidoc
@@ -0,0 +1,25 @@
+:plugin: edn
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-edn/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Edn codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Reads and produces EDN format data.
+
+
diff --git a/docs/versioned-plugins/codecs/edn-v3.0.6.asciidoc b/docs/versioned-plugins/codecs/edn-v3.0.6.asciidoc
new file mode 100644
index 000000000..d58ac23dc
--- /dev/null
+++ b/docs/versioned-plugins/codecs/edn-v3.0.6.asciidoc
@@ -0,0 +1,25 @@
+:plugin: edn
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.6
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-edn/blob/v3.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Edn codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Reads and produces EDN format data.
+
+
diff --git a/docs/versioned-plugins/codecs/edn_lines-index.asciidoc b/docs/versioned-plugins/codecs/edn_lines-index.asciidoc
new file mode 100644
index 000000000..9656a0df3
--- /dev/null
+++ b/docs/versioned-plugins/codecs/edn_lines-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: edn_lines
+:type: codec
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::edn_lines-v3.0.6.asciidoc[]
+include::edn_lines-v3.0.5.asciidoc[]
+include::edn_lines-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/codecs/edn_lines-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/edn_lines-v3.0.3.asciidoc
new file mode 100644
index 000000000..952290275
--- /dev/null
+++ b/docs/versioned-plugins/codecs/edn_lines-v3.0.3.asciidoc
@@ -0,0 +1,31 @@
+:plugin: edn_lines
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-edn_lines/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Edn_lines codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Reads and produces newline-delimited EDN format data.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Edn_lines Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+|=======================================================================
diff --git a/docs/versioned-plugins/codecs/edn_lines-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/edn_lines-v3.0.5.asciidoc
new file mode 100644
index 000000000..759e6b462
--- /dev/null
+++ b/docs/versioned-plugins/codecs/edn_lines-v3.0.5.asciidoc
@@ -0,0 +1,23 @@
+:plugin: edn_lines
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-edn_lines/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Edn_lines codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Reads and produces newline-delimited EDN format data.
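+
+As a usage sketch (hypothetical: the `tcp` input and port number are
+assumptions, not defaults of this codec), the codec can be attached to any
+line-oriented source:
+[source,ruby]
+    input {
+      tcp {
+        port => 3333
+        codec => edn_lines
+      }
+    }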
diff --git a/docs/versioned-plugins/codecs/edn_lines-v3.0.6.asciidoc b/docs/versioned-plugins/codecs/edn_lines-v3.0.6.asciidoc
new file mode 100644
index 000000000..56e5802b7
--- /dev/null
+++ b/docs/versioned-plugins/codecs/edn_lines-v3.0.6.asciidoc
@@ -0,0 +1,23 @@
+:plugin: edn_lines
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.6
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-edn_lines/blob/v3.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Edn_lines codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Reads and produces newline-delimited EDN format data.
diff --git a/docs/versioned-plugins/codecs/es_bulk-index.asciidoc b/docs/versioned-plugins/codecs/es_bulk-index.asciidoc
new file mode 100644
index 000000000..faba473ef
--- /dev/null
+++ b/docs/versioned-plugins/codecs/es_bulk-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: es_bulk
+:type: codec
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::es_bulk-v3.0.6.asciidoc[]
+include::es_bulk-v3.0.5.asciidoc[]
+include::es_bulk-v3.0.4.asciidoc[]
+
diff --git a/docs/versioned-plugins/codecs/es_bulk-v3.0.4.asciidoc b/docs/versioned-plugins/codecs/es_bulk-v3.0.4.asciidoc
new file mode 100644
index 000000000..2cd33cb27
--- /dev/null
+++ b/docs/versioned-plugins/codecs/es_bulk-v3.0.4.asciidoc
@@ -0,0 +1,35 @@
+:plugin: es_bulk
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-es_bulk/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Es_bulk codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec will decode the http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk format]
+into individual events, plus metadata into the `@metadata` field.
+
+Encoding is not supported at this time as the Elasticsearch
+output submits Logstash events in bulk format.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Es_bulk Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+|=======================================================================
diff --git a/docs/versioned-plugins/codecs/es_bulk-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/es_bulk-v3.0.5.asciidoc
new file mode 100644
index 000000000..53c00a939
--- /dev/null
+++ b/docs/versioned-plugins/codecs/es_bulk-v3.0.5.asciidoc
@@ -0,0 +1,28 @@
+:plugin: es_bulk
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-es_bulk/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Es_bulk codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec will decode the http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk format]
+into individual events, plus metadata into the `@metadata` field.
+
+Encoding is not supported at this time as the Elasticsearch
+output submits Logstash events in bulk format.
+
diff --git a/docs/versioned-plugins/codecs/es_bulk-v3.0.6.asciidoc b/docs/versioned-plugins/codecs/es_bulk-v3.0.6.asciidoc
new file mode 100644
index 000000000..07588b3e4
--- /dev/null
+++ b/docs/versioned-plugins/codecs/es_bulk-v3.0.6.asciidoc
@@ -0,0 +1,28 @@
+:plugin: es_bulk
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.6
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-es_bulk/blob/v3.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Es_bulk codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec will decode the http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk format]
+into individual events, plus metadata into the `@metadata` field.
+
+Encoding is not supported at this time as the Elasticsearch
+output submits Logstash events in bulk format.
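+
+For illustration, a sketch of decoding bulk payloads received over HTTP
+(hypothetical: the `http` input and port number are assumptions, not part
+of this codec):
+[source,ruby]
+    input {
+      http {
+        port => 8080
+        codec => es_bulk
+      }
+    }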
+
diff --git a/docs/versioned-plugins/codecs/example-index.asciidoc b/docs/versioned-plugins/codecs/example-index.asciidoc
new file mode 100644
index 000000000..cf9b9a19d
--- /dev/null
+++ b/docs/versioned-plugins/codecs/example-index.asciidoc
@@ -0,0 +1,10 @@
+:plugin: example
+:type: codec
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+|=======================================================================
+
+
diff --git a/docs/versioned-plugins/codecs/fluent-index.asciidoc b/docs/versioned-plugins/codecs/fluent-index.asciidoc
new file mode 100644
index 000000000..84403dcac
--- /dev/null
+++ b/docs/versioned-plugins/codecs/fluent-index.asciidoc
@@ -0,0 +1,18 @@
+:plugin: fluent
+:type: codec
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-10-12
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::fluent-v3.1.5.asciidoc[]
+include::fluent-v3.1.4.asciidoc[]
+include::fluent-v3.1.3.asciidoc[]
+include::fluent-v3.1.2.asciidoc[]
+
diff --git a/docs/versioned-plugins/codecs/fluent-v3.1.2.asciidoc b/docs/versioned-plugins/codecs/fluent-v3.1.2.asciidoc
new file mode 100644
index 000000000..18ed83985
--- /dev/null
+++ b/docs/versioned-plugins/codecs/fluent-v3.1.2.asciidoc
@@ -0,0 +1,51 @@
+:plugin: fluent
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-fluent/blob/v3.1.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Fluent codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec handles fluentd's msgpack schema.
+
+For example, you can receive logs from `fluent-logger-ruby` with:
+[source,ruby]
+    input {
+      tcp {
+        codec => fluent
+        port => 4000
+      }
+    }
+
+And from your Ruby code in your own application:
+[source,ruby]
+    logger = Fluent::Logger::FluentLogger.new(nil, :host => "example.log", :port => 4000)
+    logger.post("some_tag", { "your" => "data", "here" => "yay!" })
+
+Notes:
+
+* fluentd uses second-precision time for events, so you will never see
+  subsecond precision on events processed by this codec.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Fluent Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+|=======================================================================
diff --git a/docs/versioned-plugins/codecs/fluent-v3.1.3.asciidoc b/docs/versioned-plugins/codecs/fluent-v3.1.3.asciidoc
new file mode 100644
index 000000000..aae0a595c
--- /dev/null
+++ b/docs/versioned-plugins/codecs/fluent-v3.1.3.asciidoc
@@ -0,0 +1,43 @@
+:plugin: fluent
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.3
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-fluent/blob/v3.1.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Fluent codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec handles fluentd's msgpack schema.
+
+For example, you can receive logs from `fluent-logger-ruby` with:
+[source,ruby]
+    input {
+      tcp {
+        codec => fluent
+        port => 4000
+      }
+    }
+
+And from your Ruby code in your own application:
+[source,ruby]
+    logger = Fluent::Logger::FluentLogger.new(nil, :host => "example.log", :port => 4000)
+    logger.post("some_tag", { "your" => "data", "here" => "yay!" })
+
+Notes:
+
+* fluentd uses second-precision time for events, so you will never see
+  subsecond precision on events processed by this codec.
+
diff --git a/docs/versioned-plugins/codecs/fluent-v3.1.4.asciidoc b/docs/versioned-plugins/codecs/fluent-v3.1.4.asciidoc
new file mode 100644
index 000000000..6aa1d9218
--- /dev/null
+++ b/docs/versioned-plugins/codecs/fluent-v3.1.4.asciidoc
@@ -0,0 +1,43 @@
+:plugin: fluent
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.4
+:release_date: 2017-10-12
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-fluent/blob/v3.1.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Fluent codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec handles fluentd's msgpack schema.
+
+For example, you can receive logs from `fluent-logger-ruby` with:
+[source,ruby]
+    input {
+      tcp {
+        codec => fluent
+        port => 4000
+      }
+    }
+
+And from your Ruby code in your own application:
+[source,ruby]
+    logger = Fluent::Logger::FluentLogger.new(nil, :host => "example.log", :port => 4000)
+    logger.post("some_tag", { "your" => "data", "here" => "yay!" })
+
+Notes:
+
+* fluentd uses second-precision time for events, so you will never see
+  subsecond precision on events processed by this codec.
+
diff --git a/docs/versioned-plugins/codecs/fluent-v3.1.5.asciidoc b/docs/versioned-plugins/codecs/fluent-v3.1.5.asciidoc
new file mode 100644
index 000000000..6a1d3aec2
--- /dev/null
+++ b/docs/versioned-plugins/codecs/fluent-v3.1.5.asciidoc
@@ -0,0 +1,43 @@
+:plugin: fluent
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.5
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-fluent/blob/v3.1.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Fluent codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec handles fluentd's msgpack schema.
+
+For example, you can receive logs from `fluent-logger-ruby` with:
+[source,ruby]
+    input {
+      tcp {
+        codec => fluent
+        port => 4000
+      }
+    }
+
+And from your Ruby code in your own application:
+[source,ruby]
+    logger = Fluent::Logger::FluentLogger.new(nil, :host => "example.log", :port => 4000)
+    logger.post("some_tag", { "your" => "data", "here" => "yay!" })
+
+Notes:
+
+* fluentd uses second-precision time for events, so you will never see
+  subsecond precision on events processed by this codec.
+
diff --git a/docs/versioned-plugins/codecs/graphite-index.asciidoc b/docs/versioned-plugins/codecs/graphite-index.asciidoc
new file mode 100644
index 000000000..75287d628
--- /dev/null
+++ b/docs/versioned-plugins/codecs/graphite-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: graphite
+:type: codec
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::graphite-v3.0.5.asciidoc[]
+include::graphite-v3.0.4.asciidoc[]
+include::graphite-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/codecs/graphite-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/graphite-v3.0.3.asciidoc
new file mode 100644
index 000000000..911a801f7
--- /dev/null
+++ b/docs/versioned-plugins/codecs/graphite-v3.0.3.asciidoc
@@ -0,0 +1,93 @@
+:plugin: graphite
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-graphite/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Graphite codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec will encode and decode Graphite formatted lines.
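+
+For reference, a Graphite plaintext line has the form
+`metric.path value timestamp`, one metric per line. As a decoding sketch
+(hypothetical: any TCP source works; port 2003 is simply Graphite's
+conventional plaintext port), the codec can be attached to a `tcp` input:
+[source,ruby]
+    input {
+      tcp {
+        port => 2003
+        codec => graphite
+      }
+    }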
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Graphite Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-exclude_metrics>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-fields_are_metrics>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_metrics>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-metrics_format>> |<>|No
+|=======================================================================
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-exclude_metrics"]
+===== `exclude_metrics`
+
+ * Value type is <>
+ * Default value is `["%{[^}]+}"]`
+
+Exclude regex-matched metric names; by default, exclude unresolved `%{field}` strings.
+
+[id="{version}-plugins-{type}s-{plugin}-fields_are_metrics"]
+===== `fields_are_metrics`
+
+ * Value type is <>
+ * Default value is `false`
+
+Indicate that the event @fields should be treated as metrics and will be sent as-is to Graphite.
+
+[id="{version}-plugins-{type}s-{plugin}-include_metrics"]
+===== `include_metrics`
+
+ * Value type is <>
+ * Default value is `[".*"]`
+
+Include only regex-matched metric names.
+
+[id="{version}-plugins-{type}s-{plugin}-metrics"]
+===== `metrics`
+
+ * Value type is <>
+ * Default value is `{}`
+
+The metric(s) to use. This supports dynamic strings like `%{host}`
+for metric names and also for values. This is a hash field with the key
+being the metric name and the value being the metric value. Example:
+[source,ruby]
+    [ "%{host}/uptime", "%{uptime_1m}" ]
+
+The value will be coerced to a floating point value. Values which cannot be
+coerced will be set to zero (0).
+
+[id="{version}-plugins-{type}s-{plugin}-metrics_format"]
+===== `metrics_format`
+
+ * Value type is <>
+ * Default value is `"*"`
+
+Defines the format of the metric string. The placeholder `*` will be
+replaced with the name of the actual metric. This supports dynamic
+strings like `%{host}`.
+[source,ruby]
+    metrics_format => "%{host}.foo.bar.*.sum"
+
+NOTE: If no `metrics_format` is defined, the name of the metric will be used as a fallback.
+
+
diff --git a/docs/versioned-plugins/codecs/graphite-v3.0.4.asciidoc b/docs/versioned-plugins/codecs/graphite-v3.0.4.asciidoc
new file mode 100644
index 000000000..7674051cc
--- /dev/null
+++ b/docs/versioned-plugins/codecs/graphite-v3.0.4.asciidoc
@@ -0,0 +1,93 @@
+:plugin: graphite
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-graphite/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Graphite codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec will encode and decode Graphite formatted lines.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Graphite Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-exclude_metrics>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-fields_are_metrics>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_metrics>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-metrics_format>> |<>|No
+|=======================================================================
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-exclude_metrics"]
+===== `exclude_metrics`
+
+ * Value type is <>
+ * Default value is `["%{[^}]+}"]`
+
+Exclude regex-matched metric names; by default, exclude unresolved `%{field}` strings.
+
+[id="{version}-plugins-{type}s-{plugin}-fields_are_metrics"]
+===== `fields_are_metrics`
+
+ * Value type is <>
+ * Default value is `false`
+
+Indicate that the event @fields should be treated as metrics and will be sent as-is to Graphite.
+
+[id="{version}-plugins-{type}s-{plugin}-include_metrics"]
+===== `include_metrics`
+
+ * Value type is <>
+ * Default value is `[".*"]`
+
+Include only regex-matched metric names.
+
+[id="{version}-plugins-{type}s-{plugin}-metrics"]
+===== `metrics`
+
+ * Value type is <>
+ * Default value is `{}`
+
+The metric(s) to use. This supports dynamic strings like `%{host}`
+for metric names and also for values. This is a hash field with the key
+being the metric name and the value being the metric value. Example:
+[source,ruby]
+    [ "%{host}/uptime", "%{uptime_1m}" ]
+
+The value will be coerced to a floating point value. Values which cannot be
+coerced will be set to zero (0).
+
+[id="{version}-plugins-{type}s-{plugin}-metrics_format"]
+===== `metrics_format`
+
+ * Value type is <>
+ * Default value is `"*"`
+
+Defines the format of the metric string. The placeholder `*` will be
+replaced with the name of the actual metric. This supports dynamic
+strings like `%{host}`.
+[source,ruby]
+    metrics_format => "%{host}.foo.bar.*.sum"
+
+NOTE: If no `metrics_format` is defined, the name of the metric will be used as a fallback.
+
+
diff --git a/docs/versioned-plugins/codecs/graphite-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/graphite-v3.0.5.asciidoc
new file mode 100644
index 000000000..e61023f0d
--- /dev/null
+++ b/docs/versioned-plugins/codecs/graphite-v3.0.5.asciidoc
@@ -0,0 +1,93 @@
+:plugin: graphite
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-graphite/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Graphite codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec will encode and decode Graphite formatted lines.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Graphite Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-exclude_metrics>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-fields_are_metrics>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_metrics>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-metrics_format>> |<>|No
+|=======================================================================
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-exclude_metrics"]
+===== `exclude_metrics`
+
+ * Value type is <>
+ * Default value is `["%{[^}]+}"]`
+
+Exclude regex-matched metric names; by default, exclude unresolved `%{field}` strings.
+
+[id="{version}-plugins-{type}s-{plugin}-fields_are_metrics"]
+===== `fields_are_metrics`
+
+ * Value type is <>
+ * Default value is `false`
+
+Indicate that the event @fields should be treated as metrics and will be sent as-is to Graphite.
+
+[id="{version}-plugins-{type}s-{plugin}-include_metrics"]
+===== `include_metrics`
+
+ * Value type is <>
+ * Default value is `[".*"]`
+
+Include only regex-matched metric names.
+
+[id="{version}-plugins-{type}s-{plugin}-metrics"]
+===== `metrics`
+
+ * Value type is <>
+ * Default value is `{}`
+
+The metric(s) to use. This supports dynamic strings like `%{host}`
+for metric names and also for values. This is a hash field with the key
+being the metric name and the value being the metric value. Example:
+[source,ruby]
+    [ "%{host}/uptime", "%{uptime_1m}" ]
+
+The value will be coerced to a floating point value. Values which cannot be
+coerced will be set to zero (0).
+
+[id="{version}-plugins-{type}s-{plugin}-metrics_format"]
+===== `metrics_format`
+
+ * Value type is <>
+ * Default value is `"*"`
+
+Defines the format of the metric string. The placeholder `*` will be
+replaced with the name of the actual metric. This supports dynamic
+strings like `%{host}`.
+[source,ruby]
+    metrics_format => "%{host}.foo.bar.*.sum"
+
+NOTE: If no `metrics_format` is defined, the name of the metric will be used as a fallback.
+
+
diff --git a/docs/versioned-plugins/codecs/gzip_lines-index.asciidoc b/docs/versioned-plugins/codecs/gzip_lines-index.asciidoc
new file mode 100644
index 000000000..64a11da62
--- /dev/null
+++ b/docs/versioned-plugins/codecs/gzip_lines-index.asciidoc
@@ -0,0 +1,18 @@
+:plugin: gzip_lines
+:type: codec
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+| <> | 2017-06-19
+|=======================================================================
+
+include::gzip_lines-v3.0.3.asciidoc[]
+include::gzip_lines-v3.0.2.asciidoc[]
+include::gzip_lines-v3.0.1.asciidoc[]
+include::gzip_lines-v3.0.0.asciidoc[]
+
diff --git a/docs/versioned-plugins/codecs/gzip_lines-v3.0.0.asciidoc b/docs/versioned-plugins/codecs/gzip_lines-v3.0.0.asciidoc
new file mode 100644
index 000000000..20575bac8
--- /dev/null
+++ b/docs/versioned-plugins/codecs/gzip_lines-v3.0.0.asciidoc
@@ -0,0 +1,59 @@
+:plugin: gzip_lines
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.0
+:release_date: 2017-06-19
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-gzip_lines/blob/v3.0.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Gzip_lines codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec will read gzip-encoded content.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Gzip_lines Codec Configuration Options
+
+This plugin supports the following configuration options plus the <> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No
+|=======================================================================
+
+Also see <> for a list of options supported by all
+codec plugins.
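+
+As a usage sketch (hypothetical: the `file` input and the path pattern are
+assumptions, not part of this codec), compressed log files can be read line
+by line with:
+[source,ruby]
+    input {
+      file {
+        path => "/var/log/app/*.log.gz"
+        codec => gzip_lines
+      }
+    }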
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-charset"]
+===== `charset`
+
+ * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+ * Default value is `"UTF-8"`
+
+The character encoding used in this codec. Examples include "UTF-8" and
+"CP1252".
+
+JSON requires valid UTF-8 strings, but in some cases, software that
+emits JSON does so in another encoding (nxlog, for example). In
+weird cases like this, you can set the charset setting to the
+actual encoding of the text and Logstash will convert it for you.
+
+For nxlog users, you'll want to set this to "CP1252".
+
+
+
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/codecs/gzip_lines-v3.0.1.asciidoc b/docs/versioned-plugins/codecs/gzip_lines-v3.0.1.asciidoc
new file mode 100644
index 000000000..326cd59a4
--- /dev/null
+++ b/docs/versioned-plugins/codecs/gzip_lines-v3.0.1.asciidoc
@@ -0,0 +1,52 @@
+:plugin: gzip_lines
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-gzip_lines/blob/v3.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Gzip_lines codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec will read gzip-encoded content.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Gzip_lines Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No
+|=======================================================================
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-charset"]
+===== `charset`
+
+ * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+ * Default value is `"UTF-8"`
+
+The character encoding used in this codec. Examples include "UTF-8" and
+"CP1252".
+
+JSON requires valid UTF-8 strings, but in some cases, software that
+emits JSON does so in another encoding (nxlog, for example). In
+weird cases like this, you can set the charset setting to the
+actual encoding of the text and Logstash will convert it for you.
+
+For nxlog users, you'll want to set this to "CP1252".
+
+
diff --git a/docs/versioned-plugins/codecs/gzip_lines-v3.0.2.asciidoc b/docs/versioned-plugins/codecs/gzip_lines-v3.0.2.asciidoc
new file mode 100644
index 000000000..3a3d810b0
--- /dev/null
+++ b/docs/versioned-plugins/codecs/gzip_lines-v3.0.2.asciidoc
@@ -0,0 +1,52 @@
+:plugin: gzip_lines
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.2
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-gzip_lines/blob/v3.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Gzip_lines codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec will read gzip-encoded content.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Gzip_lines Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No
+|=======================================================================
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-charset"]
+===== `charset`
+
+ * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`,
`macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` + * Default value is `"UTF-8"` + +The character encoding used in this codec. Examples include "UTF-8" and +"CP1252" + +JSON requires valid UTF-8 strings, but in some cases, software that +emits JSON does so in another encoding (nxlog, for example). In +weird cases like this, you can set the charset setting to the +actual encoding of the text and logstash will convert it for you. + +For nxlog users, you'll want to set this to "CP1252" + + diff --git a/docs/versioned-plugins/codecs/gzip_lines-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/gzip_lines-v3.0.3.asciidoc new file mode 100644 index 000000000..c56951293 --- /dev/null +++ b/docs/versioned-plugins/codecs/gzip_lines-v3.0.3.asciidoc @@ -0,0 +1,52 @@ +:plugin: gzip_lines +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-gzip_lines/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Gzip_lines codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This codec will read gzip encoded content + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Gzip_lines Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, 
`macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` + * Default value is `"UTF-8"` + +The character encoding used in this codec. Examples include "UTF-8" and +"CP1252" + +JSON requires valid UTF-8 strings, but in some cases, software that +emits JSON does so in another encoding (nxlog, for example). In +weird cases like this, you can set the charset setting to the +actual encoding of the text and logstash will convert it for you. + +For nxlog users, you'll want to set this to "CP1252" + + diff --git a/docs/versioned-plugins/codecs/json-index.asciidoc b/docs/versioned-plugins/codecs/json-index.asciidoc new file mode 100644 index 000000000..eb1c490cb --- /dev/null +++ b/docs/versioned-plugins/codecs/json-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: json +:type: codec + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::json-v3.0.5.asciidoc[] +include::json-v3.0.4.asciidoc[] +include::json-v3.0.3.asciidoc[] + diff --git a/docs/versioned-plugins/codecs/json-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/json-v3.0.3.asciidoc new file mode 100644 index 000000000..af5c3c2b3 --- /dev/null +++ b/docs/versioned-plugins/codecs/json-v3.0.3.asciidoc @@ -0,0 +1,62 @@ +:plugin: json +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-json/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Json codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This codec may be used to decode (via inputs) and encode (via outputs) +full JSON messages. If the data being sent is a JSON array at its root multiple events will be created (one per element).
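+
+For example, decoding the following payload (an illustrative value, not taken from the plugin docs) would produce two events, one per array element:
+
+[source,json]
+----
+[{"user": "alice"}, {"user": "bob"}]
+----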
+ +If you are streaming JSON messages delimited +by '\n' then see the `json_lines` codec. + +Encoding will result in a compact JSON representation (no line terminators or indentation). + +If this codec receives a payload from an input that is not valid JSON, then +it will fall back to plain text and add a tag `_jsonparsefailure`. Upon a JSON +failure, the payload will be stored in the `message` field. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Json Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`,
`IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` + * Default value is `"UTF-8"` + +The character encoding used in this codec. Examples include "UTF-8" and +"CP1252". + +JSON requires valid UTF-8 strings, but in some cases, software that +emits JSON does so in another encoding (nxlog, for example). In +weird cases like this, you can set the `charset` setting to the +actual encoding of the text and Logstash will convert it for you. + +For nxlog users, you may want to set this to "CP1252". + + diff --git a/docs/versioned-plugins/codecs/json-v3.0.4.asciidoc b/docs/versioned-plugins/codecs/json-v3.0.4.asciidoc new file mode 100644 index 000000000..b950918dc --- /dev/null +++ b/docs/versioned-plugins/codecs/json-v3.0.4.asciidoc @@ -0,0 +1,62 @@ +:plugin: json +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-json/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Json codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This codec may be used to decode (via inputs) and encode (via outputs) +full JSON messages. If the data being sent is a JSON array at its root multiple events will be created (one per element). + +If you are streaming JSON messages delimited +by '\n' then see the `json_lines` codec. + +Encoding will result in a compact JSON representation (no line terminators or indentation). + +If this codec receives a payload from an input that is not valid JSON, then +it will fall back to plain text and add a tag `_jsonparsefailure`. Upon a JSON +failure, the payload will be stored in the `message` field.
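+
+As a quick orientation (a minimal sketch, not part of the generated reference), the codec can be attached to any input; the `stdin` input here is just an illustrative choice:
+
+[source,ruby]
+----
+input {
+  stdin {
+    codec => json
+  }
+}
+----
+
+With this pipeline, a line such as `{"action": "login"}` is decoded into an event with an `action` field, while invalid JSON falls back to plain text and is tagged `_jsonparsefailure`, as described above.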
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Json Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, 
`Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` + * Default value is `"UTF-8"` + +The character encoding used in this codec. Examples include "UTF-8" and +"CP1252". + +JSON requires valid UTF-8 strings, but in some cases, software that +emits JSON does so in another encoding (nxlog, for example). In +weird cases like this, you can set the `charset` setting to the +actual encoding of the text and Logstash will convert it for you. + +For nxlog users, you may want to set this to "CP1252". + + diff --git a/docs/versioned-plugins/codecs/json-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/json-v3.0.5.asciidoc new file mode 100644 index 000000000..a1a4c9865 --- /dev/null +++ b/docs/versioned-plugins/codecs/json-v3.0.5.asciidoc @@ -0,0 +1,62 @@ +:plugin: json +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-json/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Json codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This codec may be used to decode (via inputs) and encode (via outputs) +full JSON messages. If the data being sent is a JSON array at its root multiple events will be created (one per element). + +If you are streaming JSON messages delimited +by '\n' then see the `json_lines` codec. + +Encoding will result in a compact JSON representation (no line terminators or indentation). + +If this codec receives a payload from an input that is not valid JSON, then +it will fall back to plain text and add a tag `_jsonparsefailure`. Upon a JSON +failure, the payload will be stored in the `message` field.
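+
+For the encoding fallback described in the `charset` option below, here is a hedged sketch of decoding CP1252-encoded JSON from a source such as nxlog (the `tcp` input and port number are assumptions for illustration):
+
+[source,ruby]
+----
+input {
+  tcp {
+    port  => 5140
+    codec => json {
+      charset => "CP1252"
+    }
+  }
+}
+----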
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Json Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, 
`Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` + * Default value is `"UTF-8"` + +The character encoding used in this codec. Examples include "UTF-8" and +"CP1252". + +JSON requires valid UTF-8 strings, but in some cases, software that +emits JSON does so in another encoding (nxlog, for example). In +weird cases like this, you can set the `charset` setting to the +actual encoding of the text and Logstash will convert it for you. + +For nxlog users, you may want to set this to "CP1252". + + diff --git a/docs/versioned-plugins/codecs/json_lines-index.asciidoc b/docs/versioned-plugins/codecs/json_lines-index.asciidoc new file mode 100644 index 000000000..544c5fc48 --- /dev/null +++ b/docs/versioned-plugins/codecs/json_lines-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: json_lines +:type: codec + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::json_lines-v3.0.5.asciidoc[] +include::json_lines-v3.0.4.asciidoc[] +include::json_lines-v3.0.3.asciidoc[] + diff --git a/docs/versioned-plugins/codecs/json_lines-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/json_lines-v3.0.3.asciidoc new file mode 100644 index 000000000..e6d322800 --- /dev/null +++ b/docs/versioned-plugins/codecs/json_lines-v3.0.3.asciidoc @@ -0,0 +1,67 @@ +:plugin: json_lines +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-json_lines/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Json_lines codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This codec will decode streamed JSON that is newline delimited. +Encoding will emit a single JSON string ending in a `@delimiter`. + +NOTE: Do not use this codec if your source input is line-oriented JSON, for +example, redis or file inputs. Rather, use the json codec. + +More info: This codec is expecting to receive a stream (string) of newline +terminated lines.
The file input will produce a line string without a newline. +Therefore this codec cannot work with line oriented inputs. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Json_lines Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, 
`macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` + * Default value is `"UTF-8"` + +The character encoding used in this codec. Examples include `UTF-8` and +`CP1252` + +JSON requires valid `UTF-8` strings, but in some cases, software that +emits JSON does so in another encoding (nxlog, for example). In +weird cases like this, you can set the charset setting to the +actual encoding of the text and logstash will convert it for you. + +For nxlog users, you'll want to set this to `CP1252` + +[id="{version}-plugins-{type}s-{plugin}-delimiter"] +===== `delimiter` + + * Value type is <> + * Default value is `"\n"` + +Change the delimiter that separates lines + + diff --git a/docs/versioned-plugins/codecs/json_lines-v3.0.4.asciidoc b/docs/versioned-plugins/codecs/json_lines-v3.0.4.asciidoc new file mode 100644 index 000000000..2d8b0df7f --- /dev/null +++ b/docs/versioned-plugins/codecs/json_lines-v3.0.4.asciidoc @@ -0,0 +1,67 @@ +:plugin: json_lines +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-json_lines/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Json_lines codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This codec will decode streamed JSON that is newline delimited. +Encoding will emit a single JSON string ending in a `@delimiter`. + +NOTE: Do not use this codec if your source input is line-oriented JSON, for +example, redis or file inputs. Rather, use the json codec. + +More info: This codec is expecting to receive a stream (string) of newline +terminated lines. The file input will produce a line string without a newline. +Therefore this codec cannot work with line oriented inputs.
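+
+To make the streaming use case concrete, here is a minimal sketch (the `tcp` input and port are illustrative assumptions, not part of the generated reference):
+
+[source,ruby]
+----
+input {
+  tcp {
+    port  => 5044
+    codec => json_lines
+  }
+}
+----
+
+Each newline-terminated JSON document arriving on the stream becomes one event; as noted above, line-oriented inputs such as `file` should use the `json` codec instead.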
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Json_lines Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, 
`GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` + * Default value is `"UTF-8"` + +The character encoding used in this codec. Examples include `UTF-8` and +`CP1252` + +JSON requires valid `UTF-8` strings, but in some cases, software that +emits JSON does so in another encoding (nxlog, for example). In +weird cases like this, you can set the charset setting to the +actual encoding of the text and logstash will convert it for you. + +For nxlog users, you'll want to set this to `CP1252` + +[id="{version}-plugins-{type}s-{plugin}-delimiter"] +===== `delimiter` + + * Value type is <> + * Default value is `"\n"` + +Change the delimiter that separates lines + + diff --git a/docs/versioned-plugins/codecs/json_lines-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/json_lines-v3.0.5.asciidoc new file mode 100644 index 000000000..f8031bc0b --- /dev/null +++ b/docs/versioned-plugins/codecs/json_lines-v3.0.5.asciidoc @@ -0,0 +1,67 @@ +:plugin: json_lines +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-json_lines/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Json_lines codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This codec will decode streamed JSON that is newline delimited. +Encoding will emit a single JSON string ending in a `@delimiter`. + +NOTE: Do not use this codec if your source input is line-oriented JSON, for +example, redis or file inputs. Rather, use the json codec. + +More info: This codec is expecting to receive a stream (string) of newline +terminated lines. The file input will produce a line string without a newline. +Therefore this codec cannot work with line oriented inputs.
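+
+On the encoding side, the `delimiter` option documented below can be overridden; a hedged sketch follows (the host and port are placeholder values):
+
+[source,ruby]
+----
+output {
+  tcp {
+    host  => "127.0.0.1"
+    port  => 9000
+    codec => json_lines {
+      delimiter => "\r\n"
+    }
+  }
+}
+----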
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Json_lines Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, 
`GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` + * Default value is `"UTF-8"` + +The character encoding used in this codec. Examples include `UTF-8` and +`CP1252` + +JSON requires valid `UTF-8` strings, but in some cases, software that +emits JSON does so in another encoding (nxlog, for example). In +weird cases like this, you can set the charset setting to the +actual encoding of the text and logstash will convert it for you. + +For nxlog users, you'll want to set this to `CP1252` + +[id="{version}-plugins-{type}s-{plugin}-delimiter"] +===== `delimiter` + + * Value type is <> + * Default value is `"\n"` + +Change the delimiter that separates lines + + diff --git a/docs/versioned-plugins/codecs/json_pretty-index.asciidoc b/docs/versioned-plugins/codecs/json_pretty-index.asciidoc new file mode 100644 index 000000000..7ee81c2f9 --- /dev/null +++ b/docs/versioned-plugins/codecs/json_pretty-index.asciidoc @@ -0,0 +1,10 @@ +:plugin: json_pretty +:type: codec + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +|======================================================================= + + diff --git a/docs/versioned-plugins/codecs/line-index.asciidoc b/docs/versioned-plugins/codecs/line-index.asciidoc new file mode 100644 index 000000000..2b4cd9dd4 --- /dev/null +++ b/docs/versioned-plugins/codecs/line-index.asciidoc @@ -0,0 +1,22 @@ +:plugin: line +:type: codec + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-12-19 +| <> | 2017-12-15 +| <> | 2017-12-12 +| <> | 2017-11-07 +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::line-v3.0.8.asciidoc[] +include::line-v3.0.7.asciidoc[] +include::line-v3.0.6.asciidoc[] +include::line-v3.0.5.asciidoc[] +include::line-v3.0.4.asciidoc[] +include::line-v3.0.3.asciidoc[] + diff --git a/docs/versioned-plugins/codecs/line-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/line-v3.0.3.asciidoc new file mode 100644 index 000000000..3610e02c7 --- /dev/null +++ b/docs/versioned-plugins/codecs/line-v3.0.3.asciidoc @@ -0,0 +1,72 @@ +:plugin: line +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-line/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Line codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Line-oriented text data. + +Decoding behavior: Only whole line events will be emitted. + +Encoding behavior: Each event will be emitted with a trailing newline. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Line Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`,
`ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` + * Default value is `"UTF-8"` + +The character encoding used in this input. Examples include `UTF-8` +and `cp1252` + +This setting is useful if your log files are in `Latin-1` (aka `cp1252`) +or in another character set other than `UTF-8`. + +This only affects "plain" format logs since json is `UTF-8` already. + +[id="{version}-plugins-{type}s-{plugin}-delimiter"] +===== `delimiter` + + * Value type is <> + * Default value is `"\n"` + +Change the delimiter that separates lines + +[id="{version}-plugins-{type}s-{plugin}-format"] +===== `format` + + * Value type is <> + * There is no default value for this setting. + +Set the desired text format for encoding. + + diff --git a/docs/versioned-plugins/codecs/line-v3.0.4.asciidoc b/docs/versioned-plugins/codecs/line-v3.0.4.asciidoc new file mode 100644 index 000000000..3fd05cd70 --- /dev/null +++ b/docs/versioned-plugins/codecs/line-v3.0.4.asciidoc @@ -0,0 +1,72 @@ +:plugin: line +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-line/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Line codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Line-oriented text data. + +Decoding behavior: Only whole line events will be emitted. 
+ +Encoding behavior: Each event will be emitted with a trailing newline. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Line Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, 
`macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` + * Default value is `"UTF-8"` + +The character encoding used in this input. Examples include `UTF-8` +and `cp1252` + +This setting is useful if your log files are in `Latin-1` (aka `cp1252`) +or in another character set other than `UTF-8`. + +This only affects "plain" format logs since json is `UTF-8` already. + +[id="{version}-plugins-{type}s-{plugin}-delimiter"] +===== `delimiter` + + * Value type is <> + * Default value is `"\n"` + +Change the delimiter that separates lines + +[id="{version}-plugins-{type}s-{plugin}-format"] +===== `format` + + * Value type is <> + * There is no default value for this setting. + +Set the desired text format for encoding. + + diff --git a/docs/versioned-plugins/codecs/line-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/line-v3.0.5.asciidoc new file mode 100644 index 000000000..af48f373b --- /dev/null +++ b/docs/versioned-plugins/codecs/line-v3.0.5.asciidoc @@ -0,0 +1,72 @@ +:plugin: line +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-line/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Line codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Line-oriented text data. + +Decoding behavior: Only whole line events will be emitted. + +Encoding behavior: Each event will be emitted with a trailing newline. 
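+
+As a quick, hedged illustration of basic usage (the TCP port and the
+delimiter string below are only illustrative, not part of this
+documentation), the following reads a stream whose records are separated
+by a literal `|END|` marker instead of newlines:
+[source,ruby]
+    input {
+      tcp {
+        port => 3333               # illustrative port
+        codec => line {
+          delimiter => "|END|"     # split events on this marker instead of "\n"
+        }
+      }
+    }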
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Line Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, 
`stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` + * Default value is `"UTF-8"` + +The character encoding used in this input. Examples include `UTF-8` +and `cp1252` + +This setting is useful if your log files are in `Latin-1` (aka `cp1252`) +or in another character set other than `UTF-8`. + +This only affects "plain" format logs since json is `UTF-8` already. + +[id="{version}-plugins-{type}s-{plugin}-delimiter"] +===== `delimiter` + + * Value type is <> + * Default value is `"\n"` + +Change the delimiter that separates lines + +[id="{version}-plugins-{type}s-{plugin}-format"] +===== `format` + + * Value type is <> + * There is no default value for this setting. + +Set the desired text format for encoding. + + diff --git a/docs/versioned-plugins/codecs/line-v3.0.6.asciidoc b/docs/versioned-plugins/codecs/line-v3.0.6.asciidoc new file mode 100644 index 000000000..f290efd15 --- /dev/null +++ b/docs/versioned-plugins/codecs/line-v3.0.6.asciidoc @@ -0,0 +1,72 @@ +:plugin: line +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.6 +:release_date: 2017-12-12 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-line/blob/v3.0.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Line codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Line-oriented text data. + +Decoding behavior: Only whole line events will be emitted. + +Encoding behavior: Each event will be emitted with a trailing newline. 
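+
+For instance, a minimal sketch of the decode side (the path is
+hypothetical): if an application writes `Latin-1` logs, the `charset`
+option described below converts each line to UTF-8 as it is decoded:
+[source,ruby]
+    input {
+      file {
+        path => "/var/log/legacy/app.log"   # hypothetical path
+        codec => line {
+          charset => "ISO-8859-1"           # decode Latin-1 input into UTF-8 events
+        }
+      }
+    }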
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Line Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, 
`stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` + * Default value is `"UTF-8"` + +The character encoding used in this input. Examples include `UTF-8` +and `cp1252` + +This setting is useful if your log files are in `Latin-1` (aka `cp1252`) +or in another character set other than `UTF-8`. + +This only affects "plain" format logs since json is `UTF-8` already. + +[id="{version}-plugins-{type}s-{plugin}-delimiter"] +===== `delimiter` + + * Value type is <> + * Default value is `"\n"` + +Change the delimiter that separates lines + +[id="{version}-plugins-{type}s-{plugin}-format"] +===== `format` + + * Value type is <> + * There is no default value for this setting. + +Set the desired text format for encoding. + + diff --git a/docs/versioned-plugins/codecs/line-v3.0.7.asciidoc b/docs/versioned-plugins/codecs/line-v3.0.7.asciidoc new file mode 100644 index 000000000..72d714510 --- /dev/null +++ b/docs/versioned-plugins/codecs/line-v3.0.7.asciidoc @@ -0,0 +1,72 @@ +:plugin: line +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.7 +:release_date: 2017-12-15 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-line/blob/v3.0.7/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Line codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Line-oriented text data. + +Decoding behavior: Only whole line events will be emitted. + +Encoding behavior: Each event will be emitted with a trailing newline. 
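+
+On the encode side, the `format` option described below renders each event
+through Logstash's sprintf field references before the trailing newline is
+added. A minimal sketch, assuming events carry the usual `host` and
+`message` fields (the output path is hypothetical):
+[source,ruby]
+    output {
+      file {
+        path => "/tmp/plain.log"            # hypothetical path
+        codec => line {
+          format => "%{host}: %{message}"   # one formatted line per event
+        }
+      }
+    }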
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Line Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, 
`stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` + * Default value is `"UTF-8"` + +The character encoding used in this input. Examples include `UTF-8` +and `cp1252` + +This setting is useful if your log files are in `Latin-1` (aka `cp1252`) +or in another character set other than `UTF-8`. + +This only affects "plain" format logs since json is `UTF-8` already. + +[id="{version}-plugins-{type}s-{plugin}-delimiter"] +===== `delimiter` + + * Value type is <> + * Default value is `"\n"` + +Change the delimiter that separates lines + +[id="{version}-plugins-{type}s-{plugin}-format"] +===== `format` + + * Value type is <> + * There is no default value for this setting. + +Set the desired text format for encoding. + + diff --git a/docs/versioned-plugins/codecs/line-v3.0.8.asciidoc b/docs/versioned-plugins/codecs/line-v3.0.8.asciidoc new file mode 100644 index 000000000..2627ec8ad --- /dev/null +++ b/docs/versioned-plugins/codecs/line-v3.0.8.asciidoc @@ -0,0 +1,72 @@ +:plugin: line +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.8 +:release_date: 2017-12-19 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-line/blob/v3.0.8/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Line codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Line-oriented text data. + +Decoding behavior: Only whole line events will be emitted. + +Encoding behavior: Each event will be emitted with a trailing newline. 
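+
+For quick experiments, a hedged sketch: pipe text into Logstash and watch
+each whole line become an event (a partial trailing line is held until its
+newline arrives, per the decoding behavior above):
+[source,ruby]
+    input {
+      stdin {
+        codec => line { }   # one event per complete line
+      }
+    }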
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Line Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, 
`stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` + * Default value is `"UTF-8"` + +The character encoding used in this input. Examples include `UTF-8` +and `cp1252` + +This setting is useful if your log files are in `Latin-1` (aka `cp1252`) +or in another character set other than `UTF-8`. + +This only affects "plain" format logs since json is `UTF-8` already. + +[id="{version}-plugins-{type}s-{plugin}-delimiter"] +===== `delimiter` + + * Value type is <> + * Default value is `"\n"` + +Change the delimiter that separates lines + +[id="{version}-plugins-{type}s-{plugin}-format"] +===== `format` + + * Value type is <> + * There is no default value for this setting. + +Set the desired text format for encoding. + + diff --git a/docs/versioned-plugins/codecs/msgpack-index.asciidoc b/docs/versioned-plugins/codecs/msgpack-index.asciidoc new file mode 100644 index 000000000..893300d30 --- /dev/null +++ b/docs/versioned-plugins/codecs/msgpack-index.asciidoc @@ -0,0 +1,18 @@ +:plugin: msgpack +:type: codec + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-10-27 +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::msgpack-v3.0.7.asciidoc[] +include::msgpack-v3.0.6.asciidoc[] +include::msgpack-v3.0.5.asciidoc[] +include::msgpack-v3.0.3.asciidoc[] + diff --git a/docs/versioned-plugins/codecs/msgpack-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/msgpack-v3.0.3.asciidoc new file mode 100644 index 000000000..090581a4d --- /dev/null +++ b/docs/versioned-plugins/codecs/msgpack-v3.0.3.asciidoc @@ -0,0 +1,44 @@ +:plugin: msgpack +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-msgpack/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Msgpack codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec reads and produces MessagePack encoded content.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Msgpack Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No
+|=======================================================================
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-format"]
+===== `format`
+
+ * Value type is <>
+ * Default value is `nil`
+
+
+
diff --git a/docs/versioned-plugins/codecs/msgpack-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/msgpack-v3.0.5.asciidoc
new file mode 100644
index 000000000..a386d87ae
--- /dev/null
+++ b/docs/versioned-plugins/codecs/msgpack-v3.0.5.asciidoc
@@ -0,0 +1,44 @@
+:plugin: msgpack
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-msgpack/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Msgpack codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec reads and produces MessagePack encoded content.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Msgpack Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No
+|=======================================================================
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-format"]
+===== `format`
+
+ * Value type is <>
+ * Default value is `nil`
+
+
+
diff --git a/docs/versioned-plugins/codecs/msgpack-v3.0.6.asciidoc b/docs/versioned-plugins/codecs/msgpack-v3.0.6.asciidoc
new file mode 100644
index 000000000..eec715359
--- /dev/null
+++ b/docs/versioned-plugins/codecs/msgpack-v3.0.6.asciidoc
@@ -0,0 +1,44 @@
+:plugin: msgpack
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.6
+:release_date: 2017-10-27
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-msgpack/blob/v3.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Msgpack codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec reads and produces MessagePack encoded content.
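+
+A minimal sketch of typical usage (the port is illustrative, not part of
+this documentation): decode MessagePack payloads arriving over TCP into
+events:
+[source,ruby]
+    input {
+      tcp {
+        port => 5000            # illustrative port
+        codec => msgpack { }    # each MessagePack payload becomes an event
+      }
+    }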
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Msgpack Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-format"] +===== `format` + + * Value type is <> + * Default value is `nil` + + + + diff --git a/docs/versioned-plugins/codecs/msgpack-v3.0.7.asciidoc b/docs/versioned-plugins/codecs/msgpack-v3.0.7.asciidoc new file mode 100644 index 000000000..61c6426c2 --- /dev/null +++ b/docs/versioned-plugins/codecs/msgpack-v3.0.7.asciidoc @@ -0,0 +1,44 @@ +:plugin: msgpack +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.7 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-msgpack/blob/v3.0.7/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Msgpack codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This codec reads and produces MessagePack encoded content. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Msgpack Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-format"] +===== `format` + + * Value type is <> + * Default value is `nil` + + + + diff --git a/docs/versioned-plugins/codecs/multiline-index.asciidoc b/docs/versioned-plugins/codecs/multiline-index.asciidoc new file mode 100644 index 000000000..cd4bda72f --- /dev/null +++ b/docs/versioned-plugins/codecs/multiline-index.asciidoc @@ -0,0 +1,18 @@ +:plugin: multiline +:type: codec + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-15 +| <> | 2017-08-07 +| <> | 2017-06-23 +|======================================================================= + +include::multiline-v3.0.8.asciidoc[] +include::multiline-v3.0.7.asciidoc[] +include::multiline-v3.0.6.asciidoc[] +include::multiline-v3.0.5.asciidoc[] + diff --git a/docs/versioned-plugins/codecs/multiline-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/multiline-v3.0.5.asciidoc new file mode 100644 index 000000000..979dccded --- /dev/null +++ b/docs/versioned-plugins/codecs/multiline-v3.0.5.asciidoc @@ -0,0 +1,222 @@ +:plugin: multiline +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-multiline/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Multiline codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The multiline codec will collapse multiline messages and merge them into a
+single event.
+
+IMPORTANT: If you are using a Logstash input plugin that supports multiple
+hosts, such as the <> input plugin, you should not use
+the multiline codec to handle multiline events. Doing so may result in the
+mixing of streams and corrupted event data. In this situation, you need to
+handle multiline events before sending the event data to Logstash.
+
+The original goal of this codec was to allow joining of multiline messages
+from files into a single event. For example, joining Java exception and
+stacktrace messages into a single event.
+
+The config looks like this:
+[source,ruby]
+    input {
+      stdin {
+        codec => multiline {
+          pattern => "pattern, a regexp"
+          negate => "true" or "false"
+          what => "previous" or "next"
+        }
+      }
+    }
+
+The `pattern` should match what you believe to be an indicator that the field
+is part of a multi-line event.
+
+The `what` must be `previous` or `next` and indicates the relation
+to the multi-line event.
+
+The `negate` can be `true` or `false` (defaults to `false`). If `true`, a
+message not matching the pattern will constitute a match of the multiline
+filter and the `what` will be applied (and vice versa).
+
+For example, Java stack traces are multiline and usually have the message
+starting at the far-left, with each subsequent line indented. Do this:
+[source,ruby]
+    input {
+      stdin {
+        codec => multiline {
+          pattern => "^\s"
+          what => "previous"
+        }
+      }
+    }
+
+This says that any line starting with whitespace belongs to the previous line.
+
+Another example is to merge lines not starting with a date up to the previous
+line:
+[source,ruby]
+    input {
+      file {
+        path => "/var/log/someapp.log"
+        codec => multiline {
+          # Grok pattern names are valid! :)
+          pattern => "^%{TIMESTAMP_ISO8601} "
+          negate => true
+          what => "previous"
+        }
+      }
+    }
+
+This says that any line not starting with a timestamp should be merged with the previous line.
+
+One more common example is C line continuations (backslash). Here's how to do that:
+[source,ruby]
+    input {
+      stdin {
+        codec => multiline {
+          pattern => "\\$"
+          what => "next"
+        }
+      }
+    }
+
+This says that any line ending with a backslash should be combined with the
+following line.
+ + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Multiline Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-auto_flush_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +| <<{version}-plugins-{type}s-{plugin}-max_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_lines>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-multiline_tag>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-negate>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pattern>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-what>> |<>, one of `["previous", "next"]`|Yes +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-auto_flush_interval"] +===== `auto_flush_interval` + + * Value type is <> + * There is no default value for this setting. + +The accumulation of multiple lines will be converted to an event when either a +matching new line is seen or there has been no new data appended for this many +seconds. No default. If unset, no auto_flush. 
Units: seconds
+
+[id="{version}-plugins-{type}s-{plugin}-charset"]
+===== `charset`
+
+ * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+ * Default value is `"UTF-8"`
+
+The character encoding used in this input. Examples include `UTF-8`
+and `cp1252`.
+
+This setting is useful if your log files are in `Latin-1` (aka `cp1252`)
+or in another character set other than `UTF-8`.
+
+This only affects "plain" format logs since JSON is `UTF-8` already.
+
+[id="{version}-plugins-{type}s-{plugin}-max_bytes"]
+===== `max_bytes`
+
+ * Value type is <>
+ * Default value is `"10 MiB"`
+
+The accumulation of events can make Logstash exit with an out-of-memory error
+if event boundaries are not correctly defined. This setting makes sure that
+multiline events are flushed after reaching a number of bytes; it is used in
+combination with `max_lines`.
+
+[id="{version}-plugins-{type}s-{plugin}-max_lines"]
+===== `max_lines`
+
+ * Value type is <>
+ * Default value is `500`
+
+The accumulation of events can make Logstash exit with an out-of-memory error
+if event boundaries are not correctly defined. This setting makes sure that
+multiline events are flushed after reaching a number of lines; it is used in
+combination with `max_bytes`.
+
+[id="{version}-plugins-{type}s-{plugin}-multiline_tag"]
+===== `multiline_tag`
+
+ * Value type is <>
+ * Default value is `"multiline"`
+
+Tag multiline events with a given tag. This tag will only be added
+to events that actually have multiple lines in them.
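+
+Pulling the flush-related options together, here is a hedged sketch (the
+path is hypothetical) that joins indented stack trace lines onto the
+preceding timestamped line, flushes a pending event after five idle
+seconds, and tags only events that were actually merged:
+[source,ruby]
+    input {
+      file {
+        path => "/var/log/app/errors.log"   # hypothetical path
+        codec => multiline {
+          pattern => "^%{TIMESTAMP_ISO8601} "
+          negate => true
+          what => "previous"
+          auto_flush_interval => 5          # flush after 5 seconds with no new lines
+          max_lines => 1000                 # bound accumulation to avoid OOM
+          multiline_tag => "joined"         # applied only to merged events
+        }
+      }
+    }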
+ +[id="{version}-plugins-{type}s-{plugin}-negate"] +===== `negate` + + * Value type is <> + * Default value is `false` + +Negate the regexp pattern ('if not matched'). + +[id="{version}-plugins-{type}s-{plugin}-pattern"] +===== `pattern` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The regular expression to match. + +[id="{version}-plugins-{type}s-{plugin}-patterns_dir"] +===== `patterns_dir` + + * Value type is <> + * Default value is `[]` + +Logstash ships by default with a bunch of patterns, so you don't +necessarily need to define this yourself unless you are adding additional +patterns. + +Pattern files are plain text with format: +[source,ruby] + NAME PATTERN + +For example: +[source,ruby] + NUMBER \d+ + +[id="{version}-plugins-{type}s-{plugin}-what"] +===== `what` + + * This is a required setting. + * Value can be any of: `previous`, `next` + * There is no default value for this setting. + +If the pattern matched, does event belong to the next or previous event? + + diff --git a/docs/versioned-plugins/codecs/multiline-v3.0.6.asciidoc b/docs/versioned-plugins/codecs/multiline-v3.0.6.asciidoc new file mode 100644 index 000000000..cf15d0e8f --- /dev/null +++ b/docs/versioned-plugins/codecs/multiline-v3.0.6.asciidoc @@ -0,0 +1,222 @@ +:plugin: multiline +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.6 +:release_date: 2017-08-07 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-multiline/blob/v3.0.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Multiline codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The multiline codec will collapse multiline messages and merge them into a +single event. + +IMPORTANT: If you are using a Logstash input plugin that supports multiple +hosts, such as the <> input plugin, you should not use +the multiline codec to handle multiline events. Doing so may result in the +mixing of streams and corrupted event data. In this situation, you need to +handle multiline events before sending the event data to Logstash. + +The original goal of this codec was to allow joining of multiline messages +from files into a single event. For example, joining Java exception and +stacktrace messages into a single event. + +The config looks like this: +[source,ruby] + input { + stdin { + codec => multiline { + pattern => "pattern, a regexp" + negate => "true" or "false" + what => "previous" or "next" + } + } + } + +The `pattern` should match what you believe to be an indicator that the field +is part of a multi-line event. + +The `what` must be `previous` or `next` and indicates the relation +to the multi-line event. + +The `negate` can be `true` or `false` (defaults to `false`). If `true`, a +message not matching the pattern will constitute a match of the multiline +filter and the `what` will be applied. (vice-versa is also true) + +For example, Java stack traces are multiline and usually have the message +starting at the far-left, with each subsequent line indented. 
Do this: +[source,ruby] + input { + stdin { + codec => multiline { + pattern => "^\s" + what => "previous" + } + } + } + +This says that any line starting with whitespace belongs to the previous line. + +Another example is to merge lines not starting with a date up to the previous +line.. +[source,ruby] + input { + file { + path => "/var/log/someapp.log" + codec => multiline { + # Grok pattern names are valid! :) + pattern => "^%{TIMESTAMP_ISO8601} " + negate => true + what => "previous" + } + } + } + +This says that any line not starting with a timestamp should be merged with the previous line. + +One more common example is C line continuations (backslash). Here's how to do that: +[source,ruby] + input { + stdin { + codec => multiline { + pattern => "\\$" + what => "next" + } + } + } + +This says that any line ending with a backslash should be combined with the +following line. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Multiline Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-auto_flush_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +| <<{version}-plugins-{type}s-{plugin}-max_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_lines>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-multiline_tag>> |<>|No +| 
<<{version}-plugins-{type}s-{plugin}-negate>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pattern>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-what>> |<>, one of `["previous", "next"]`|Yes +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-auto_flush_interval"] +===== `auto_flush_interval` + + * Value type is <> + * There is no default value for this setting. + +The accumulation of multiple lines will be converted to an event when either a +matching new line is seen or there has been no new data appended for this many +seconds. No default. If unset, no auto_flush. Units: seconds + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` + * Default value is `"UTF-8"` + +The character encoding used in this input. Examples include `UTF-8` +and `cp1252` + +This setting is useful if your log files are in `Latin-1` (aka `cp1252`) +or in another character set other than `UTF-8`. + +This only affects "plain" format logs since JSON is `UTF-8` already. + +[id="{version}-plugins-{type}s-{plugin}-max_bytes"] +===== `max_bytes` + + * Value type is <> + * Default value is `"10 MiB"` + +The accumulation of events can make logstash exit with an out of memory error +if event boundaries are not correctly defined. 
This setting makes sure to flush
+multiline events after reaching a number of bytes. It is used in combination
+with `max_lines`.
+
+[id="{version}-plugins-{type}s-{plugin}-max_lines"]
+===== `max_lines`
+
+ * Value type is <>
+ * Default value is `500`
+
+The accumulation of events can make Logstash exit with an out of memory error
+if event boundaries are not correctly defined. This setting makes sure to flush
+multiline events after reaching a number of lines. It is used in combination
+with `max_bytes`.
+
+[id="{version}-plugins-{type}s-{plugin}-multiline_tag"]
+===== `multiline_tag`
+
+ * Value type is <>
+ * Default value is `"multiline"`
+
+Tag multiline events with a given tag. This tag will only be added
+to events that actually have multiple lines in them.
+
+[id="{version}-plugins-{type}s-{plugin}-negate"]
+===== `negate`
+
+ * Value type is <>
+ * Default value is `false`
+
+Negate the regexp pattern. When `true`, a message that does *not* match the
+pattern constitutes a match.
+
+[id="{version}-plugins-{type}s-{plugin}-pattern"]
+===== `pattern`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The regular expression to match.
+
+[id="{version}-plugins-{type}s-{plugin}-patterns_dir"]
+===== `patterns_dir`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Logstash ships by default with a bunch of patterns, so you don't
+necessarily need to define this yourself unless you are adding additional
+patterns.
+
+Pattern files are plain text with the format:
+[source,ruby]
+    NAME PATTERN
+
+For example:
+[source,ruby]
+    NUMBER \d+
+
+[id="{version}-plugins-{type}s-{plugin}-what"]
+===== `what`
+
+ * This is a required setting.
+ * Value can be any of: `previous`, `next`
+ * There is no default value for this setting.
+
+If the pattern matches, does the event belong to the next or previous event?
+
+
diff --git a/docs/versioned-plugins/codecs/multiline-v3.0.7.asciidoc b/docs/versioned-plugins/codecs/multiline-v3.0.7.asciidoc
new file mode 100644
index 000000000..83af451cf
--- /dev/null
+++ b/docs/versioned-plugins/codecs/multiline-v3.0.7.asciidoc
@@ -0,0 +1,222 @@
+:plugin: multiline
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.7
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-multiline/blob/v3.0.7/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Multiline codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The multiline codec will collapse multiline messages and merge them into a
+single event.
+
+IMPORTANT: If you are using a Logstash input plugin that supports multiple
+hosts, such as the <> input plugin, you should not use
+the multiline codec to handle multiline events. Doing so may result in the
+mixing of streams and corrupted event data. In this situation, you need to
+handle multiline events before sending the event data to Logstash.
+
+The original goal of this codec was to allow joining of multiline messages
+from files into a single event. For example, joining Java exception and
+stacktrace messages into a single event.
+ +The config looks like this: +[source,ruby] + input { + stdin { + codec => multiline { + pattern => "pattern, a regexp" + negate => "true" or "false" + what => "previous" or "next" + } + } + } + +The `pattern` should match what you believe to be an indicator that the field +is part of a multi-line event. + +The `what` must be `previous` or `next` and indicates the relation +to the multi-line event. + +The `negate` can be `true` or `false` (defaults to `false`). If `true`, a +message not matching the pattern will constitute a match of the multiline +filter and the `what` will be applied. (vice-versa is also true) + +For example, Java stack traces are multiline and usually have the message +starting at the far-left, with each subsequent line indented. Do this: +[source,ruby] + input { + stdin { + codec => multiline { + pattern => "^\s" + what => "previous" + } + } + } + +This says that any line starting with whitespace belongs to the previous line. + +Another example is to merge lines not starting with a date up to the previous +line.. +[source,ruby] + input { + file { + path => "/var/log/someapp.log" + codec => multiline { + # Grok pattern names are valid! :) + pattern => "^%{TIMESTAMP_ISO8601} " + negate => true + what => "previous" + } + } + } + +This says that any line not starting with a timestamp should be merged with the previous line. + +One more common example is C line continuations (backslash). Here's how to do that: +[source,ruby] + input { + stdin { + codec => multiline { + pattern => "\\$" + what => "next" + } + } + } + +This says that any line ending with a backslash should be combined with the +following line. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Multiline Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-auto_flush_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", 
"EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +| <<{version}-plugins-{type}s-{plugin}-max_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_lines>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-multiline_tag>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-negate>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pattern>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-what>> |<>, one of `["previous", "next"]`|Yes +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-auto_flush_interval"] +===== `auto_flush_interval` + + * Value type is <> + * There is no default value for this setting. + +The accumulation of multiple lines will be converted to an event when either a +matching new line is seen or there has been no new data appended for this many +seconds. No default. If unset, no auto_flush. Units: seconds + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, 
`csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+ * Default value is `"UTF-8"`
+
+The character encoding used in this input. Examples include `UTF-8`
+and `cp1252`.
+
+This setting is useful if your log files are in `Latin-1` (aka `cp1252`)
+or in another character set other than `UTF-8`.
+
+This only affects "plain" format logs since JSON is `UTF-8` already.
+
+[id="{version}-plugins-{type}s-{plugin}-max_bytes"]
+===== `max_bytes`
+
+ * Value type is <>
+ * Default value is `"10 MiB"`
+
+The accumulation of events can make Logstash exit with an out of memory error
+if event boundaries are not correctly defined. This setting makes sure to flush
+multiline events after reaching a number of bytes. It is used in combination
+with `max_lines`.
+
+[id="{version}-plugins-{type}s-{plugin}-max_lines"]
+===== `max_lines`
+
+ * Value type is <>
+ * Default value is `500`
+
+The accumulation of events can make Logstash exit with an out of memory error
+if event boundaries are not correctly defined. This setting makes sure to flush
+multiline events after reaching a number of lines. It is used in combination
+with `max_bytes`.
+
+[id="{version}-plugins-{type}s-{plugin}-multiline_tag"]
+===== `multiline_tag`
+
+ * Value type is <>
+ * Default value is `"multiline"`
+
+Tag multiline events with a given tag. This tag will only be added
+to events that actually have multiple lines in them.
+
+[id="{version}-plugins-{type}s-{plugin}-negate"]
+===== `negate`
+
+ * Value type is <>
+ * Default value is `false`
+
+Negate the regexp pattern. When `true`, a message that does *not* match the
+pattern constitutes a match.
+
+[id="{version}-plugins-{type}s-{plugin}-pattern"]
+===== `pattern`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The regular expression to match.
+
+[id="{version}-plugins-{type}s-{plugin}-patterns_dir"]
+===== `patterns_dir`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Logstash ships by default with a bunch of patterns, so you don't
+necessarily need to define this yourself unless you are adding additional
+patterns.
+
+Pattern files are plain text with the format:
+[source,ruby]
+    NAME PATTERN
+
+For example:
+[source,ruby]
+    NUMBER \d+
+
+[id="{version}-plugins-{type}s-{plugin}-what"]
+===== `what`
+
+ * This is a required setting.
+ * Value can be any of: `previous`, `next`
+ * There is no default value for this setting.
+
+If the pattern matches, does the event belong to the next or previous event?
+
+
diff --git a/docs/versioned-plugins/codecs/multiline-v3.0.8.asciidoc b/docs/versioned-plugins/codecs/multiline-v3.0.8.asciidoc
new file mode 100644
index 000000000..a67f3675b
--- /dev/null
+++ b/docs/versioned-plugins/codecs/multiline-v3.0.8.asciidoc
@@ -0,0 +1,222 @@
+:plugin: multiline
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.8
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-multiline/blob/v3.0.8/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Multiline codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The multiline codec will collapse multiline messages and merge them into a
+single event.
+
+IMPORTANT: If you are using a Logstash input plugin that supports multiple
+hosts, such as the <> input plugin, you should not use
+the multiline codec to handle multiline events. Doing so may result in the
+mixing of streams and corrupted event data. In this situation, you need to
+handle multiline events before sending the event data to Logstash.
+
+The original goal of this codec was to allow joining of multiline messages
+from files into a single event. For example, joining Java exception and
+stacktrace messages into a single event.
+
+The config looks like this:
+[source,ruby]
+    input {
+      stdin {
+        codec => multiline {
+          pattern => "pattern, a regexp"
+          negate => "true" or "false"
+          what => "previous" or "next"
+        }
+      }
+    }
+
+The `pattern` should match what you believe to be an indicator that the field
+is part of a multi-line event.
+
+The `what` must be `previous` or `next` and indicates the relation
+to the multi-line event.
+
+The `negate` can be `true` or `false` (defaults to `false`). If `true`, a
+message not matching the pattern will constitute a match of the multiline
+filter and the `what` will be applied. If `false`, a message that does match
+the pattern constitutes a match.
+
+For example, Java stack traces are multiline and usually have the message
+starting at the far-left, with each subsequent line indented. Do this:
+[source,ruby]
+    input {
+      stdin {
+        codec => multiline {
+          pattern => "^\s"
+          what => "previous"
+        }
+      }
+    }
+
+This says that any line starting with whitespace belongs to the previous line.
+
+Another example is to merge lines not starting with a date up to the previous
+line:
+[source,ruby]
+    input {
+      file {
+        path => "/var/log/someapp.log"
+        codec => multiline {
+          # Grok pattern names are valid! :)
+          pattern => "^%{TIMESTAMP_ISO8601} "
+          negate => true
+          what => "previous"
+        }
+      }
+    }
+
+This says that any line not starting with a timestamp should be merged with the previous line.
+
+One more common example is C line continuations (backslash). Here's how to do that:
+[source,ruby]
+    input {
+      stdin {
+        codec => multiline {
+          pattern => "\\$"
+          what => "next"
+        }
+      }
+    }
+
+This says that any line ending with a backslash should be combined with the
+following line.
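+
+If the last lines of a stream are never followed by a matching line (for
+example, a stack trace at the very end of a file), the accumulated lines can
+sit unflushed. A minimal sketch that bounds this wait with
+`auto_flush_interval`; the 2-second value is only an illustration:
+[source,ruby]
+    input {
+      stdin {
+        codec => multiline {
+          pattern => "^\s"
+          what => "previous"
+          # flush the pending multiline event after 2 idle seconds
+          auto_flush_interval => 2
+        }
+      }
+    }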
+ + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Multiline Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-auto_flush_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +| <<{version}-plugins-{type}s-{plugin}-max_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_lines>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-multiline_tag>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-negate>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pattern>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-what>> |<>, one of `["previous", "next"]`|Yes +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-auto_flush_interval"] +===== `auto_flush_interval` + + * Value type is <> + * There is no default value for this setting. + +The accumulation of multiple lines will be converted to an event when either a +matching new line is seen or there has been no new data appended for this many +seconds. No default. If unset, no auto_flush. 
Units: seconds + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` + * Default value is `"UTF-8"` + +The character encoding used in this input. Examples include `UTF-8` +and `cp1252` + +This setting is useful if your log files are in `Latin-1` (aka `cp1252`) +or in another character set other than `UTF-8`. + +This only affects "plain" format logs since JSON is `UTF-8` already. + +[id="{version}-plugins-{type}s-{plugin}-max_bytes"] +===== `max_bytes` + + * Value type is <> + * Default value is `"10 MiB"` + +The accumulation of events can make logstash exit with an out of memory error +if event boundaries are not correctly defined. This settings make sure to flush +multiline events after reaching a number of bytes, it is used in combination +max_lines. + +[id="{version}-plugins-{type}s-{plugin}-max_lines"] +===== `max_lines` + + * Value type is <> + * Default value is `500` + +The accumulation of events can make logstash exit with an out of memory error +if event boundaries are not correctly defined. This settings make sure to flush +multiline events after reaching a number of lines, it is used in combination +max_bytes. + +[id="{version}-plugins-{type}s-{plugin}-multiline_tag"] +===== `multiline_tag` + + * Value type is <> + * Default value is `"multiline"` + +Tag multiline events with a given tag. This tag will only be added +to events that actually have multiple lines in them. 
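+
+Downstream stages can branch on this tag. A minimal sketch using the stock
+`mutate` filter (the field name added here is purely illustrative):
+[source,ruby]
+    filter {
+      if "multiline" in [tags] {
+        # mark events that were assembled from several lines
+        mutate { add_field => { "joined" => "true" } }
+      }
+    }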
+ +[id="{version}-plugins-{type}s-{plugin}-negate"] +===== `negate` + + * Value type is <> + * Default value is `false` + +Negate the regexp pattern ('if not matched'). + +[id="{version}-plugins-{type}s-{plugin}-pattern"] +===== `pattern` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The regular expression to match. + +[id="{version}-plugins-{type}s-{plugin}-patterns_dir"] +===== `patterns_dir` + + * Value type is <> + * Default value is `[]` + +Logstash ships by default with a bunch of patterns, so you don't +necessarily need to define this yourself unless you are adding additional +patterns. + +Pattern files are plain text with format: +[source,ruby] + NAME PATTERN + +For example: +[source,ruby] + NUMBER \d+ + +[id="{version}-plugins-{type}s-{plugin}-what"] +===== `what` + + * This is a required setting. + * Value can be any of: `previous`, `next` + * There is no default value for this setting. + +If the pattern matched, does event belong to the next or previous event? + + diff --git a/docs/versioned-plugins/codecs/netflow-index.asciidoc b/docs/versioned-plugins/codecs/netflow-index.asciidoc new file mode 100644 index 000000000..c0f1a626f --- /dev/null +++ b/docs/versioned-plugins/codecs/netflow-index.asciidoc @@ -0,0 +1,34 @@ +:plugin: netflow +:type: codec + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-12-30 +| <> | 2017-12-03 +| <> | 2017-11-23 +| <> | 2017-11-19 +| <> | 2017-11-11 +| <> | 2017-11-07 +| <> | 2017-09-29 +| <> | 2017-09-28 +| <> | 2017-08-15 +| <> | 2017-07-18 +| <> | 2017-06-23 +| <> | 2017-06-23 +|======================================================================= + +include::netflow-v3.9.1.asciidoc[] +include::netflow-v3.9.0.asciidoc[] +include::netflow-v3.8.3.asciidoc[] +include::netflow-v3.8.1.asciidoc[] +include::netflow-v3.8.0.asciidoc[] +include::netflow-v3.7.1.asciidoc[] +include::netflow-v3.7.0.asciidoc[] +include::netflow-v3.6.0.asciidoc[] +include::netflow-v3.5.2.asciidoc[] +include::netflow-v3.5.1.asciidoc[] +include::netflow-v3.5.0.asciidoc[] +include::netflow-v3.4.1.asciidoc[] + diff --git a/docs/versioned-plugins/codecs/netflow-v3.10.0.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.10.0.asciidoc new file mode 100644 index 000000000..cce6a4612 --- /dev/null +++ b/docs/versioned-plugins/codecs/netflow-v3.10.0.asciidoc @@ -0,0 +1,210 @@ +:plugin: netflow +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.10.0 +:release_date: 2017-12-30 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.10.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Netflow codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows. 
+
+==== Supported Netflow/IPFIX exporters
+
+This codec supports:
+
+* Netflow v5
+* Netflow v9
+* IPFIX
+
+The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec:
+
+[cols="6,^2,^2,^2,12",options="header"]
+|===========================================================================================
+|Netflow exporter | v5 | v9 | IPFIX | Remarks
+|Barracuda Firewall | | | y |
+|Cisco ASA | | y | |
+|Cisco ASR 1k | | | n | Fails because of duplicate fields
+|Cisco ASR 9k | | y | |
+|Cisco IOS 12.x | | y | |
+|Cisco ISR w/ HSL | | n | | Fails because of duplicate fields, see: https://github.com/logstash-plugins/logstash-codec-netflow/issues/93
+|Cisco WLC | | y | |
+|Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown
+|fprobe | y | | |
+|Fortigate FortiOS | | y | |
+|ipt_NETFLOW | y | y | y |
+|Juniper MX80 | y | | | SW > 12.3R8
+|Mikrotik | y | | y | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow
+|nProbe | y | y | y | L7 DPI fields now also supported
+|Nokia BRAS | | | y |
+|OpenBSD pflow | y | n | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4
+|Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd
+|Streamcore Streamgroomer | | y | |
+|Palo Alto PAN-OS | | y | |
+|Ubiquiti Edgerouter X | | y | | With MPLS labels
+|VMware VDS | | | y | Still some unknown fields
+|YAF | | | y | With silk and applabel, but no DPI plugin support
+|vIPtela | | | y |
+|===========================================================================================
+
+==== Usage
+
+Example Logstash configuration that will listen on 2055/udp for Netflow v5, v9, and IPFIX:
+
+[source, ruby]
+--------------------------
+input {
+  udp {
+    port => 2055
+    codec => netflow
+  }
+}
+--------------------------
+
+For high-performance production environments, the configuration below will decode up to 15000 flows/sec on a dedicated 16 CPU instance. If your total flowrate exceeds 15000 flows/sec, you should use multiple Logstash instances.
+
+[source, ruby]
+--------------------------
+input {
+  udp {
+    port => 2055
+    codec => netflow
+    receive_buffer_bytes => 16777216
+    workers => 16
+  }
+}
+--------------------------
+
+To mitigate dropped packets, make sure to increase the Linux kernel receive buffer limit:
+
+    # sysctl -w net.core.rmem_max=$((1024*1024*16))
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Netflow Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-versions>> |<>|No
+|=======================================================================
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-cache_save_path"]
+===== `cache_save_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Enables the template cache and saves it in the specified directory. This
+minimizes data loss after Logstash restarts because the codec doesn't have to
+wait for the arrival of templates, but can instead reload templates received
+during previous runs.
+
+Template caches are saved as:
+
+* <>/netflow_templates.cache for Netflow v9 templates.
+* <>/ipfix_templates.cache for IPFIX templates.
+
+[id="{version}-plugins-{type}s-{plugin}-cache_ttl"]
+===== `cache_ttl`
+
+ * Value type is <>
+ * Default value is `4000`
+
+Netflow v9/v10 template cache TTL, in seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"]
+===== `include_flowset_id`
+
+ * Value type is <>
+ * Default value is `false`
+
+This only makes sense for IPFIX; Netflow v9 events already include this field.
+Setting it to `true` will include the `flowset_id` in events, which allows you
+to work with sequences, for instance with the aggregate filter.
+
+[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"]
+===== `ipfix_definitions`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Override YAML file containing IPFIX field definitions.
+
+The format is very similar to the Netflow version, except that a top-level
+Private Enterprise Number (PEN) key is added:
+
+[source,yaml]
+--------------------------
+pen:
+  id:
+  - :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string
+  - :name
+  id:
+  - :skip
+--------------------------
+
+There is an implicit PEN 0 for the standard fields.
+
+See the YAML definitions bundled with the codec for the base set.
+
+[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"]
+===== `netflow_definitions`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Override YAML file containing Netflow field definitions.
+
+Each Netflow field is defined like so:
+
+[source,yaml]
+--------------------------
+id:
+- default length in bytes
+- :name
+id:
+- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string
+- :name
+id:
+- :skip
+--------------------------
+
+See the YAML definitions bundled with the codec for the base set.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * Default value is `"netflow"`
+
+Specify the field into which you want to store the Netflow data.
+
+[id="{version}-plugins-{type}s-{plugin}-versions"]
+===== `versions`
+
+ * Value type is <>
+ * Default value is `[5, 9, 10]`
+
+Specify which Netflow versions you will accept.
+
+
diff --git a/docs/versioned-plugins/codecs/netflow-v3.4.1.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.4.1.asciidoc
new file mode 100644
index 000000000..e453dbfda
--- /dev/null
+++ b/docs/versioned-plugins/codecs/netflow-v3.4.1.asciidoc
@@ -0,0 +1,192 @@
+:plugin: netflow
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.4.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.4.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="plugins-{type}-{plugin}"]
+
+=== Netflow codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows.
+ +==== Supported Netflow/IPFIX exporters + +The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec: + +[cols="6,^2,^2,^2,12",options="header"] +|=========================================================================================== +|Netflow exporter | v5 | v9 | IPFIX | Remarks +|Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd +|nProbe | y | y | y | +|ipt_NETFLOW | y | y | y | +|Cisco ASA | | y | | +|Cisco IOS 12.x | | y | | +|fprobe | y | | | +|Juniper MX80 | y | | | SW > 12.3R8 +|OpenBSD pflow | y | n | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4 +|Mikrotik 6.35.4 | y | | n | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow +|Ubiquiti Edgerouter X | | y | | With MPLS labels +|Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown +|=========================================================================================== + +==== Usage + +Example Logstash configuration: + +[source, ruby] +-------------------------- +input { + udp { + host => localhost + port => 2055 + codec => netflow { + versions => [5, 9] + } + type => netflow + } + udp { + host => localhost + port => 4739 + codec => netflow { + versions => [10] + target => ipfix + } + type => ipfix + } + tcp { + host => localhost + port => 4739 + codec => netflow { + versions => [10] + target => ipfix + } + type => ipfix + } +} +-------------------------- + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Netflow Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-versions>> |<>|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-cache_save_path"] +===== `cache_save_path` + + * Value type is <> + * There is no default value for this setting. + +Where to save the template cache +This helps speed up processing when restarting logstash +(So you don't have to await the arrival of templates) +cache will save as path/netflow_templates.cache and/or path/ipfix_templates.cache + +[id="{version}-plugins-{type}s-{plugin}-cache_ttl"] +===== `cache_ttl` + + * Value type is <> + * Default value is `4000` + +Netflow v9/v10 template cache TTL (minutes) + +[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"] +===== `include_flowset_id` + + * Value type is <> + * Default value is `false` + +Only makes sense for ipfix, v9 already includes this +Setting to true will include the flowset_id in events +Allows you to work with sequences, for instance with the aggregate filter + +[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"] +===== `ipfix_definitions` + + * Value type is <> + * There is no default value for this setting. 
+ +Override YAML file containing IPFIX field definitions + +Very similar to the Netflow version except there is a top level Private +Enterprise Number (PEN) key added: + +[source,yaml] +-------------------------- +pen: +id: +- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string +- :name +id: +- :skip +-------------------------- + +There is an implicit PEN 0 for the standard fields. + +See for the base set. + +[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"] +===== `netflow_definitions` + + * Value type is <> + * There is no default value for this setting. + +Override YAML file containing Netflow field definitions + +Each Netflow field is defined like so: + +[source,yaml] +-------------------------- +id: +- default length in bytes +- :name +id: +- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string +- :name +id: +- :skip +-------------------------- + +See for the base set. + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * Default value is `"netflow"` + +Specify into what field you want the Netflow data. + +[id="{version}-plugins-{type}s-{plugin}-versions"] +===== `versions` + + * Value type is <> + * Default value is `[5, 9, 10]` + +Specify which Netflow versions you will accept. + + diff --git a/docs/versioned-plugins/codecs/netflow-v3.5.0.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.5.0.asciidoc new file mode 100644 index 000000000..f7d2b9fa9 --- /dev/null +++ b/docs/versioned-plugins/codecs/netflow-v3.5.0.asciidoc @@ -0,0 +1,192 @@ +:plugin: netflow +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.5.0 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.5.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Netflow codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows. 
+ +==== Supported Netflow/IPFIX exporters + +The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec: + +[cols="6,^2,^2,^2,12",options="header"] +|=========================================================================================== +|Netflow exporter | v5 | v9 | IPFIX | Remarks +|Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd +|nProbe | y | y | y | +|ipt_NETFLOW | y | y | y | +|Cisco ASA | | y | | +|Cisco IOS 12.x | | y | | +|fprobe | y | | | +|Juniper MX80 | y | | | SW > 12.3R8 +|OpenBSD pflow | y | n | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4 +|Mikrotik 6.35.4 | y | | n | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow +|Ubiquiti Edgerouter X | | y | | With MPLS labels +|Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown +|=========================================================================================== + +==== Usage + +Example Logstash configuration: + +[source, ruby] +-------------------------- +input { + udp { + host => localhost + port => 2055 + codec => netflow { + versions => [5, 9] + } + type => netflow + } + udp { + host => localhost + port => 4739 + codec => netflow { + versions => [10] + target => ipfix + } + type => ipfix + } + tcp { + host => localhost + port => 4739 + codec => netflow { + versions => [10] + target => ipfix + } + type => ipfix + } +} +-------------------------- + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Netflow Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-versions>> |<>|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-cache_save_path"] +===== `cache_save_path` + + * Value type is <> + * There is no default value for this setting. + +Where to save the template cache +This helps speed up processing when restarting logstash +(So you don't have to await the arrival of templates) +cache will save as path/netflow_templates.cache and/or path/ipfix_templates.cache + +[id="{version}-plugins-{type}s-{plugin}-cache_ttl"] +===== `cache_ttl` + + * Value type is <> + * Default value is `4000` + +Netflow v9/v10 template cache TTL (minutes) + +[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"] +===== `include_flowset_id` + + * Value type is <> + * Default value is `false` + +Only makes sense for ipfix, v9 already includes this +Setting to true will include the flowset_id in events +Allows you to work with sequences, for instance with the aggregate filter + +[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"] +===== `ipfix_definitions` + + * Value type is <> + * There is no default value for this setting. 
+ +Override YAML file containing IPFIX field definitions + +Very similar to the Netflow version except there is a top level Private +Enterprise Number (PEN) key added: + +[source,yaml] +-------------------------- +pen: +id: +- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string +- :name +id: +- :skip +-------------------------- + +There is an implicit PEN 0 for the standard fields. + +See for the base set. + +[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"] +===== `netflow_definitions` + + * Value type is <> + * There is no default value for this setting. + +Override YAML file containing Netflow field definitions + +Each Netflow field is defined like so: + +[source,yaml] +-------------------------- +id: +- default length in bytes +- :name +id: +- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string +- :name +id: +- :skip +-------------------------- + +See for the base set. + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * Default value is `"netflow"` + +Specify into what field you want the Netflow data. + +[id="{version}-plugins-{type}s-{plugin}-versions"] +===== `versions` + + * Value type is <> + * Default value is `[5, 9, 10]` + +Specify which Netflow versions you will accept. + + diff --git a/docs/versioned-plugins/codecs/netflow-v3.5.1.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.5.1.asciidoc new file mode 100644 index 000000000..f3bef014b --- /dev/null +++ b/docs/versioned-plugins/codecs/netflow-v3.5.1.asciidoc @@ -0,0 +1,187 @@ +:plugin: netflow +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.5.1 +:release_date: 2017-07-18 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.5.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Netflow codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows. 
+ +==== Supported Netflow/IPFIX exporters + +This codec supports: + +* Netflow v5 +* Netflow v9 +* IPFIX + +The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec: + +[cols="6,^2,^2,^2,12",options="header"] +|=========================================================================================== +|Netflow exporter | v5 | v9 | IPFIX | Remarks +|Barracuda Firewall | | | y | +|Cisco ASA | | y | | +|Cisco ASR | | y | | +|Cisco IOS 12.x | | y | | +|Cisco WLC | | y | | +|Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown +|fprobe | y | | | +|Fortigate FortiOS 5.2 | | y | | +|ipt_NETFLOW | y | y | y | +|Juniper MX80 | y | | | SW > 12.3R8 +|Mikrotik 6.35.4 | y | | n | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow +|nProbe | y | y | y | +|OpenBSD pflow | y | n | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4 +|Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd +|Streamcore Streamgroomer | | y | | +|Ubiquiti Edgerouter X | | y | | With MPLS labels +|VMware VDS | | | y | Still some unknown fields +|=========================================================================================== + +==== Usage + +Example Logstash configuration that will listen on 2055/udp for Netflow v5,v9 and IPFIX: + +[source, ruby] +-------------------------- +input { + udp { + port => 2055 + codec => netflow + } +} +-------------------------- + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Netflow Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-versions>> |<>|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-cache_save_path"] +===== `cache_save_path` + + * Value type is <> + * There is no default value for this setting. + +Enables the template cache and saves it in the specified directory. This +minimizes data loss after Logstash restarts because the codec doesn't have to +wait for the arrival of templates, but instead reload already received +templates received during previous runs. + +Template caches are saved as: + +* <>/netflow_templates.cache for Netflow v9 templates. +* <>/ipfix_templates.cache for IPFIX templates. + +[id="{version}-plugins-{type}s-{plugin}-cache_ttl"] +===== `cache_ttl` + + * Value type is <> + * Default value is `4000` + +Netflow v9/v10 template cache TTL (minutes) + +[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"] +===== `include_flowset_id` + + * Value type is <> + * Default value is `false` + +Only makes sense for ipfix, v9 already includes this +Setting to true will include the flowset_id in events +Allows you to work with sequences, for instance with the aggregate filter + +[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"] +===== `ipfix_definitions` + + * Value type is <> + * There is no default value for this setting. 
+ +Override YAML file containing IPFIX field definitions + +Very similar to the Netflow version except there is a top level Private +Enterprise Number (PEN) key added: + +[source,yaml] +-------------------------- +pen: +id: +- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string +- :name +id: +- :skip +-------------------------- + +There is an implicit PEN 0 for the standard fields. + +See for the base set. + +[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"] +===== `netflow_definitions` + + * Value type is <> + * There is no default value for this setting. + +Override YAML file containing Netflow field definitions + +Each Netflow field is defined like so: + +[source,yaml] +-------------------------- +id: +- default length in bytes +- :name +id: +- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string +- :name +id: +- :skip +-------------------------- + +See for the base set. + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * Default value is `"netflow"` + +Specify into what field you want the Netflow data. + +[id="{version}-plugins-{type}s-{plugin}-versions"] +===== `versions` + + * Value type is <> + * Default value is `[5, 9, 10]` + +Specify which Netflow versions you will accept. + + diff --git a/docs/versioned-plugins/codecs/netflow-v3.5.2.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.5.2.asciidoc new file mode 100644 index 000000000..520963634 --- /dev/null +++ b/docs/versioned-plugins/codecs/netflow-v3.5.2.asciidoc @@ -0,0 +1,206 @@ +:plugin: netflow +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.5.2 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.5.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Netflow codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows. 
+
+==== Supported Netflow/IPFIX exporters
+
+This codec supports:
+
+* Netflow v5
+* Netflow v9
+* IPFIX
+
+The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec:
+
+[cols="6,^2,^2,^2,12",options="header"]
+|===========================================================================================
+|Netflow exporter | v5 | v9 | IPFIX | Remarks
+|Barracuda Firewall | | | y |
+|Cisco ASA | | y | |
+|Cisco ASR | | y | |
+|Cisco IOS 12.x | | y | |
+|Cisco WLC | | y | |
+|Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown
+|fprobe | y | | |
+|Fortigate FortiOS | | y | |
+|ipt_NETFLOW | y | y | y |
+|Juniper MX80 | y | | | SW > 12.3R8
+|Mikrotik | y | | y | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow
+|nProbe | y | y | y |
+|OpenBSD pflow | y | n | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4
+|Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd
+|Streamcore Streamgroomer | | y | |
+|Ubiquiti Edgerouter X | | y | | With MPLS labels
+|VMware VDS | | | y | Still some unknown fields
+|===========================================================================================
+
+==== Usage
+
+Example Logstash configuration that will listen on 2055/udp for Netflow v5, v9, and IPFIX:
+
+[source, ruby]
+--------------------------
+input {
+  udp {
+    port => 2055
+    codec => netflow
+  }
+}
+--------------------------
+
+For high-performance production environments, the configuration below will decode up to 6000 flows/sec on an 8 CPU instance. If your total flowrate exceeds 6000 flows/sec, you should use multiple Logstash instances.
+
+[source, ruby]
+--------------------------
+input {
+  udp {
+    port => 2055
+    receive_buffer_bytes => 16777216
+    codec => netflow
+    workers => 6
+  }
+}
+--------------------------
+
+Make sure to increase the Linux kernel receive buffer limit:
+
+    # sysctl -w net.core.rmem_max=$((1024*1024*16))
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Netflow Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-versions>> |<>|No
+|=======================================================================
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-cache_save_path"]
+===== `cache_save_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Enables the template cache and saves it in the specified directory. This
+minimizes data loss after Logstash restarts because the codec doesn't have to
+wait for the arrival of templates, but can instead reload templates received
+during previous runs.
+
+Template caches are saved as:
+
+* <>/netflow_templates.cache for Netflow v9 templates.
+* <>/ipfix_templates.cache for IPFIX templates.
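+
+For example, a minimal sketch of a UDP input with the template cache enabled;
+the directory shown is only an illustration and must be writable by the
+Logstash user:
+[source, ruby]
+--------------------------
+input {
+  udp {
+    port => 2055
+    codec => netflow {
+      # persist received templates across restarts
+      cache_save_path => "/var/lib/logstash"
+    }
+  }
+}
+--------------------------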
+
+[id="{version}-plugins-{type}s-{plugin}-cache_ttl"]
+===== `cache_ttl`
+
+ * Value type is <>
+ * Default value is `4000`
+
+Netflow v9/v10 template cache TTL, in seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"]
+===== `include_flowset_id`
+
+ * Value type is <>
+ * Default value is `false`
+
+This only makes sense for IPFIX; Netflow v9 events already include this field.
+Setting it to `true` will include the `flowset_id` in events, which allows you
+to work with sequences, for instance with the aggregate filter.
+
+[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"]
+===== `ipfix_definitions`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Override YAML file containing IPFIX field definitions.
+
+The format is very similar to the Netflow version, except that a top-level
+Private Enterprise Number (PEN) key is added:
+
+[source,yaml]
+--------------------------
+pen:
+  id:
+  - :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string
+  - :name
+  id:
+  - :skip
+--------------------------
+
+There is an implicit PEN 0 for the standard fields.
+
+See the YAML definitions bundled with the codec for the base set.
+
+[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"]
+===== `netflow_definitions`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Override YAML file containing Netflow field definitions.
+
+Each Netflow field is defined like so:
+
+[source,yaml]
+--------------------------
+id:
+- default length in bytes
+- :name
+id:
+- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string
+- :name
+id:
+- :skip
+--------------------------
+
+See the YAML definitions bundled with the codec for the base set.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * Default value is `"netflow"`
+
+Specify the field into which you want to store the Netflow data.
+
+[id="{version}-plugins-{type}s-{plugin}-versions"]
+===== `versions`
+
+ * Value type is <>
+ * Default value is `[5, 9, 10]`
+
+Specify which Netflow versions you will accept.
+
+
diff --git a/docs/versioned-plugins/codecs/netflow-v3.6.0.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.6.0.asciidoc
new file mode 100644
index 000000000..aa1fd859a
--- /dev/null
+++ b/docs/versioned-plugins/codecs/netflow-v3.6.0.asciidoc
@@ -0,0 +1,205 @@
+:plugin: netflow
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.6.0
+:release_date: 2017-09-28
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.6.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Netflow codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows.
+
+==== Supported Netflow/IPFIX exporters
+
+This codec supports:
+
+* Netflow v5
+* Netflow v9
+* IPFIX
+
+The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec:
+
+[cols="6,^2,^2,^2,12",options="header"]
+|===========================================================================================
+|Netflow exporter | v5 | v9 | IPFIX | Remarks
+|Barracuda Firewall | | | y |
+|Cisco ASA | | y | |
+|Cisco ASR | | y | |
+|Cisco IOS 12.x | | y | |
+|Cisco ISR w/ HSL | | n | | https://github.com/logstash-plugins/logstash-codec-netflow/issues/93
+|Cisco WLC | | y | |
+|Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown
+|fprobe | y | | |
+|Fortigate FortiOS | | y | |
+|ipt_NETFLOW | y | y | y |
+|Juniper MX80 | y | | | SW > 12.3R8
+|Mikrotik | y | | y | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow
+|nProbe | y | y | y | L7 DPI fields now also supported
+|OpenBSD pflow | y | n | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4
+|Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd
+|Streamcore Streamgroomer | | y | |
+|Ubiquiti Edgerouter X | | y | | With MPLS labels
+|VMware VDS | | | y | Still some unknown fields
+|===========================================================================================
+
+==== Usage
+
+Example Logstash configuration that will listen on 2055/udp for Netflow v5, v9, and IPFIX:
+
+[source, ruby]
+--------------------------
+input {
+  udp {
+    port => 2055
+    codec => netflow
+  }
+}
+--------------------------
+
+For high-performance production environments, the configuration below will decode up to 15000 flows/sec on a dedicated 16 CPU instance. If your total flowrate exceeds 15000 flows/sec, you should use multiple Logstash instances.
+
+[source, ruby]
+--------------------------
+input {
+  udp {
+    port => 2055
+    codec => netflow
+    receive_buffer_bytes => 16777216
+    workers => 16
+  }
+}
+--------------------------
+
+To mitigate dropped packets, make sure to increase the Linux kernel receive buffer limit:
+
+    # sysctl -w net.core.rmem_max=$((1024*1024*16))
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Netflow Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-versions>> |<<array,array>>|No
+|=======================================================================
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-cache_save_path"]
+===== `cache_save_path`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Enables the template cache and saves it in the specified directory. This
+minimizes data loss after Logstash restarts because the codec doesn't have to
+wait for the arrival of templates, but can instead reload templates received
+during previous runs.
+
+Template caches are saved as:
+
+* <path>/netflow_templates.cache for Netflow v9 templates.
+* <path>/ipfix_templates.cache for IPFIX templates.
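+
+As a sketch, the high-performance settings above can be combined with the
+template cache; the directory and values here are illustrative assumptions
+only:
+
+[source, ruby]
+--------------------------
+input {
+  udp {
+    port => 2055
+    workers => 16
+    receive_buffer_bytes => 16777216
+    codec => netflow {
+      cache_save_path => "/var/lib/logstash"   # hypothetical cache directory
+    }
+  }
+}
+--------------------------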
+
+[id="{version}-plugins-{type}s-{plugin}-cache_ttl"]
+===== `cache_ttl`
+
+ * Value type is <<number,number>>
+ * Default value is `4000`
+
+Netflow v9/v10 template cache TTL (seconds).
+
+[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"]
+===== `include_flowset_id`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+This option only makes sense for IPFIX; Netflow v9 already includes the
+flowset_id. Setting it to true will include the flowset_id in events, which
+allows you to work with sequences, for instance with the aggregate filter.
+
+[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"]
+===== `ipfix_definitions`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Override the YAML file containing IPFIX field definitions.
+
+Very similar to the Netflow version, except there is a top-level Private
+Enterprise Number (PEN) key added:
+
+[source,yaml]
+--------------------------
+pen:
+  id:
+  - :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string
+  - :name
+  id:
+  - :skip
+--------------------------
+
+There is an implicit PEN 0 for the standard fields.
+
+See <https://github.com/logstash-plugins/logstash-codec-netflow/blob/master/lib/logstash/codecs/netflow/ipfix.yaml> for the base set.
+
+[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"]
+===== `netflow_definitions`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Override the YAML file containing Netflow field definitions.
+
+Each Netflow field is defined like so:
+
+[source,yaml]
+--------------------------
+id:
+- default length in bytes
+- :name
+id:
+- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string
+- :name
+id:
+- :skip
+--------------------------
+
+See <https://github.com/logstash-plugins/logstash-codec-netflow/blob/master/lib/logstash/codecs/netflow/netflow.yaml> for the base set.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <<string,string>>
+ * Default value is `"netflow"`
+
+Specify the field into which you want to store the Netflow data.
+
+[id="{version}-plugins-{type}s-{plugin}-versions"]
+===== `versions`
+
+ * Value type is <<array,array>>
+ * Default value is `[5, 9, 10]`
+
+Specify which Netflow versions you will accept.
+
+
diff --git a/docs/versioned-plugins/codecs/netflow-v3.7.0.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.7.0.asciidoc
new file mode 100644
index 000000000..7a8f2deff
--- /dev/null
+++ b/docs/versioned-plugins/codecs/netflow-v3.7.0.asciidoc
@@ -0,0 +1,205 @@
+:plugin: netflow
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.7.0
+:release_date: 2017-09-29
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.7.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Netflow codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows.
+
+==== Supported Netflow/IPFIX exporters
+
+This codec supports:
+
+* Netflow v5
+* Netflow v9
+* IPFIX
+
+The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec:
+
+[cols="6,^2,^2,^2,12",options="header"]
+|===========================================================================================
+|Netflow exporter | v5 | v9 | IPFIX | Remarks
+|Barracuda Firewall | | | y |
+|Cisco ASA | | y | |
+|Cisco ASR | | y | |
+|Cisco IOS 12.x | | y | |
+|Cisco ISR w/ HSL | | n | | https://github.com/logstash-plugins/logstash-codec-netflow/issues/93
+|Cisco WLC | | y | |
+|Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown
+|fprobe | y | | |
+|Fortigate FortiOS | | y | |
+|ipt_NETFLOW | y | y | y |
+|Juniper MX80 | y | | | SW > 12.3R8
+|Mikrotik | y | | y | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow
+|nProbe | y | y | y | L7 DPI fields now also supported
+|OpenBSD pflow | y | n | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4
+|Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd
+|Streamcore Streamgroomer | | y | |
+|Ubiquiti Edgerouter X | | y | | With MPLS labels
+|VMware VDS | | | y | Still some unknown fields
+|===========================================================================================
+
+==== Usage
+
+Example Logstash configuration that will listen on 2055/udp for Netflow v5, v9, and IPFIX:
+
+[source, ruby]
+--------------------------
+input {
+  udp {
+    port => 2055
+    codec => netflow
+  }
+}
+--------------------------
+
+For high-performance production environments, the configuration below will decode up to 15000 flows/sec on a dedicated 16 CPU instance. If your total flowrate exceeds 15000 flows/sec, you should use multiple Logstash instances.
+
+[source, ruby]
+--------------------------
+input {
+  udp {
+    port => 2055
+    codec => netflow
+    receive_buffer_bytes => 16777216
+    workers => 16
+  }
+}
+--------------------------
+
+To mitigate dropped packets, make sure to increase the Linux kernel receive buffer limit:
+
+    # sysctl -w net.core.rmem_max=$((1024*1024*16))
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Netflow Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-versions>> |<<array,array>>|No
+|=======================================================================
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-cache_save_path"]
+===== `cache_save_path`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Enables the template cache and saves it in the specified directory. This
+minimizes data loss after Logstash restarts because the codec doesn't have to
+wait for the arrival of templates, but can instead reload templates received
+during previous runs.
+
+Template caches are saved as:
+
+* <path>/netflow_templates.cache for Netflow v9 templates.
+* <path>/ipfix_templates.cache for IPFIX templates.
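+
+If only Netflow v5 is expected, decoding can be narrowed with the `versions`
+option described below; this is a sketch and the value shown is illustrative:
+
+[source, ruby]
+--------------------------
+input {
+  udp {
+    port => 2055
+    codec => netflow {
+      versions => [5]   # accept only Netflow v5
+    }
+  }
+}
+--------------------------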
+
+[id="{version}-plugins-{type}s-{plugin}-cache_ttl"]
+===== `cache_ttl`
+
+ * Value type is <<number,number>>
+ * Default value is `4000`
+
+Netflow v9/v10 template cache TTL (seconds).
+
+[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"]
+===== `include_flowset_id`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+This option only makes sense for IPFIX; Netflow v9 already includes the
+flowset_id. Setting it to true will include the flowset_id in events, which
+allows you to work with sequences, for instance with the aggregate filter.
+
+[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"]
+===== `ipfix_definitions`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Override the YAML file containing IPFIX field definitions.
+
+Very similar to the Netflow version, except there is a top-level Private
+Enterprise Number (PEN) key added:
+
+[source,yaml]
+--------------------------
+pen:
+  id:
+  - :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string
+  - :name
+  id:
+  - :skip
+--------------------------
+
+There is an implicit PEN 0 for the standard fields.
+
+See <https://github.com/logstash-plugins/logstash-codec-netflow/blob/master/lib/logstash/codecs/netflow/ipfix.yaml> for the base set.
+
+[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"]
+===== `netflow_definitions`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Override the YAML file containing Netflow field definitions.
+
+Each Netflow field is defined like so:
+
+[source,yaml]
+--------------------------
+id:
+- default length in bytes
+- :name
+id:
+- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string
+- :name
+id:
+- :skip
+--------------------------
+
+See <https://github.com/logstash-plugins/logstash-codec-netflow/blob/master/lib/logstash/codecs/netflow/netflow.yaml> for the base set.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <<string,string>>
+ * Default value is `"netflow"`
+
+Specify the field into which you want to store the Netflow data.
+
+[id="{version}-plugins-{type}s-{plugin}-versions"]
+===== `versions`
+
+ * Value type is <<array,array>>
+ * Default value is `[5, 9, 10]`
+
+Specify which Netflow versions you will accept.
+
+
diff --git a/docs/versioned-plugins/codecs/netflow-v3.7.1.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.7.1.asciidoc
new file mode 100644
index 000000000..64993ba08
--- /dev/null
+++ b/docs/versioned-plugins/codecs/netflow-v3.7.1.asciidoc
@@ -0,0 +1,205 @@
+:plugin: netflow
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.7.1
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.7.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Netflow codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows.
+
+==== Supported Netflow/IPFIX exporters
+
+This codec supports:
+
+* Netflow v5
+* Netflow v9
+* IPFIX
+
+The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec:
+
+[cols="6,^2,^2,^2,12",options="header"]
+|===========================================================================================
+|Netflow exporter | v5 | v9 | IPFIX | Remarks
+|Barracuda Firewall | | | y |
+|Cisco ASA | | y | |
+|Cisco ASR | | y | |
+|Cisco IOS 12.x | | y | |
+|Cisco ISR w/ HSL | | n | | https://github.com/logstash-plugins/logstash-codec-netflow/issues/93
+|Cisco WLC | | y | |
+|Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown
+|fprobe | y | | |
+|Fortigate FortiOS | | y | |
+|ipt_NETFLOW | y | y | y |
+|Juniper MX80 | y | | | SW > 12.3R8
+|Mikrotik | y | | y | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow
+|nProbe | y | y | y | L7 DPI fields now also supported
+|OpenBSD pflow | y | n | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4
+|Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd
+|Streamcore Streamgroomer | | y | |
+|Ubiquiti Edgerouter X | | y | | With MPLS labels
+|VMware VDS | | | y | Still some unknown fields
+|===========================================================================================
+
+==== Usage
+
+Example Logstash configuration that will listen on 2055/udp for Netflow v5, v9, and IPFIX:
+
+[source, ruby]
+--------------------------
+input {
+  udp {
+    port => 2055
+    codec => netflow
+  }
+}
+--------------------------
+
+For high-performance production environments, the configuration below will decode up to 15000 flows/sec on a dedicated 16 CPU instance. If your total flowrate exceeds 15000 flows/sec, you should use multiple Logstash instances.
+
+[source, ruby]
+--------------------------
+input {
+  udp {
+    port => 2055
+    codec => netflow
+    receive_buffer_bytes => 16777216
+    workers => 16
+  }
+}
+--------------------------
+
+To mitigate dropped packets, make sure to increase the Linux kernel receive buffer limit:
+
+    # sysctl -w net.core.rmem_max=$((1024*1024*16))
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Netflow Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-versions>> |<<array,array>>|No
+|=======================================================================
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-cache_save_path"]
+===== `cache_save_path`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Enables the template cache and saves it in the specified directory. This
+minimizes data loss after Logstash restarts because the codec doesn't have to
+wait for the arrival of templates, but can instead reload templates received
+during previous runs.
+
+Template caches are saved as:
+
+* <path>/netflow_templates.cache for Netflow v9 templates.
+* <path>/ipfix_templates.cache for IPFIX templates.
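+
+To store decoded flow data under a field other than the default `netflow`,
+the `target` option described below can be set; a sketch with an illustrative
+field name:
+
+[source, ruby]
+--------------------------
+input {
+  udp {
+    port => 2055
+    codec => netflow {
+      target => "flow"   # store decoded fields under [flow]
+    }
+  }
+}
+--------------------------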
+
+[id="{version}-plugins-{type}s-{plugin}-cache_ttl"]
+===== `cache_ttl`
+
+ * Value type is <<number,number>>
+ * Default value is `4000`
+
+Netflow v9/v10 template cache TTL (seconds).
+
+[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"]
+===== `include_flowset_id`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+This option only makes sense for IPFIX; Netflow v9 already includes the
+flowset_id. Setting it to true will include the flowset_id in events, which
+allows you to work with sequences, for instance with the aggregate filter.
+
+[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"]
+===== `ipfix_definitions`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Override the YAML file containing IPFIX field definitions.
+
+Very similar to the Netflow version, except there is a top-level Private
+Enterprise Number (PEN) key added:
+
+[source,yaml]
+--------------------------
+pen:
+  id:
+  - :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string
+  - :name
+  id:
+  - :skip
+--------------------------
+
+There is an implicit PEN 0 for the standard fields.
+
+See <https://github.com/logstash-plugins/logstash-codec-netflow/blob/master/lib/logstash/codecs/netflow/ipfix.yaml> for the base set.
+
+[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"]
+===== `netflow_definitions`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Override the YAML file containing Netflow field definitions.
+
+Each Netflow field is defined like so:
+
+[source,yaml]
+--------------------------
+id:
+- default length in bytes
+- :name
+id:
+- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string
+- :name
+id:
+- :skip
+--------------------------
+
+See <https://github.com/logstash-plugins/logstash-codec-netflow/blob/master/lib/logstash/codecs/netflow/netflow.yaml> for the base set.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <<string,string>>
+ * Default value is `"netflow"`
+
+Specify the field into which you want to store the Netflow data.
+
+[id="{version}-plugins-{type}s-{plugin}-versions"]
+===== `versions`
+
+ * Value type is <<array,array>>
+ * Default value is `[5, 9, 10]`
+
+Specify which Netflow versions you will accept.
+
+
diff --git a/docs/versioned-plugins/codecs/netflow-v3.8.0.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.8.0.asciidoc
new file mode 100644
index 000000000..1b24368d1
--- /dev/null
+++ b/docs/versioned-plugins/codecs/netflow-v3.8.0.asciidoc
@@ -0,0 +1,205 @@
+:plugin: netflow
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.8.0
+:release_date: 2017-11-11
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.8.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Netflow codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows.
+
+==== Supported Netflow/IPFIX exporters
+
+This codec supports:
+
+* Netflow v5
+* Netflow v9
+* IPFIX
+
+The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec:
+
+[cols="6,^2,^2,^2,12",options="header"]
+|===========================================================================================
+|Netflow exporter | v5 | v9 | IPFIX | Remarks
+|Barracuda Firewall | | | y |
+|Cisco ASA | | y | |
+|Cisco ASR | | y | |
+|Cisco IOS 12.x | | y | |
+|Cisco ISR w/ HSL | | n | | https://github.com/logstash-plugins/logstash-codec-netflow/issues/93
+|Cisco WLC | | y | |
+|Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown
+|fprobe | y | | |
+|Fortigate FortiOS | | y | |
+|ipt_NETFLOW | y | y | y |
+|Juniper MX80 | y | | | SW > 12.3R8
+|Mikrotik | y | | y | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow
+|nProbe | y | y | y | L7 DPI fields now also supported
+|OpenBSD pflow | y | n | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4
+|Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd
+|Streamcore Streamgroomer | | y | |
+|Ubiquiti Edgerouter X | | y | | With MPLS labels
+|VMware VDS | | | y | Still some unknown fields
+|===========================================================================================
+
+==== Usage
+
+Example Logstash configuration that will listen on 2055/udp for Netflow v5, v9, and IPFIX:
+
+[source, ruby]
+--------------------------
+input {
+  udp {
+    port => 2055
+    codec => netflow
+  }
+}
+--------------------------
+
+For high-performance production environments, the configuration below will decode up to 15000 flows/sec on a dedicated 16 CPU instance. If your total flowrate exceeds 15000 flows/sec, you should use multiple Logstash instances.
+
+[source, ruby]
+--------------------------
+input {
+  udp {
+    port => 2055
+    codec => netflow
+    receive_buffer_bytes => 16777216
+    workers => 16
+  }
+}
+--------------------------
+
+To mitigate dropped packets, make sure to increase the Linux kernel receive buffer limit:
+
+    # sysctl -w net.core.rmem_max=$((1024*1024*16))
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Netflow Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-versions>> |<<array,array>>|No
+|=======================================================================
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-cache_save_path"]
+===== `cache_save_path`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Enables the template cache and saves it in the specified directory. This
+minimizes data loss after Logstash restarts because the codec doesn't have to
+wait for the arrival of templates, but can instead reload templates received
+during previous runs.
+
+Template caches are saved as:
+
+* <path>/netflow_templates.cache for Netflow v9 templates.
+* <path>/ipfix_templates.cache for IPFIX templates.
+
+[id="{version}-plugins-{type}s-{plugin}-cache_ttl"]
+===== `cache_ttl`
+
+ * Value type is <<number,number>>
+ * Default value is `4000`
+
+Netflow v9/v10 template cache TTL (seconds).
+
+[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"]
+===== `include_flowset_id`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+This option only makes sense for IPFIX; Netflow v9 already includes the
+flowset_id. Setting it to true will include the flowset_id in events, which
+allows you to work with sequences, for instance with the aggregate filter.
+
+[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"]
+===== `ipfix_definitions`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Override the YAML file containing IPFIX field definitions.
+
+Very similar to the Netflow version, except there is a top-level Private
+Enterprise Number (PEN) key added:
+
+[source,yaml]
+--------------------------
+pen:
+  id:
+  - :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string
+  - :name
+  id:
+  - :skip
+--------------------------
+
+There is an implicit PEN 0 for the standard fields.
+
+See <https://github.com/logstash-plugins/logstash-codec-netflow/blob/master/lib/logstash/codecs/netflow/ipfix.yaml> for the base set.
+
+[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"]
+===== `netflow_definitions`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Override the YAML file containing Netflow field definitions.
+
+Each Netflow field is defined like so:
+
+[source,yaml]
+--------------------------
+id:
+- default length in bytes
+- :name
+id:
+- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string
+- :name
+id:
+- :skip
+--------------------------
+
+See <https://github.com/logstash-plugins/logstash-codec-netflow/blob/master/lib/logstash/codecs/netflow/netflow.yaml> for the base set.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <<string,string>>
+ * Default value is `"netflow"`
+
+Specify the field into which you want to store the Netflow data.
+
+[id="{version}-plugins-{type}s-{plugin}-versions"]
+===== `versions`
+
+ * Value type is <<array,array>>
+ * Default value is `[5, 9, 10]`
+
+Specify which Netflow versions you will accept.
+
+
diff --git a/docs/versioned-plugins/codecs/netflow-v3.8.1.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.8.1.asciidoc
new file mode 100644
index 000000000..849eb662c
--- /dev/null
+++ b/docs/versioned-plugins/codecs/netflow-v3.8.1.asciidoc
@@ -0,0 +1,207 @@
+:plugin: netflow
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.8.1
+:release_date: 2017-11-19
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.8.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Netflow codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows.
+
+==== Supported Netflow/IPFIX exporters
+
+This codec supports:
+
+* Netflow v5
+* Netflow v9
+* IPFIX
+
+The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec:
+
+[cols="6,^2,^2,^2,12",options="header"]
+|===========================================================================================
+|Netflow exporter | v5 | v9 | IPFIX | Remarks
+|Barracuda Firewall | | | y |
+|Cisco ASA | | y | |
+|Cisco ASR | | y | |
+|Cisco IOS 12.x | | y | |
+|Cisco ISR w/ HSL | | n | | https://github.com/logstash-plugins/logstash-codec-netflow/issues/93
+|Cisco WLC | | y | |
+|Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown
+|fprobe | y | | |
+|Fortigate FortiOS | | y | |
+|ipt_NETFLOW | y | y | y |
+|Juniper MX80 | y | | | SW > 12.3R8
+|Mikrotik | y | | y | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow
+|nProbe | y | y | y | L7 DPI fields now also supported
+|OpenBSD pflow | y | n | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4
+|Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd
+|Streamcore Streamgroomer | | y | |
+|Palo Alto PAN-OS | | y | |
+|Ubiquiti Edgerouter X | | y | | With MPLS labels
+|VMware VDS | | | y | Still some unknown fields
+|YAF | | | y | With silk and applabel, but no DPI plugin support
+|===========================================================================================
+
+==== Usage
+
+Example Logstash configuration that will listen on 2055/udp for Netflow v5, v9, and IPFIX:
+
+[source, ruby]
+--------------------------
+input {
+  udp {
+    port => 2055
+    codec => netflow
+  }
+}
+--------------------------
+
+For high-performance production environments, the configuration below will decode up to 15000 flows/sec on a dedicated 16 CPU instance. If your total flowrate exceeds 15000 flows/sec, you should use multiple Logstash instances.
+
+[source, ruby]
+--------------------------
+input {
+  udp {
+    port => 2055
+    codec => netflow
+    receive_buffer_bytes => 16777216
+    workers => 16
+  }
+}
+--------------------------
+
+To mitigate dropped packets, make sure to increase the Linux kernel receive buffer limit:
+
+    # sysctl -w net.core.rmem_max=$((1024*1024*16))
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Netflow Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-versions>> |<<array,array>>|No
+|=======================================================================
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-cache_save_path"]
+===== `cache_save_path`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Enables the template cache and saves it in the specified directory. This
+minimizes data loss after Logstash restarts because the codec doesn't have to
+wait for the arrival of templates, but can instead reload templates received
+during previous runs.
+
+Template caches are saved as:
+
+* <path>/netflow_templates.cache for Netflow v9 templates.
+* <path>/ipfix_templates.cache for IPFIX templates.
+
+[id="{version}-plugins-{type}s-{plugin}-cache_ttl"]
+===== `cache_ttl`
+
+ * Value type is <<number,number>>
+ * Default value is `4000`
+
+Netflow v9/v10 template cache TTL (seconds).
+
+[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"]
+===== `include_flowset_id`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+This option only makes sense for IPFIX; Netflow v9 already includes the
+flowset_id. Setting it to true will include the flowset_id in events, which
+allows you to work with sequences, for instance with the aggregate filter.
+
+[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"]
+===== `ipfix_definitions`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Override the YAML file containing IPFIX field definitions.
+
+Very similar to the Netflow version, except there is a top-level Private
+Enterprise Number (PEN) key added:
+
+[source,yaml]
+--------------------------
+pen:
+  id:
+  - :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string
+  - :name
+  id:
+  - :skip
+--------------------------
+
+There is an implicit PEN 0 for the standard fields.
+
+See <https://github.com/logstash-plugins/logstash-codec-netflow/blob/master/lib/logstash/codecs/netflow/ipfix.yaml> for the base set.
+
+[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"]
+===== `netflow_definitions`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Override the YAML file containing Netflow field definitions.
+
+Each Netflow field is defined like so:
+
+[source,yaml]
+--------------------------
+id:
+- default length in bytes
+- :name
+id:
+- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string
+- :name
+id:
+- :skip
+--------------------------
+
+See <https://github.com/logstash-plugins/logstash-codec-netflow/blob/master/lib/logstash/codecs/netflow/netflow.yaml> for the base set.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <<string,string>>
+ * Default value is `"netflow"`
+
+Specify the field into which you want to store the Netflow data.
+
+[id="{version}-plugins-{type}s-{plugin}-versions"]
+===== `versions`
+
+ * Value type is <<array,array>>
+ * Default value is `[5, 9, 10]`
+
+Specify which Netflow versions you will accept.
+
+
diff --git a/docs/versioned-plugins/codecs/netflow-v3.8.3.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.8.3.asciidoc
new file mode 100644
index 000000000..5a38c6952
--- /dev/null
+++ b/docs/versioned-plugins/codecs/netflow-v3.8.3.asciidoc
@@ -0,0 +1,207 @@
+:plugin: netflow
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.8.3
+:release_date: 2017-11-23
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.8.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Netflow codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows.
+
+==== Supported Netflow/IPFIX exporters
+
+This codec supports:
+
+* Netflow v5
+* Netflow v9
+* IPFIX
+
+The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec:
+
+[cols="6,^2,^2,^2,12",options="header"]
+|===========================================================================================
+|Netflow exporter | v5 | v9 | IPFIX | Remarks
+|Barracuda Firewall | | | y |
+|Cisco ASA | | y | |
+|Cisco ASR | | y | |
+|Cisco IOS 12.x | | y | |
+|Cisco ISR w/ HSL | | n | | https://github.com/logstash-plugins/logstash-codec-netflow/issues/93
+|Cisco WLC | | y | |
+|Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown
+|fprobe | y | | |
+|Fortigate FortiOS | | y | |
+|ipt_NETFLOW | y | y | y |
+|Juniper MX80 | y | | | SW > 12.3R8
+|Mikrotik | y | | y | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow
+|nProbe | y | y | y | L7 DPI fields now also supported
+|OpenBSD pflow | y | n | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4
+|Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd
+|Streamcore Streamgroomer | | y | |
+|Palo Alto PAN-OS | | y | |
+|Ubiquiti Edgerouter X | | y | | With MPLS labels
+|VMware VDS | | | y | Still some unknown fields
+|YAF | | | y | With silk and applabel, but no DPI plugin support
+|===========================================================================================
+
+==== Usage
+
+Example Logstash configuration that will listen on 2055/udp for Netflow v5, v9, and IPFIX:
+
+[source, ruby]
+--------------------------
+input {
+  udp {
+    port => 2055
+    codec => netflow
+  }
+}
+--------------------------
+
+For high-performance production environments, the configuration below will decode up to 15000 flows/sec on a dedicated 16 CPU instance. If your total flowrate exceeds 15000 flows/sec, you should use multiple Logstash instances.
+
+[source, ruby]
+--------------------------
+input {
+  udp {
+    port => 2055
+    codec => netflow
+    receive_buffer_bytes => 16777216
+    workers => 16
+  }
+}
+--------------------------
+
+To mitigate dropped packets, make sure to increase the Linux kernel receive buffer limit:
+
+    # sysctl -w net.core.rmem_max=$((1024*1024*16))
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Netflow Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-versions>> |<<array,array>>|No
+|=======================================================================
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-cache_save_path"]
+===== `cache_save_path`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Enables the template cache and saves it in the specified directory. This
+minimizes data loss after Logstash restarts because the codec doesn't have to
+wait for the arrival of templates, but can instead reload templates received
+during previous runs.
+
+Template caches are saved as:
+
+* <path>/netflow_templates.cache for Netflow v9 templates.
+* <path>/ipfix_templates.cache for IPFIX templates.
+
+[id="{version}-plugins-{type}s-{plugin}-cache_ttl"]
+===== `cache_ttl`
+
+ * Value type is <<number,number>>
+ * Default value is `4000`
+
+Netflow v9/v10 template cache TTL (seconds).
+
+[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"]
+===== `include_flowset_id`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+This option only makes sense for IPFIX; Netflow v9 already includes the
+flowset_id. Setting it to true will include the flowset_id in events, which
+allows you to work with sequences, for instance with the aggregate filter.
+
+[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"]
+===== `ipfix_definitions`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Override the YAML file containing IPFIX field definitions.
+
+Very similar to the Netflow version, except there is a top-level Private
+Enterprise Number (PEN) key added:
+
+[source,yaml]
+--------------------------
+pen:
+  id:
+  - :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string
+  - :name
+  id:
+  - :skip
+--------------------------
+
+There is an implicit PEN 0 for the standard fields.
+
+See <https://github.com/logstash-plugins/logstash-codec-netflow/blob/master/lib/logstash/codecs/netflow/ipfix.yaml> for the base set.
+
+[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"]
+===== `netflow_definitions`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Override the YAML file containing Netflow field definitions.
+
+Each Netflow field is defined like so:
+
+[source,yaml]
+--------------------------
+id:
+- default length in bytes
+- :name
+id:
+- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string
+- :name
+id:
+- :skip
+--------------------------
+
+See <https://github.com/logstash-plugins/logstash-codec-netflow/blob/master/lib/logstash/codecs/netflow/netflow.yaml> for the base set.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <<string,string>>
+ * Default value is `"netflow"`
+
+Specify the field into which you want to store the Netflow data.
+
+[id="{version}-plugins-{type}s-{plugin}-versions"]
+===== `versions`
+
+ * Value type is <<array,array>>
+ * Default value is `[5, 9, 10]`
+
+Specify which Netflow versions you will accept.
+
+
diff --git a/docs/versioned-plugins/codecs/netflow-v3.9.0.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.9.0.asciidoc
new file mode 100644
index 000000000..6a0af3dc4
--- /dev/null
+++ b/docs/versioned-plugins/codecs/netflow-v3.9.0.asciidoc
@@ -0,0 +1,209 @@
+:plugin: netflow
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.9.0
+:release_date: 2017-12-03
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.9.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Netflow codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows.
+
+==== Supported Netflow/IPFIX exporters
+
+This codec supports:
+
+* Netflow v5
+* Netflow v9
+* IPFIX
+
+The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec:
+
+[cols="6,^2,^2,^2,12",options="header"]
+|===========================================================================================
+|Netflow exporter | v5 | v9 | IPFIX | Remarks
+|Barracuda Firewall | | | y |
+|Cisco ASA | | y | |
+|Cisco ASR 1k | | | n | Fails because of duplicate fields
+|Cisco ASR 9k | | y | |
+|Cisco IOS 12.x | | y | |
+|Cisco ISR w/ HSL | | n | | Fails because of duplicate fields, see: https://github.com/logstash-plugins/logstash-codec-netflow/issues/93
+|Cisco WLC | | y | |
+|Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown
+|fprobe | y | | |
+|Fortigate FortiOS | | y | |
+|ipt_NETFLOW | y | y | y |
+|Juniper MX80 | y | | | SW > 12.3R8
+|Mikrotik | y | | y | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow
+|nProbe | y | y | y | L7 DPI fields now also supported
+|OpenBSD pflow | y | n | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4
+|Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd
+|Streamcore Streamgroomer | | y | |
+|Palo Alto PAN-OS | | y | |
+|Ubiquiti Edgerouter X | | y | | With MPLS labels
+|VMware VDS | | | y | Still some unknown fields
+|YAF | | | y | With silk and applabel, but no DPI plugin support
+|vIPtela | | | y |
+|===========================================================================================
+
+==== Usage
+
+Example Logstash configuration that will listen on 2055/udp for Netflow v5, v9, and IPFIX:
+
+[source, ruby]
+--------------------------
+input {
+  udp {
+    port => 2055
+    codec => netflow
+  }
+}
+--------------------------
+
+For high-performance production environments, the configuration below will decode up to 15000 flows/sec on a dedicated 16 CPU instance. If your total flowrate exceeds 15000 flows/sec, you should use multiple Logstash instances.
+
+[source, ruby]
+--------------------------
+input {
+  udp {
+    port => 2055
+    codec => netflow
+    receive_buffer_bytes => 16777216
+    workers => 16
+  }
+}
+--------------------------
+
+To mitigate dropped packets, make sure to increase the Linux kernel receive buffer limit:
+
+    # sysctl -w net.core.rmem_max=$((1024*1024*16))
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Netflow Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-versions>> |<<array,array>>|No
+|=======================================================================
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-cache_save_path"]
+===== `cache_save_path`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Enables the template cache and saves it in the specified directory. 
This
+minimizes data loss after Logstash restarts because the codec doesn't have to
+wait for the arrival of templates, but can instead reload templates received
+during previous runs.
+
+Template caches are saved as:
+
+* <path>/netflow_templates.cache for Netflow v9 templates.
+* <path>/ipfix_templates.cache for IPFIX templates.
+
+[id="{version}-plugins-{type}s-{plugin}-cache_ttl"]
+===== `cache_ttl`
+
+ * Value type is <<number,number>>
+ * Default value is `4000`
+
+Netflow v9/v10 template cache TTL (seconds).
+
+[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"]
+===== `include_flowset_id`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+This option only makes sense for IPFIX; Netflow v9 already includes the
+flowset_id. Setting it to true will include the flowset_id in events, which
+allows you to work with sequences, for instance with the aggregate filter.
+
+[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"]
+===== `ipfix_definitions`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Override the YAML file containing IPFIX field definitions.
+
+Very similar to the Netflow version, except there is a top-level Private
+Enterprise Number (PEN) key added:
+
+[source,yaml]
+--------------------------
+pen:
+  id:
+  - :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string
+  - :name
+  id:
+  - :skip
+--------------------------
+
+There is an implicit PEN 0 for the standard fields.
+
+See <https://github.com/logstash-plugins/logstash-codec-netflow/blob/master/lib/logstash/codecs/netflow/ipfix.yaml> for the base set.
+
+[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"]
+===== `netflow_definitions`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Override the YAML file containing Netflow field definitions.
+
+Each Netflow field is defined like so:
+
+[source,yaml]
+--------------------------
+id:
+- default length in bytes
+- :name
+id:
+- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string
+- :name
+id:
+- :skip
+--------------------------
+
+See <https://github.com/logstash-plugins/logstash-codec-netflow/blob/master/lib/logstash/codecs/netflow/netflow.yaml> for the base set.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <<string,string>>
+ * Default value is `"netflow"`
+
+Specify the field into which you want to store the Netflow data.
+
+[id="{version}-plugins-{type}s-{plugin}-versions"]
+===== `versions`
+
+ * Value type is <<array,array>>
+ * Default value is `[5, 9, 10]`
+
+Specify which Netflow versions you will accept.
+
+
diff --git a/docs/versioned-plugins/codecs/netflow-v3.9.1.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.9.1.asciidoc
new file mode 100644
index 000000000..0352129a1
--- /dev/null
+++ b/docs/versioned-plugins/codecs/netflow-v3.9.1.asciidoc
@@ -0,0 +1,209 @@
+:plugin: netflow
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.9.1
+:release_date: 2017-12-30
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.9.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Netflow codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows.
+
+==== Supported Netflow/IPFIX exporters
+
+This codec supports:
+
+* Netflow v5
+* Netflow v9
+* IPFIX
+
+The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec:
+
+[cols="6,^2,^2,^2,12",options="header"]
+|===========================================================================================
+|Netflow exporter | v5 | v9 | IPFIX | Remarks
+|Barracuda Firewall | | | y |
+|Cisco ASA | | y | |
+|Cisco ASR 1k | | | n | Fails because of duplicate fields
+|Cisco ASR 9k | | y | |
+|Cisco IOS 12.x | | y | |
+|Cisco ISR w/ HSL | | n | | Fails because of duplicate fields, see: https://github.com/logstash-plugins/logstash-codec-netflow/issues/93
+|Cisco WLC | | y | |
+|Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown
+|fprobe | y | | |
+|Fortigate FortiOS | | y | |
+|ipt_NETFLOW | y | y | y |
+|Juniper MX80 | y | | | SW > 12.3R8
+|Mikrotik | y | | y | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow
+|nProbe | y | y | y | L7 DPI fields now also supported
+|OpenBSD pflow | y | n | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4
+|Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd
+|Streamcore Streamgroomer | | y | |
+|Palo Alto PAN-OS | | y | |
+|Ubiquiti Edgerouter X | | y | | With MPLS labels
+|VMware VDS | | | y | Still some unknown fields
+|YAF | | | y | With silk and applabel, but no DPI plugin support
+|vIPtela | | | y |
+|===========================================================================================
+
+==== Usage
+
+Example Logstash configuration that will listen on 2055/udp for Netflow v5, v9, and IPFIX:
+
+[source, ruby]
+--------------------------
+input {
+  udp {
+    port => 2055
+    codec => netflow
+  }
+}
+--------------------------
+
+For high-performance production environments, the configuration below will decode up to 15000 flows/sec on a dedicated 16 CPU instance. If your total flowrate exceeds 15000 flows/sec, you should use multiple Logstash instances.
+
+[source, ruby]
+--------------------------
+input {
+  udp {
+    port => 2055
+    codec => netflow
+    receive_buffer_bytes => 16777216
+    workers => 16
+  }
+}
+--------------------------
+
+To mitigate dropped packets, make sure to increase the Linux kernel receive buffer limit:
+
+    # sysctl -w net.core.rmem_max=$((1024*1024*16))
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Netflow Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-versions>> |<<array,array>>|No
+|=======================================================================
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-cache_save_path"]
+===== `cache_save_path`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Enables the template cache and saves it in the specified directory. 
This
+minimizes data loss after Logstash restarts because the codec doesn't have to
+wait for the arrival of templates, but can instead reload templates received
+during previous runs.
+
+Template caches are saved as:
+
+* <path>/netflow_templates.cache for Netflow v9 templates.
+* <path>/ipfix_templates.cache for IPFIX templates.
+
+[id="{version}-plugins-{type}s-{plugin}-cache_ttl"]
+===== `cache_ttl`
+
+ * Value type is <<number,number>>
+ * Default value is `4000`
+
+Netflow v9/v10 template cache TTL (seconds).
+
+[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"]
+===== `include_flowset_id`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+This option only makes sense for IPFIX; Netflow v9 already includes the
+flowset_id. Setting it to true will include the flowset_id in events, which
+allows you to work with sequences, for instance with the aggregate filter.
+
+[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"]
+===== `ipfix_definitions`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Override the YAML file containing IPFIX field definitions.
+
+Very similar to the Netflow version, except there is a top-level Private
+Enterprise Number (PEN) key added:
+
+[source,yaml]
+--------------------------
+pen:
+  id:
+  - :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string
+  - :name
+  id:
+  - :skip
+--------------------------
+
+There is an implicit PEN 0 for the standard fields.
+
+See <https://github.com/logstash-plugins/logstash-codec-netflow/blob/master/lib/logstash/codecs/netflow/ipfix.yaml> for the base set.
+
+[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"]
+===== `netflow_definitions`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Override the YAML file containing Netflow field definitions.
+
+Each Netflow field is defined like so:
+
+[source,yaml]
+--------------------------
+id:
+- default length in bytes
+- :name
+id:
+- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string
+- :name
+id:
+- :skip
+--------------------------
+
+See <https://github.com/logstash-plugins/logstash-codec-netflow/blob/master/lib/logstash/codecs/netflow/netflow.yaml> for the base set.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <<string,string>>
+ * Default value is `"netflow"`
+
+Specify the field into which you want to store the Netflow data.
+
+[id="{version}-plugins-{type}s-{plugin}-versions"]
+===== `versions`
+
+ * Value type is <<array,array>>
+ * Default value is `[5, 9, 10]`
+
+Specify which Netflow versions you will accept.
+
+
diff --git a/docs/versioned-plugins/codecs/nmap-index.asciidoc b/docs/versioned-plugins/codecs/nmap-index.asciidoc
new file mode 100644
index 000000000..9826c0a6e
--- /dev/null
+++ b/docs/versioned-plugins/codecs/nmap-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: nmap
+:type: codec
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <<v0.0.21-plugins-codecs-nmap,v0.0.21>> | 2017-11-07
+| <<v0.0.20-plugins-codecs-nmap,v0.0.20>> | 2017-08-15
+| <<v0.0.19-plugins-codecs-nmap,v0.0.19>> | 2017-06-23
+|=======================================================================
+
+include::nmap-v0.0.21.asciidoc[]
+include::nmap-v0.0.20.asciidoc[]
+include::nmap-v0.0.19.asciidoc[]
+
diff --git a/docs/versioned-plugins/codecs/nmap-v0.0.19.asciidoc b/docs/versioned-plugins/codecs/nmap-v0.0.19.asciidoc
new file mode 100644
index 000000000..04ae86079
--- /dev/null
+++ b/docs/versioned-plugins/codecs/nmap-v0.0.19.asciidoc
@@ -0,0 +1,81 @@
+:plugin: nmap
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT! 
+
+///////////////////////////////////////////
+:version: v0.0.19
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-nmap/blob/v0.0.19/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Nmap codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec is used to parse https://nmap.org/[nmap] output data, which is serialized in XML format. Nmap ("Network Mapper") is a free and open source utility for network discovery and security auditing.
+For more information on nmap, see https://nmap.org/.
+
+This codec can only be used for decoding data.
+
+Event types are listed below:
+
+* `nmap_scan_metadata`: An object containing top-level information about the scan, including how many hosts were up, and how many were down. Useful for the case where you need to check whether a DNS-based hostname does not resolve, where both those numbers will be zero.
+* `nmap_host`: One event is created per host. The full data covering an individual host, including open ports and traceroute information as a nested structure.
+* `nmap_port`: One event is created per host/port. This duplicates data already in `nmap_host`; it exists for the case where you want to model ports as separate documents in Elasticsearch (which Kibana prefers).
+* `nmap_traceroute_link`: One of these is output per traceroute 'connection', with a `from` and a `to` object describing each hop. Note that traceroute hop data is not always correct because each tracing ICMP packet may take a different route. Also very useful for Kibana visualizations. 
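+
+Because this codec only decodes, it needs an input that can receive raw nmap
+XML. A minimal sketch, assuming the http input plugin and an arbitrary port
+(both are assumptions, not requirements of the codec):
+
+[source, ruby]
+--------------------------
+input {
+  http {
+    port => 8000   # illustrative port for receiving posted nmap XML
+    codec => nmap
+  }
+}
+--------------------------
+
+With such a pipeline running, a scan such as `nmap -oX - scanme.nmap.org | curl http://localhost:8000 --data-binary @-` could post its XML output for decoding (the host and port are placeholders).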
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Nmap Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-emit_hosts>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-emit_ports>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-emit_scan_metadata>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-emit_traceroute_links>> |<<boolean,boolean>>|No
+|=======================================================================
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-emit_hosts"]
+===== `emit_hosts`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Emit all host data as a nested document (including ports + traceroutes) with the type 'nmap_fullscan'.
+
+[id="{version}-plugins-{type}s-{plugin}-emit_ports"]
+===== `emit_ports`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Emit each port as a separate document with type 'nmap_port'.
+
+[id="{version}-plugins-{type}s-{plugin}-emit_scan_metadata"]
+===== `emit_scan_metadata`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Emit scan metadata.
+
+[id="{version}-plugins-{type}s-{plugin}-emit_traceroute_links"]
+===== `emit_traceroute_links`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Emit each hop_tuple of the traceroute with type 'nmap_traceroute_link'.
+
+
diff --git a/docs/versioned-plugins/codecs/nmap-v0.0.20.asciidoc b/docs/versioned-plugins/codecs/nmap-v0.0.20.asciidoc
new file mode 100644
index 000000000..6537a5a58
--- /dev/null
+++ b/docs/versioned-plugins/codecs/nmap-v0.0.20.asciidoc
@@ -0,0 +1,81 @@
+:plugin: nmap
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v0.0.20
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-nmap/blob/v0.0.20/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Nmap codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec is used to parse https://nmap.org/[nmap] output data, which is serialized in XML format. Nmap ("Network Mapper") is a free and open source utility for network discovery and security auditing.
+For more information on nmap, see https://nmap.org/.
+
+This codec can only be used for decoding data.
+
+Event types are listed below:
+
+* `nmap_scan_metadata`: An object containing top-level information about the scan, including how many hosts were up, and how many were down. Useful for the case where you need to check whether a DNS-based hostname does not resolve, where both those numbers will be zero.
+* `nmap_host`: One event is created per host. The full data covering an individual host, including open ports and traceroute information as a nested structure.
+* `nmap_port`: One event is created per host/port. This duplicates data already in `nmap_host`; it exists for the case where you want to model ports as separate documents in Elasticsearch (which Kibana prefers).
+* `nmap_traceroute_link`: One of these is output per traceroute 'connection', with a `from` and a `to` object describing each hop. Note that traceroute hop data is not always correct because each tracing ICMP packet may take a different route. 
Also very useful for Kibana visualizations.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Nmap Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-emit_hosts>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-emit_ports>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-emit_scan_metadata>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-emit_traceroute_links>> |<>|No
+|=======================================================================
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-emit_hosts"]
+===== `emit_hosts`
+
+ * Value type is <>
+ * Default value is `true`
+
+Emit all host data as a nested document (including ports + traceroutes) with the type 'nmap_fullscan'
+
+[id="{version}-plugins-{type}s-{plugin}-emit_ports"]
+===== `emit_ports`
+
+ * Value type is <>
+ * Default value is `true`
+
+Emit each port as a separate document with type 'nmap_port'
+
+[id="{version}-plugins-{type}s-{plugin}-emit_scan_metadata"]
+===== `emit_scan_metadata`
+
+ * Value type is <>
+ * Default value is `true`
+
+Emit scan metadata
+
+[id="{version}-plugins-{type}s-{plugin}-emit_traceroute_links"]
+===== `emit_traceroute_links`
+
+ * Value type is <>
+ * Default value is `true`
+
+Emit each hop_tuple of the traceroute with type 'nmap_traceroute_link'
+
+
diff --git a/docs/versioned-plugins/codecs/nmap-v0.0.21.asciidoc b/docs/versioned-plugins/codecs/nmap-v0.0.21.asciidoc
new file mode 100644
index 000000000..d8199d52d
--- /dev/null
+++ b/docs/versioned-plugins/codecs/nmap-v0.0.21.asciidoc
@@ -0,0 +1,81 @@
+:plugin: nmap
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v0.0.21
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-nmap/blob/v0.0.21/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Nmap codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec is used to parse https://nmap.org/[nmap] output data, which is serialized in XML format. Nmap ("Network Mapper") is a free and open source utility for network discovery and security auditing.
+For more information on nmap, see https://nmap.org/.
+
+This codec can only be used for decoding data.
+
+The following event types are emitted:
+
+`nmap_scan_metadata`: An object containing top-level information about the scan, including how many hosts were up and how many were down. Useful when you need to check whether a DNS-based hostname resolves; if it does not, both of those numbers will be zero.
+`nmap_host`: One event is created per host. The full data covering an individual host, including open ports and traceroute information, as a nested structure.
+`nmap_port`: One event is created per host/port. This duplicates data already in `nmap_host`; it is useful when you want to model ports as separate documents in Elasticsearch (which Kibana prefers).
+`nmap_traceroute_link`: One of these is output per traceroute 'connection', with a `from` and a `to` object describing each hop.
Note that traceroute hop data is not always correct because each tracing ICMP packet may take a different route. Also very useful for Kibana visualizations.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Nmap Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-emit_hosts>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-emit_ports>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-emit_scan_metadata>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-emit_traceroute_links>> |<>|No
+|=======================================================================
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-emit_hosts"]
+===== `emit_hosts`
+
+ * Value type is <>
+ * Default value is `true`
+
+Emit all host data as a nested document (including ports + traceroutes) with the type 'nmap_fullscan'
+
+[id="{version}-plugins-{type}s-{plugin}-emit_ports"]
+===== `emit_ports`
+
+ * Value type is <>
+ * Default value is `true`
+
+Emit each port as a separate document with type 'nmap_port'
+
+[id="{version}-plugins-{type}s-{plugin}-emit_scan_metadata"]
+===== `emit_scan_metadata`
+
+ * Value type is <>
+ * Default value is `true`
+
+Emit scan metadata
+
+[id="{version}-plugins-{type}s-{plugin}-emit_traceroute_links"]
+===== `emit_traceroute_links`
+
+ * Value type is <>
+ * Default value is `true`
+
+Emit each hop_tuple of the traceroute with type 'nmap_traceroute_link'
+
+
diff --git a/docs/versioned-plugins/codecs/oldlogstashjson-index.asciidoc b/docs/versioned-plugins/codecs/oldlogstashjson-index.asciidoc
new file mode 100644
index 000000000..49f08435a
--- /dev/null
+++ b/docs/versioned-plugins/codecs/oldlogstashjson-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: oldlogstashjson
+:type: codec
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::oldlogstashjson-v2.0.7.asciidoc[]
+include::oldlogstashjson-v2.0.5.asciidoc[]
+
diff --git a/docs/versioned-plugins/codecs/oldlogstashjson-v2.0.5.asciidoc b/docs/versioned-plugins/codecs/oldlogstashjson-v2.0.5.asciidoc
new file mode 100644
index 000000000..172324e5c
--- /dev/null
+++ b/docs/versioned-plugins/codecs/oldlogstashjson-v2.0.5.asciidoc
@@ -0,0 +1,31 @@
+:plugin: oldlogstashjson
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.5
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-oldlogstashjson/blob/v2.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Oldlogstashjson codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Oldlogstashjson Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+|=======================================================================
diff --git a/docs/versioned-plugins/codecs/oldlogstashjson-v2.0.7.asciidoc b/docs/versioned-plugins/codecs/oldlogstashjson-v2.0.7.asciidoc
new file mode 100644
index 000000000..74a072761
--- /dev/null
+++ b/docs/versioned-plugins/codecs/oldlogstashjson-v2.0.7.asciidoc
@@ -0,0 +1,26 @@
+:plugin: oldlogstashjson
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.7
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-oldlogstashjson/blob/v2.0.7/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Oldlogstashjson codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+deprecated[5.0]
+
+Reads Logstash JSON in the schema used by Logstash versions earlier than 1.2.0.
+
diff --git a/docs/versioned-plugins/codecs/plain-index.asciidoc b/docs/versioned-plugins/codecs/plain-index.asciidoc
new file mode 100644
index 000000000..86af7c0e5
--- /dev/null
+++ b/docs/versioned-plugins/codecs/plain-index.asciidoc
@@ -0,0 +1,18 @@
+:plugin: plain
+:type: codec
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-12-19
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::plain-v3.0.6.asciidoc[]
+include::plain-v3.0.5.asciidoc[]
+include::plain-v3.0.4.asciidoc[]
+include::plain-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/codecs/plain-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/plain-v3.0.3.asciidoc
new file mode 100644
index 000000000..1571305b8
--- /dev/null
+++ b/docs/versioned-plugins/codecs/plain-v3.0.3.asciidoc
@@ -0,0 +1,65 @@
+:plugin: plain
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-plain/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Plain codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The "plain" codec is for plain text with no delimiting between events.
+ +This is mainly useful on inputs and outputs that already have a defined +framing in their transport protocol (such as zeromq, rabbitmq, redis, etc) + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Plain Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, 
`macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+ * Default value is `"UTF-8"`
+
+The character encoding used in this input. Examples include `UTF-8`
+and `cp1252`.
+
+This setting is useful if your log files are in `Latin-1` (also known as `cp1252`)
+or in a character set other than `UTF-8`.
+
+This setting only affects "plain" format logs, since JSON is already `UTF-8`.
+
+[id="{version}-plugins-{type}s-{plugin}-format"]
+===== `format`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the message you wish to emit for each event. This supports `sprintf`
+strings.
+
+This setting only affects outputs (encoding of events).
+
+
diff --git a/docs/versioned-plugins/codecs/plain-v3.0.4.asciidoc b/docs/versioned-plugins/codecs/plain-v3.0.4.asciidoc
new file mode 100644
index 000000000..35533ee82
--- /dev/null
+++ b/docs/versioned-plugins/codecs/plain-v3.0.4.asciidoc
@@ -0,0 +1,65 @@
+:plugin: plain
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-plain/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Plain codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The "plain" codec is for plain text with no delimiting between events.
+ +This is mainly useful on inputs and outputs that already have a defined +framing in their transport protocol (such as zeromq, rabbitmq, redis, etc) + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Plain Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, 
`macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+ * Default value is `"UTF-8"`
+
+The character encoding used in this input. Examples include `UTF-8`
+and `cp1252`.
+
+This setting is useful if your log files are in `Latin-1` (also known as `cp1252`)
+or in a character set other than `UTF-8`.
+
+This setting only affects "plain" format logs, since JSON is already `UTF-8`.
+
+[id="{version}-plugins-{type}s-{plugin}-format"]
+===== `format`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the message you wish to emit for each event. This supports `sprintf`
+strings.
+
+This setting only affects outputs (encoding of events).
+
+
diff --git a/docs/versioned-plugins/codecs/plain-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/plain-v3.0.5.asciidoc
new file mode 100644
index 000000000..a5b1fc125
--- /dev/null
+++ b/docs/versioned-plugins/codecs/plain-v3.0.5.asciidoc
@@ -0,0 +1,65 @@
+:plugin: plain
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-plain/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Plain codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The "plain" codec is for plain text with no delimiting between events.
+ +This is mainly useful on inputs and outputs that already have a defined +framing in their transport protocol (such as zeromq, rabbitmq, redis, etc) + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Plain Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, 
`macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+ * Default value is `"UTF-8"`
+
+The character encoding used in this input. Examples include `UTF-8`
+and `cp1252`.
+
+This setting is useful if your log files are in `Latin-1` (also known as `cp1252`)
+or in a character set other than `UTF-8`.
+
+This setting only affects "plain" format logs, since JSON is already `UTF-8`.
+
+[id="{version}-plugins-{type}s-{plugin}-format"]
+===== `format`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the message you wish to emit for each event. This supports `sprintf`
+strings.
+
+This setting only affects outputs (encoding of events).
+
+
diff --git a/docs/versioned-plugins/codecs/plain-v3.0.6.asciidoc b/docs/versioned-plugins/codecs/plain-v3.0.6.asciidoc
new file mode 100644
index 000000000..4f79e2df5
--- /dev/null
+++ b/docs/versioned-plugins/codecs/plain-v3.0.6.asciidoc
@@ -0,0 +1,65 @@
+:plugin: plain
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.6
+:release_date: 2017-12-19
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-plain/blob/v3.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Plain codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The "plain" codec is for plain text with no delimiting between events.
+ +This is mainly useful on inputs and outputs that already have a defined +framing in their transport protocol (such as zeromq, rabbitmq, redis, etc) + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Plain Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, 
`macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
+ * Default value is `"UTF-8"`
+
+The character encoding used in this input. Examples include `UTF-8`
+and `cp1252`.
+
+This setting is useful if your log files are in `Latin-1` (also known as `cp1252`)
+or in a character set other than `UTF-8`.
+
+This setting only affects "plain" format logs, since JSON is already `UTF-8`.
+
+[id="{version}-plugins-{type}s-{plugin}-format"]
+===== `format`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the message you wish to emit for each event. This supports `sprintf`
+strings.
+
+This setting only affects outputs (encoding of events).
+
+
diff --git a/docs/versioned-plugins/codecs/pretty-index.asciidoc b/docs/versioned-plugins/codecs/pretty-index.asciidoc
new file mode 100644
index 000000000..13466401f
--- /dev/null
+++ b/docs/versioned-plugins/codecs/pretty-index.asciidoc
@@ -0,0 +1,10 @@
+:plugin: pretty
+:type: codec
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+|=======================================================================
+
+
diff --git a/docs/versioned-plugins/codecs/protobuf-index.asciidoc b/docs/versioned-plugins/codecs/protobuf-index.asciidoc
new file mode 100644
index 000000000..c80ffa368
--- /dev/null
+++ b/docs/versioned-plugins/codecs/protobuf-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: protobuf
+:type: codec
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::protobuf-v1.0.3.asciidoc[]
+include::protobuf-v1.0.2.asciidoc[]
+
diff --git a/docs/versioned-plugins/codecs/protobuf-v1.0.2.asciidoc b/docs/versioned-plugins/codecs/protobuf-v1.0.2.asciidoc
new file mode 100644
index 000000000..f92ad5190
--- /dev/null
+++ b/docs/versioned-plugins/codecs/protobuf-v1.0.2.asciidoc
@@ -0,0 +1,106 @@
+:plugin: protobuf
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v1.0.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-protobuf/blob/v1.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Protobuf codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec converts protobuf encoded messages into Logstash events and vice versa.
+
+Requires the protobuf definitions as Ruby files. You can create those using the https://github.com/codekitchen/ruby-protocol-buffers[ruby-protoc compiler].
+
+The following shows a usage example for decoding events from a kafka stream:
+[source,ruby]
+kafka
+{
+  zk_connect => "127.0.0.1"
+  topic_id => "your_topic_goes_here"
+  codec => protobuf
+  {
+    class_name => "Animal::Unicorn"
+    include_path => ['/path/to/protobuf/definitions/UnicornProtobuf.pb.rb']
+  }
+}
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Protobuf Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-class_name>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-include_path>> |<>|Yes
+|=======================================================================
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-class_name"]
+===== `class_name`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Name of the class to decode.
+If your protobuf definition contains modules, prepend them to the class name with double colons like so:
+[source,ruby]
+class_name => "Foods::Dairy::Cheese"
+
+This corresponds to a protobuf definition starting as follows:
+[source,ruby]
+module Foods
+  module Dairy
+    class Cheese
+      # here are your field definitions.
+
+If your class references other definitions, you only need to add the main class here.
+
+[id="{version}-plugins-{type}s-{plugin}-include_path"]
+===== `include_path`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+List of absolute paths to files with protobuf definitions.
+When using more than one file, make sure to arrange the files in reverse order of dependency so that each class is loaded before it is
+referred to by another.
+
+Example: a class _Cheese_ referencing another protobuf class _Milk_
+[source,ruby]
+module Foods
+  module Dairy
+    class Cheese
+      set_fully_qualified_name "Foods.Dairy.Cheese"
+      optional ::Foods::Cheese::Milk, :milk, 1
+      optional :int64, :unique_id, 2
+      # here be more field definitions
+
+would be configured as
+[source,ruby]
+include_path => ['/path/to/protobuf/definitions/Milk.pb.rb','/path/to/protobuf/definitions/Cheese.pb.rb']
+
+When using the codec in an output plugin:
+* make sure to include all the desired fields in the protobuf definition, including timestamp.
+  Remove fields that are not part of the protobuf definition from the event by using the mutate filter.
+* the @ symbol is currently not supported in field names when loading the protobuf definitions for encoding. Make sure to call the timestamp field "timestamp"
+  instead of "@timestamp" in the protobuf file. Logstash event fields will be stripped of the leading @ before conversion.
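+
+For the encoding direction, a configuration might look like the following
+sketch (the kafka output and topic name are illustrative assumptions; the
+class must describe the fields of the events being serialized):
+[source,ruby]
+kafka
+{
+  topic_id => "unicorn_events"
+  codec => protobuf
+  {
+    class_name => "Animal::Unicorn"
+    include_path => ['/path/to/protobuf/definitions/UnicornProtobuf.pb.rb']
+  }
+}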
+
+
+
diff --git a/docs/versioned-plugins/codecs/protobuf-v1.0.3.asciidoc b/docs/versioned-plugins/codecs/protobuf-v1.0.3.asciidoc
new file mode 100644
index 000000000..2db37c2c8
--- /dev/null
+++ b/docs/versioned-plugins/codecs/protobuf-v1.0.3.asciidoc
@@ -0,0 +1,106 @@
+:plugin: protobuf
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v1.0.3
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-protobuf/blob/v1.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Protobuf codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This codec converts protobuf encoded messages into Logstash events and vice versa.
+
+Requires the protobuf definitions as Ruby files. You can create those using the https://github.com/codekitchen/ruby-protocol-buffers[ruby-protoc compiler].
+
+The following shows a usage example for decoding events from a kafka stream:
+[source,ruby]
+kafka
+{
+  zk_connect => "127.0.0.1"
+  topic_id => "your_topic_goes_here"
+  codec => protobuf
+  {
+    class_name => "Animal::Unicorn"
+    include_path => ['/path/to/protobuf/definitions/UnicornProtobuf.pb.rb']
+  }
+}
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Protobuf Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-class_name>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-include_path>> |<>|Yes
+|=======================================================================
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-class_name"]
+===== `class_name`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Name of the class to decode.
+If your protobuf definition contains modules, prepend them to the class name with double colons like so:
+[source,ruby]
+class_name => "Foods::Dairy::Cheese"
+
+This corresponds to a protobuf definition starting as follows:
+[source,ruby]
+module Foods
+  module Dairy
+    class Cheese
+      # here are your field definitions.
+
+If your class references other definitions, you only need to add the main class here.
+
+[id="{version}-plugins-{type}s-{plugin}-include_path"]
+===== `include_path`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+List of absolute paths to files with protobuf definitions.
+When using more than one file, make sure to arrange the files in reverse order of dependency so that each class is loaded before it is
+referred to by another.
+
+Example: a class _Cheese_ referencing another protobuf class _Milk_
+[source,ruby]
+module Foods
+  module Dairy
+    class Cheese
+      set_fully_qualified_name "Foods.Dairy.Cheese"
+      optional ::Foods::Cheese::Milk, :milk, 1
+      optional :int64, :unique_id, 2
+      # here be more field definitions
+
+would be configured as
+[source,ruby]
+include_path => ['/path/to/protobuf/definitions/Milk.pb.rb','/path/to/protobuf/definitions/Cheese.pb.rb']
+
+When using the codec in an output plugin:
+* make sure to include all the desired fields in the protobuf definition, including timestamp.
+  Remove fields that are not part of the protobuf definition from the event by using the mutate filter.
+* the @ symbol is currently not supported in field names when loading the protobuf definitions for encoding. Make sure to call the timestamp field "timestamp"
+  instead of "@timestamp" in the protobuf file. Logstash event fields will be stripped of the leading @ before conversion.
+
+
+
diff --git a/docs/versioned-plugins/codecs/rubydebug-index.asciidoc b/docs/versioned-plugins/codecs/rubydebug-index.asciidoc
new file mode 100644
index 000000000..338dc8afd
--- /dev/null
+++ b/docs/versioned-plugins/codecs/rubydebug-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: rubydebug
+:type: codec
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-14
+| <> | 2017-08-21
+| <> | 2017-06-23
+|=======================================================================
+
+include::rubydebug-v3.0.5.asciidoc[]
+include::rubydebug-v3.0.4.asciidoc[]
+include::rubydebug-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/codecs/rubydebug-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/rubydebug-v3.0.3.asciidoc
new file mode 100644
index 000000000..6137822a0
--- /dev/null
+++ b/docs/versioned-plugins/codecs/rubydebug-v3.0.3.asciidoc
@@ -0,0 +1,46 @@
+:plugin: rubydebug
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-rubydebug/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Rubydebug codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The rubydebug codec will output your Logstash event data using
+the Ruby Awesome Print library.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Rubydebug Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-metadata>> |<>|No
+|=======================================================================
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-metadata"]
+===== `metadata`
+
+ * Value type is <>
+ * Default value is `false`
+
+Should the event's metadata be included?
+
+
diff --git a/docs/versioned-plugins/codecs/rubydebug-v3.0.4.asciidoc b/docs/versioned-plugins/codecs/rubydebug-v3.0.4.asciidoc
new file mode 100644
index 000000000..a77db3a2d
--- /dev/null
+++ b/docs/versioned-plugins/codecs/rubydebug-v3.0.4.asciidoc
@@ -0,0 +1,46 @@
+:plugin: rubydebug
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-08-21
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-rubydebug/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Rubydebug codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The rubydebug codec will output your Logstash event data using +the Ruby Awesome Print library. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Rubydebug Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-metadata>> |<>|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-metadata"] +===== `metadata` + + * Value type is <> + * Default value is `false` + +Should the event's metadata be included? + + diff --git a/docs/versioned-plugins/codecs/rubydebug-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/rubydebug-v3.0.5.asciidoc new file mode 100644 index 000000000..db152424a --- /dev/null +++ b/docs/versioned-plugins/codecs/rubydebug-v3.0.5.asciidoc @@ -0,0 +1,46 @@ +:plugin: rubydebug +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-11-14 +:changelog_url: https://github.com/logstash-plugins/logstash-codec-rubydebug/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Rubydebug codec plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The rubydebug codec will output your Logstash event data using +the Ruby Awesome Print library. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Rubydebug Codec Configuration Options + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-metadata>> |<>|No +|======================================================================= + +  + +[id="{version}-plugins-{type}s-{plugin}-metadata"] +===== `metadata` + + * Value type is <> + * Default value is `false` + +Should the event's metadata be included? + + diff --git a/docs/versioned-plugins/codecs/s3plain-index.asciidoc b/docs/versioned-plugins/codecs/s3plain-index.asciidoc new file mode 100644 index 000000000..33ec45b85 --- /dev/null +++ b/docs/versioned-plugins/codecs/s3plain-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: s3plain +:type: codec + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-12-19 +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::s3plain-v2.0.7.asciidoc[] +include::s3plain-v2.0.6.asciidoc[] +include::s3plain-v2.0.5.asciidoc[] + diff --git a/docs/versioned-plugins/codecs/s3plain-v2.0.5.asciidoc b/docs/versioned-plugins/codecs/s3plain-v2.0.5.asciidoc new file mode 100644 index 000000000..acb946692 --- /dev/null +++ b/docs/versioned-plugins/codecs/s3plain-v2.0.5.asciidoc @@ -0,0 +1,32 @@ +:plugin: s3_plain +:type: codec + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+:version: v2.0.5
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-s3plain/blob/v2.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== S3_plain codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The "s3_plain" codec is used for backward compatibility with previous versions of the S3 output.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== S3_plain Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+|=======================================================================
diff --git a/docs/versioned-plugins/codecs/s3plain-v2.0.6.asciidoc b/docs/versioned-plugins/codecs/s3plain-v2.0.6.asciidoc
new file mode 100644
index 000000000..45c9b2034
--- /dev/null
+++ b/docs/versioned-plugins/codecs/s3plain-v2.0.6.asciidoc
@@ -0,0 +1,32 @@
+:plugin: s3_plain
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.6
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-s3plain/blob/v2.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== S3_plain codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The "s3_plain" codec is used for backward compatibility with previous versions of the S3 output.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== S3_plain Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+|=======================================================================
diff --git a/docs/versioned-plugins/codecs/s3plain-v2.0.7.asciidoc b/docs/versioned-plugins/codecs/s3plain-v2.0.7.asciidoc
new file mode 100644
index 000000000..d1bcbc321
--- /dev/null
+++ b/docs/versioned-plugins/codecs/s3plain-v2.0.7.asciidoc
@@ -0,0 +1,32 @@
+:plugin: s3_plain
+:type: codec
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.7
+:release_date: 2017-12-19
+:changelog_url: https://github.com/logstash-plugins/logstash-codec-s3plain/blob/v2.0.7/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== S3_plain codec plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The "s3_plain" codec is used for backward compatibility with previous versions of the S3 output.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== S3_plain Codec Configuration Options
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+|=======================================================================
diff --git a/docs/versioned-plugins/codecs/sflow-index.asciidoc b/docs/versioned-plugins/codecs/sflow-index.asciidoc
new file mode 100644
index 000000000..916b2160c
--- /dev/null
+++ b/docs/versioned-plugins/codecs/sflow-index.asciidoc
@@ -0,0 +1,10 @@
+:plugin: sflow
+:type: codec
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+|=======================================================================
+
+
diff --git a/docs/versioned-plugins/filters-index.asciidoc b/docs/versioned-plugins/filters-index.asciidoc
new file mode 100644
index 000000000..a571b9941
--- /dev/null
+++ b/docs/versioned-plugins/filters-index.asciidoc
@@ -0,0 +1,69 @@
+:type: filter
+:type_uc: Filter
+
+include::include/plugin-intro.asciidoc[]
+
+include::filters/age-index.asciidoc[]
+include::filters/aggregate-index.asciidoc[]
+include::filters/alter-index.asciidoc[]
+include::filters/anonymize-index.asciidoc[]
+include::filters/bytesize-index.asciidoc[]
+include::filters/checksum-index.asciidoc[]
+include::filters/cidr-index.asciidoc[]
+include::filters/cipher-index.asciidoc[]
+include::filters/clone-index.asciidoc[]
+include::filters/cloudfoundry-index.asciidoc[]
+include::filters/collate-index.asciidoc[]
+include::filters/csv-index.asciidoc[]
+include::filters/date-index.asciidoc[]
+include::filters/de_dot-index.asciidoc[]
+include::filters/debug-index.asciidoc[]
+include::filters/dissect-index.asciidoc[]
+include::filters/dns-index.asciidoc[]
+include::filters/drop-index.asciidoc[]
+include::filters/elapsed-index.asciidoc[]
+include::filters/elasticsearch-index.asciidoc[]
+include::filters/emoji-index.asciidoc[]
+include::filters/environment-index.asciidoc[]
+include::filters/example-index.asciidoc[]
+include::filters/extractnumbers-index.asciidoc[]
+include::filters/fingerprint-index.asciidoc[]
+include::filters/geoip-index.asciidoc[]
+include::filters/grok-index.asciidoc[]
+include::filters/hashid-index.asciidoc[]
+include::filters/i18n-index.asciidoc[]
+include::filters/jdbc_static-index.asciidoc[]
+include::filters/jdbc_streaming-index.asciidoc[]
+include::filters/json-index.asciidoc[]
+include::filters/json_encode-index.asciidoc[]
+include::filters/kubernetes_metadata-index.asciidoc[]
+include::filters/kv-index.asciidoc[]
+include::filters/language-index.asciidoc[]
+include::filters/lookup-index.asciidoc[]
+include::filters/math-index.asciidoc[]
+include::filters/metaevent-index.asciidoc[]
+include::filters/metricize-index.asciidoc[]
+include::filters/metrics-index.asciidoc[]
+include::filters/multiline-index.asciidoc[]
+include::filters/mutate-index.asciidoc[]
+include::filters/oui-index.asciidoc[]
+include::filters/prune-index.asciidoc[]
+include::filters/punct-index.asciidoc[]
+include::filters/range-index.asciidoc[]
+include::filters/ruby-index.asciidoc[]
+include::filters/script-index.asciidoc[]
+include::filters/sleep-index.asciidoc[]
+include::filters/split-index.asciidoc[]
+include::filters/syslog_pri-index.asciidoc[]
+include::filters/throttle-index.asciidoc[]
+include::filters/tld-index.asciidoc[]
+include::filters/translate-index.asciidoc[]
+include::filters/truncate-index.asciidoc[]
+include::filters/unique-index.asciidoc[]
+include::filters/urldecode-index.asciidoc[]
+include::filters/useragent-index.asciidoc[]
+include::filters/uuid-index.asciidoc[]
+include::filters/xml-index.asciidoc[]
+include::filters/yaml-index.asciidoc[]
+include::filters/zeromq-index.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/age-index.asciidoc b/docs/versioned-plugins/filters/age-index.asciidoc
new file mode 100644
index 000000000..d04a41129
--- /dev/null
+++ b/docs/versioned-plugins/filters/age-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: age
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::age-v1.0.2.asciidoc[]
+include::age-v1.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/age-v1.0.1.asciidoc b/docs/versioned-plugins/filters/age-v1.0.1.asciidoc
new file mode 100644
index 000000000..bc61d99d0
--- /dev/null
+++ b/docs/versioned-plugins/filters/age-v1.0.1.asciidoc
@@ -0,0 +1,66 @@
+:plugin: age
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v1.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-age/blob/v1.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Age filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+A simple filter for calculating the age of an event.
+
+This filter calculates the age of an event by subtracting the event timestamp
+from the current timestamp. This allows you to drop Logstash events that are
+older than some threshold.
+
+[source,ruby]
+filter {
+  age {}
+
+  if [@metadata][age] > 86400 {
+    drop {}
+  }
+}
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Age Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * Default value is `"[@metadata][age]"`
+
+Define the target field for the event age, in seconds.
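+
+For example, a sketch writing the age to a custom field instead of
+`[@metadata][age]` (the field name `event_age` is an arbitrary illustration):
+
+[source,ruby]
+filter {
+  age {
+    target => "event_age"
+  }
+}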
+ + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/age-v1.0.2.asciidoc b/docs/versioned-plugins/filters/age-v1.0.2.asciidoc new file mode 100644 index 000000000..2fab6d4f1 --- /dev/null +++ b/docs/versioned-plugins/filters/age-v1.0.2.asciidoc @@ -0,0 +1,66 @@ +:plugin: age +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v1.0.2 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-age/blob/v1.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Age filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +A simple filter for calculating the age of an event. + +This filter calculates the age of an event by subtracting the event timestamp +from the current timestamp. This allows you to drop Logstash events that are +older than some threshold. + +[source,ruby] +filter { + age {} + + if [@metadata][age] > 86400 { + drop {} + } +} + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Age Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * Default value is `"[@metadata][age]"` + +Define the target field for the event age, in seconds. 
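+
+For example, here is a minimal sketch of using `target` (the `[event_age]`
+field name and the one-hour threshold are illustrative, not defaults):
+
+[source,ruby]
+filter {
+  age {
+    # write the computed age to a regular field instead of [@metadata][age]
+    target => "[event_age]"
+  }
+
+  # drop events whose computed age exceeds one hour
+  if [event_age] > 3600 {
+    drop {}
+  }
+}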
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/aggregate-index.asciidoc b/docs/versioned-plugins/filters/aggregate-index.asciidoc
new file mode 100644
index 000000000..b9ff863d6
--- /dev/null
+++ b/docs/versioned-plugins/filters/aggregate-index.asciidoc
@@ -0,0 +1,24 @@
+:plugin: aggregate
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-16
+| <> | 2017-11-07
+| <> | 2017-11-03
+| <> | 2017-10-10
+| <> | 2017-08-15
+| <> | 2017-06-23
+| <> | 2017-06-11
+|=======================================================================
+
+include::aggregate-v2.7.2.asciidoc[]
+include::aggregate-v2.7.1.asciidoc[]
+include::aggregate-v2.7.0.asciidoc[]
+include::aggregate-v2.6.4.asciidoc[]
+include::aggregate-v2.6.3.asciidoc[]
+include::aggregate-v2.6.1.asciidoc[]
+include::aggregate-v2.6.0.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/aggregate-v2.6.0.asciidoc b/docs/versioned-plugins/filters/aggregate-v2.6.0.asciidoc
new file mode 100644
index 000000000..0b581f621
--- /dev/null
+++ b/docs/versioned-plugins/filters/aggregate-v2.6.0.asciidoc
@@ -0,0 +1,552 @@
+:plugin: aggregate
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.6.0
+:release_date: 2017-06-11
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-aggregate/blob/v2.6.0/CHANGELOG.md
+:include_path: ../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Aggregate filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+
+<<{version}-plugins-{type}s-{plugin}-description>>
+
+<<{version}-plugins-{type}s-{plugin}-example1>>
+
+<<{version}-plugins-{type}s-{plugin}-example2>>
+
+<<{version}-plugins-{type}s-{plugin}-example3>>
+
+<<{version}-plugins-{type}s-{plugin}-example4>>
+
+<<{version}-plugins-{type}s-{plugin}-example5>>
+
+<<{version}-plugins-{type}s-{plugin}-howitworks>>
+
+<<{version}-plugins-{type}s-{plugin}-usecases>>
+
+<<{version}-plugins-{type}s-{plugin}-options>>
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-description"]
+==== Description
+
+
+The aim of this filter is to aggregate information available among several events (typically log lines) belonging to the same task,
+and finally push the aggregated information into a final task event.
+
+You should be very careful to set Logstash filter workers to 1 (`-w 1` flag) for this filter to work correctly;
+otherwise, events may be processed out of sequence and unexpected results will occur.
+
+
+[id="{version}-plugins-{type}s-{plugin}-example1"]
+==== Example #1
+
+* with these given logs:
+
+[source,ruby]
+----------------------------------
+ INFO - 12345 - TASK_START - start
+ INFO - 12345 - SQL - sqlQuery1 - 12
+ INFO - 12345 - SQL - sqlQuery2 - 34
+ INFO - 12345 - TASK_END - end
+----------------------------------
+
+* you can aggregate "sql duration" for the whole task with this configuration:
+
+[source,ruby]
+----------------------------------
+ filter {
+   grok {
+     match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
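+     # e.g. "INFO - 12345 - SQL - sqlQuery1 - 12" yields loglevel=INFO,
+     # taskid=12345 (the task correlation key), logger=SQL, label=sqlQuery1,
+     # and duration=12; the duration capture is optional and cast to integer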
+   }
+
+   if [logger] == "TASK_START" {
+     aggregate {
+       task_id => "%{taskid}"
+       code => "map['sql_duration'] = 0"
+       map_action => "create"
+     }
+   }
+
+   if [logger] == "SQL" {
+     aggregate {
+       task_id => "%{taskid}"
+       code => "map['sql_duration'] += event.get('duration')"
+       map_action => "update"
+     }
+   }
+
+   if [logger] == "TASK_END" {
+     aggregate {
+       task_id => "%{taskid}"
+       code => "event.set('sql_duration', map['sql_duration'])"
+       map_action => "update"
+       end_of_task => true
+       timeout => 120
+     }
+   }
+ }
+----------------------------------
+
+* the final event then looks like:
+
+[source,ruby]
+----------------------------------
+{
+       "message" => "INFO - 12345 - TASK_END - end message",
+  "sql_duration" => 46
+}
+----------------------------------
+
+The field `sql_duration` is added and contains the sum of all SQL query durations.
+
+
+[id="{version}-plugins-{type}s-{plugin}-example2"]
+==== Example #2: no start event
+
+* If you have the same logs as in example #1, but without a start log:
+
+[source,ruby]
+----------------------------------
+ INFO - 12345 - SQL - sqlQuery1 - 12
+ INFO - 12345 - SQL - sqlQuery2 - 34
+ INFO - 12345 - TASK_END - end
+----------------------------------
+
+* you can also aggregate "sql duration" with a slightly different configuration:
+
+[source,ruby]
+----------------------------------
+ filter {
+   grok {
+     match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
+   }
+
+   if [logger] == "SQL" {
+     aggregate {
+       task_id => "%{taskid}"
+       code => "map['sql_duration'] ||= 0 ; map['sql_duration'] += event.get('duration')"
+     }
+   }
+
+   if [logger] == "TASK_END" {
+     aggregate {
+       task_id => "%{taskid}"
+       code => "event.set('sql_duration', map['sql_duration'])"
+       end_of_task => true
+       timeout => 120
+     }
+   }
+ }
+----------------------------------
+
+* the final event is exactly the same as in example #1
+* the key point is the `||=` Ruby operator, which initializes the 'sql_duration' map entry to 0 only if it is not already initialized
+
+
+[id="{version}-plugins-{type}s-{plugin}-example3"]
+==== Example #3: no end event
+
+Third use case: you have no specific end event.
+
+A typical case is aggregating or tracking user behaviour. We can track a user by its ID through the events; however, once the user stops interacting, the events stop coming in. There is no specific event indicating the end of the user's interaction.
+
+In this case, we can enable the option 'push_map_as_event_on_timeout' to push the aggregation map as a new event when a timeout occurs.
+In addition, we can use 'timeout_code' to execute code on the populated timeout event.
+We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID.
+
+* Given these logs:
+
+[source,ruby]
+----------------------------------
+INFO - 12345 - Clicked One
+INFO - 12345 - Clicked Two
+INFO - 12345 - Clicked Three
+----------------------------------
+
+* You can aggregate the number of clicks the user made like this:
+
+[source,ruby]
+----------------------------------
+filter {
+  grok {
+    match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ]
+  }
+
+  aggregate {
+    task_id => "%{user_id}"
+    code => "map['clicks'] ||= 0; map['clicks'] += 1;"
+    push_map_as_event_on_timeout => true
+    timeout_task_id_field => "user_id"
+    timeout => 600 # 10 minutes timeout
+    timeout_tags => ['_aggregatetimeout']
+    timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
+  }
+}
+----------------------------------
+
+* After ten minutes, this will yield an event like:
+
+[source,json]
+----------------------------------
+{
+  "user_id": "12345",
+  "clicks": 3,
+  "several_clicks": true,
+  "tags": [
+     "_aggregatetimeout"
+  ]
+}
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-example4"]
+==== Example #4: no end event and tasks come one after the other
+
+Fourth use case: like example #3, you have no specific end event, but in addition, tasks come one after the other.
+
+That is to say: tasks are not interlaced. All task1 events come, then all task2 events come, and so on.
+
+In that case, you don't want to wait for the task timeout to flush the aggregation map.
+
+
+* A typical case is aggregating results from the jdbc input plugin.
+* Given that you have this SQL query: `SELECT country_name, town_name FROM town`
+* Using the jdbc input plugin, you get these 3 events:
+
+[source,json]
+----------------------------------
+ { "country_name": "France", "town_name": "Paris" }
+ { "country_name": "France", "town_name": "Marseille" }
+ { "country_name": "USA", "town_name": "New-York" }
+----------------------------------
+
+* And you would like to push these 2 aggregated events into Elasticsearch:
+
+[source,json]
+----------------------------------
+ { "country_name": "France", "towns": [ {"town_name": "Paris"}, {"town_name": "Marseille"} ] }
+ { "country_name": "USA", "towns": [ {"town_name": "New-York"} ] }
+----------------------------------
+
+* You can do that using the `push_previous_map_as_event` aggregate plugin option:
+
+[source,ruby]
+----------------------------------
+ filter {
+   aggregate {
+     task_id => "%{country_name}"
+     code => "
+       map['country_name'] = event.get('country_name')
+       map['towns'] ||= []
+       map['towns'] << {'town_name' => event.get('town_name')}
+       event.cancel()
+     "
+     push_previous_map_as_event => true
+     timeout => 3
+   }
+ }
+----------------------------------
+
+* The key point is that each time the aggregate plugin detects a new `country_name`, it pushes the previous aggregate map as a new Logstash event, and then creates a new empty map for the next country
+* When the 3-second `timeout` expires, the last aggregate map is pushed as a new event
+* Finally, the initial events (which are not aggregated) are dropped because they are no longer useful (thanks to `event.cancel()`)
+
+
+[id="{version}-plugins-{type}s-{plugin}-example5"]
+==== Example #5: no end event and push events as soon as possible
+
+Fifth use case: like example #3, there is no end event.
+
+Events keep coming for an indefinite time and you want to push the aggregation map as soon as possible after the last user interaction, without waiting for the `timeout`.
+
+This allows the aggregated events to be pushed closer to real time.
+
+
+A typical case is aggregating or tracking user behaviour.
+
+We can track a user by its ID through the events; however, once the user stops interacting, the events stop coming in.
+
+There is no specific event indicating the end of the user's interaction.
+
+The user interaction will be considered as ended when no events for the specified user (task_id) arrive within the specified `inactivity_timeout`.
+
+If the user continues interacting for longer than `timeout` seconds (since the first event), the aggregation map will still be deleted and pushed as a new event when the timeout occurs.
+
+The difference with example #3 is that the events will be pushed as soon as the user stops interacting for `inactivity_timeout` seconds, instead of waiting for the end of `timeout` seconds since the first event.
+
+In this case, we can enable the option 'push_map_as_event_on_timeout' to push the aggregation map as a new event when the inactivity timeout occurs.
+
+In addition, we can use 'timeout_code' to execute code on the populated timeout event.
+
+We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID.
+
+
+* Given these logs:
+
+[source,ruby]
+----------------------------------
+INFO - 12345 - Clicked One
+INFO - 12345 - Clicked Two
+INFO - 12345 - Clicked Three
+----------------------------------
+
+* You can aggregate the number of clicks the user made like this:
+
+[source,ruby]
+----------------------------------
+filter {
+  grok {
+    match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ]
+  }
+  aggregate {
+    task_id => "%{user_id}"
+    code => "map['clicks'] ||= 0; map['clicks'] += 1;"
+    push_map_as_event_on_timeout => true
+    timeout_task_id_field => "user_id"
+    timeout => 3600 # 1 hour timeout, user activity will be considered finished one hour after the first event, even if events keep coming
+    inactivity_timeout => 300 # 5 minutes timeout, user activity will be considered finished if no new events arrive 5 minutes after the last event
+    timeout_tags => ['_aggregatetimeout']
+    timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
+  }
+}
+----------------------------------
+
+* After five minutes of inactivity or one hour since the first event, this will yield an event like:
+
+[source,json]
+----------------------------------
+{
+  "user_id": "12345",
+  "clicks": 3,
+  "several_clicks": true,
+  "tags": [
+     "_aggregatetimeout"
+  ]
+}
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-howitworks"]
+==== How it works
+* the filter needs a "task_id" to correlate events (log lines) of the same task
+* at the task beginning, the filter creates a map attached to the task_id
+* for each event, you can execute code using 'event' and 'map' (for instance, copy an event field to the map)
+* on the final event, you can execute a last piece of code (for instance, add map data to the final event)
+* after the final event, the map attached to the task is deleted (thanks to `end_of_task => true`)
+* an aggregate map is tied to one task_id value, which is tied to one task_id pattern. So if you have 2 filters with different task_id patterns, even if you have the same task_id value, they won't share the same aggregate map.
+* in one filter configuration, it is recommended to define a timeout option to protect the feature against unterminated tasks. It tells the filter to delete expired maps.
+* if no timeout is defined, by default, all maps older than 1800 seconds are automatically deleted
+* all timeout options have to be defined in only one aggregate filter per task_id pattern. Timeout options are: timeout, inactivity_timeout, timeout_code, push_map_as_event_on_timeout, push_previous_map_as_event, timeout_task_id_field, timeout_tags
+* if `code` execution raises an exception, the error is logged and the event is tagged '_aggregateexception'
+
+
+[id="{version}-plugins-{type}s-{plugin}-usecases"]
+==== Use Cases
+* extract some cool metrics from task logs and push them into the final task log event (as in examples #1 and #2)
+* extract error information from any task log line, and push it into the final task event (to get a final event with all error information, if any)
+* extract all back-end calls as a list, and push this list into the final task event (to get a task profile)
+* extract all HTTP headers logged across several lines, and push this list into the final task event (complete HTTP request info)
+* for every back-end call, collect call details available across several lines, analyse them and finally tag the final back-end call log line (error, timeout, business-warning, ...)
+* Finally, the task id can be any correlation id matching your needs: it can be a session id, a file path, ...
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Aggregate Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-aggregate_maps_path>> |<>, a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-code>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-end_of_task>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-inactivity_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-map_action>> |<>, one of `["create", "update", "create_or_update"]`|No
+| <<{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-push_previous_map_as_event>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-task_id>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout_code>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout_tags>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout_task_id_field>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-aggregate_maps_path"]
+===== `aggregate_maps_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The path to the file where aggregate maps are stored when Logstash stops
+and are loaded from when Logstash starts.
+
+If not defined, aggregate maps will not be stored when Logstash stops and will be lost.
+Must be defined in only one aggregate filter (as aggregate maps are global).
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        aggregate_maps_path => "/path/to/.aggregate_maps"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-code"]
+===== `code`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The code to execute to update the map, using the current event.
+
+Or conversely, the code to execute to update the event, using the current map.
+
+You will have a 'map' variable and an 'event' variable available (that is the event itself).
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        code => "map['sql_duration'] += event.get('duration')"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-end_of_task"]
+===== `end_of_task`
+
+ * Value type is <>
+ * Default value is `false`
+
+Tell the filter that the task is ended, and therefore, to delete the aggregate map after code execution.
+
+[id="{version}-plugins-{type}s-{plugin}-inactivity_timeout"]
+===== `inactivity_timeout`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The number of seconds (since the last event) after which a task is considered as expired.
+
+When the timeout occurs for a task, its aggregate map is evicted.
+
+If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event.
+
+`inactivity_timeout` can be defined for each "task_id" pattern.
+
+`inactivity_timeout` must be lower than `timeout`.
+
+[id="{version}-plugins-{type}s-{plugin}-map_action"]
+===== `map_action`
+
+ * Value type is <>
+ * Default value is `"create_or_update"`
+
+Tell the filter what to do with the aggregate map.
+
+`"create"`: create the map, and execute the code only if the map wasn't created before
+
+`"update"`: don't create the map, and execute the code only if the map was created before
+
+`"create_or_update"`: create the map if it wasn't created before, and execute the code in all cases
+
+[id="{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout"]
+===== `push_map_as_event_on_timeout`
+
+ * Value type is <>
+ * Default value is `false`
+
+When this option is enabled, each time a task timeout is detected, the filter pushes the task aggregation map as a new Logstash event.
+This makes it possible to detect and process task timeouts in Logstash, and also to manage tasks that have no explicit end event.
+
+[id="{version}-plugins-{type}s-{plugin}-push_previous_map_as_event"]
+===== `push_previous_map_as_event`
+
+ * Value type is <>
+ * Default value is `false`
+
+When this option is enabled, each time the aggregate plugin detects a new task id, it pushes the previous aggregate map as a new Logstash event,
+and then creates a new empty map for the next task.
+
+WARNING: this option works fine only if tasks come one after the other. It means: all task1 events, then all task2 events, etc...
+
+[id="{version}-plugins-{type}s-{plugin}-task_id"]
+===== `task_id`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The expression defining the task ID used to correlate logs.
+
+This value must uniquely identify the task.
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        task_id => "%{type}%{my_task_id}"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is <>
+ * Default value is `1800`
+
+The number of seconds (since the first event) after which a task is considered as expired.
+
+When the timeout occurs for a task, its aggregate map is evicted.
+
+If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event.
+
+The timeout can be defined for each "task_id" pattern.
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_code"]
+===== `timeout_code`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The code to execute to complete the timeout-generated event, when 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true.
+The code block will have access to the newly generated timeout event that is pre-populated with the aggregation map.
+
+If 'timeout_task_id_field' is set, the event is also populated with the task_id value.
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        timeout_code => "event.set('state', 'timeout')"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_tags"]
+===== `timeout_tags`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Defines tags to add when a timeout event is generated and yielded.
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        timeout_tags => ["aggregate_timeout"]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_task_id_field"]
+===== `timeout_task_id_field`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+This option indicates the timeout-generated event's field where the "task_id" value is stored.
+The task id will then be set into the timeout event. This can help correlate which tasks have been timed out.
+
+For example, with the option `timeout_task_id_field => "my_id"`, when the timeout task id is `"12345"`, the generated timeout event will contain `'my_id' => '12345'`.
+
+By default, if this option is not set, the task id value won't be set into the timeout-generated event.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/aggregate-v2.6.1.asciidoc b/docs/versioned-plugins/filters/aggregate-v2.6.1.asciidoc
new file mode 100644
index 000000000..606a63376
--- /dev/null
+++ b/docs/versioned-plugins/filters/aggregate-v2.6.1.asciidoc
@@ -0,0 +1,553 @@
+:plugin: aggregate
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.6.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-aggregate/blob/v2.6.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Aggregate filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+
+<<{version}-plugins-{type}s-{plugin}-description>>
+
+<<{version}-plugins-{type}s-{plugin}-example1>>
+
+<<{version}-plugins-{type}s-{plugin}-example2>>
+
+<<{version}-plugins-{type}s-{plugin}-example3>>
+
+<<{version}-plugins-{type}s-{plugin}-example4>>
+
+<<{version}-plugins-{type}s-{plugin}-example5>>
+
+<<{version}-plugins-{type}s-{plugin}-howitworks>>
+
+<<{version}-plugins-{type}s-{plugin}-usecases>>
+
+<<{version}-plugins-{type}s-{plugin}-options>>
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-description"]
+==== Description
+
+
+The aim of this filter is to aggregate information available among several events (typically log lines) belonging to the same task,
+and finally push the aggregated information into a final task event.
+
+You should be very careful to set Logstash filter workers to 1 (`-w 1` flag) for this filter to work correctly;
+otherwise, events may be processed out of sequence and unexpected results will occur.
+ + +[id="{version}-plugins-{type}s-{plugin}-example1"] +==== Example #1 + +* with these given logs : + +[source,ruby] +---------------------------------- + INFO - 12345 - TASK_START - start + INFO - 12345 - SQL - sqlQuery1 - 12 + INFO - 12345 - SQL - sqlQuery2 - 34 + INFO - 12345 - TASK_END - end +---------------------------------- + +* you can aggregate "sql duration" for the whole task with this configuration : + +[source,ruby] +---------------------------------- + filter { + grok { + match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ] + } + + if [logger] == "TASK_START" { + aggregate { + task_id => "%{taskid}" + code => "map['sql_duration'] = 0" + map_action => "create" + } + } + + if [logger] == "SQL" { + aggregate { + task_id => "%{taskid}" + code => "map['sql_duration'] += event.get('duration')" + map_action => "update" + } + } + + if [logger] == "TASK_END" { + aggregate { + task_id => "%{taskid}" + code => "event.set('sql_duration', map['sql_duration'])" + map_action => "update" + end_of_task => true + timeout => 120 + } + } + } +---------------------------------- + +* the final event then looks like : + +[source,ruby] +---------------------------------- +{ + "message" => "INFO - 12345 - TASK_END - end message", + "sql_duration" => 46 +} +---------------------------------- + +the field `sql_duration` is added and contains the sum of all sql queries durations. + + +[id="{version}-plugins-{type}s-{plugin}-example2"] +==== Example #2 : no start event + +* If you have the same logs than example #1, but without a start log : + +[source,ruby] +---------------------------------- + INFO - 12345 - SQL - sqlQuery1 - 12 + INFO - 12345 - SQL - sqlQuery2 - 34 + INFO - 12345 - TASK_END - end +---------------------------------- + +* you can also aggregate "sql duration" with a slightly different configuration : + +[source,ruby] +---------------------------------- + filter { + grok { + match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ] + } + + if [logger] == "SQL" { + aggregate { + task_id => "%{taskid}" + code => "map['sql_duration'] ||= 0 ; map['sql_duration'] += event.get('duration')" + } + } + + if [logger] == "TASK_END" { + aggregate { + task_id => "%{taskid}" + code => "event.set('sql_duration', map['sql_duration'])" + end_of_task => true + timeout => 120 + } + } + } +---------------------------------- + +* the final event is exactly the same than example #1 +* the key point is the "||=" ruby operator. It allows to initialize 'sql_duration' map entry to 0 only if this map entry is not already initialized + + +[id="{version}-plugins-{type}s-{plugin}-example3"] +==== Example #3 : no end event + +Third use case: You have no specific end event. + +A typical case is aggregating or tracking user behaviour. We can track a user by its ID through the events, however once the user stops interacting, the events stop coming in. There is no specific event indicating the end of the user's interaction. + +In this case, we can enable the option 'push_map_as_event_on_timeout' to enable pushing the aggregation map as a new event when a timeout occurs. +In addition, we can enable 'timeout_code' to execute code on the populated timeout event. +We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID. 
+ +* Given these logs: + +[source,ruby] +---------------------------------- +INFO - 12345 - Clicked One +INFO - 12345 - Clicked Two +INFO - 12345 - Clicked Three +---------------------------------- + +* You can aggregate the amount of clicks the user did like this: + +[source,ruby] +---------------------------------- +filter { + grok { + match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ] + } + + aggregate { + task_id => "%{user_id}" + code => "map['clicks'] ||= 0; map['clicks'] += 1;" + push_map_as_event_on_timeout => true + timeout_task_id_field => "user_id" + timeout => 600 # 10 minutes timeout + timeout_tags => ['_aggregatetimeout'] + timeout_code => "event.set('several_clicks', event.get('clicks') > 1)" + } +} +---------------------------------- + +* After ten minutes, this will yield an event like: + +[source,json] +---------------------------------- +{ + "user_id": "12345", + "clicks": 3, + "several_clicks": true, + "tags": [ + "_aggregatetimeout" + ] +} +---------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-example4"] +==== Example #4 : no end event and tasks come one after the other + +Fourth use case : like example #3, you have no specific end event, but also, tasks come one after the other. + +That is to say : tasks are not interlaced. All task1 events come, then all task2 events come, ... + +In that case, you don't want to wait task timeout to flush aggregation map. + + +* A typical case is aggregating results from jdbc input plugin. +* Given that you have this SQL query : `SELECT country_name, town_name FROM town` +* Using jdbc input plugin, you get these 3 events from : + +[source,json] +---------------------------------- + { "country_name": "France", "town_name": "Paris" } + { "country_name": "France", "town_name": "Marseille" } + { "country_name": "USA", "town_name": "New-York" } +---------------------------------- + +* And you would like these 2 result events to push them into elasticsearch : + +[source,json] +---------------------------------- + { "country_name": "France", "towns": [ {"town_name": "Paris"}, {"town_name": "Marseille"} ] } + { "country_name": "USA", "towns": [ {"town_name": "New-York"} ] } +---------------------------------- + +* You can do that using `push_previous_map_as_event` aggregate plugin option : + +[source,ruby] +---------------------------------- + filter { + aggregate { + task_id => "%{country_name}" + code => " + map['country_name'] = event.get('country_name') + map['towns'] ||= [] + map['towns'] << {'town_name' => event.get('town_name')} + event.cancel() + " + push_previous_map_as_event => true + timeout => 3 + } + } +---------------------------------- + +* The key point is that each time aggregate plugin detects a new `country_name`, it pushes previous aggregate map as a new Logstash event, and then creates a new empty map for the next country +* When 5s timeout comes, the last aggregate map is pushed as a new event +* Finally, initial events (which are not aggregated) are dropped because useless (thanks to `event.cancel()`) + + +[id="{version}-plugins-{type}s-{plugin}-example5"] +==== Example #5 : no end event and push events as soon as possible + +Fifth use case: like example #3, there is no end event. + +Events keep comming for an indefinite time and you want to push the aggregation map as soon as possible after the last user interaction without waiting for the `timeout`. + +This allows to have the aggregated events pushed closer to real time. 
+ + +A typical case is aggregating or tracking user behaviour. + +We can track a user by its ID through the events, however once the user stops interacting, the events stop coming in. + +There is no specific event indicating the end of the user's interaction. + +The user ineraction will be considered as ended when no events for the specified user (task_id) arrive after the specified inactivity_timeout`. + +If the user continues interacting for longer than `timeout` seconds (since first event), the aggregation map will still be deleted and pushed as a new event when timeout occurs. + +The difference with example #3 is that the events will be pushed as soon as the user stops interacting for `inactivity_timeout` seconds instead of waiting for the end of `timeout` seconds since first event. + +In this case, we can enable the option 'push_map_as_event_on_timeout' to enable pushing the aggregation map as a new event when inactivity timeout occurs. + +In addition, we can enable 'timeout_code' to execute code on the populated timeout event. + +We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID. + + +* Given these logs: + +[source,ruby] +---------------------------------- +INFO - 12345 - Clicked One +INFO - 12345 - Clicked Two +INFO - 12345 - Clicked Three +---------------------------------- + +* You can aggregate the amount of clicks the user did like this: + +[source,ruby] +---------------------------------- +filter { + grok { + match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ] + } + aggregate { + task_id => "%{user_id}" + code => "map['clicks'] ||= 0; map['clicks'] += 1;" + push_map_as_event_on_timeout => true + timeout_task_id_field => "user_id" + timeout => 3600 # 1 hour timeout, user activity will be considered finished one hour after the first event, even if events keep comming + inactivity_timeout => 300 # 5 minutes timeout, user activity will be considered finished if no new events arrive 5 minutes after the last event + timeout_tags => ['_aggregatetimeout'] + timeout_code => "event.set('several_clicks', event.get('clicks') > 1)" + } +} +---------------------------------- + +* After five minutes of inactivity or one hour since first event, this will yield an event like: + +[source,json] +---------------------------------- +{ + "user_id": "12345", + "clicks": 3, + "several_clicks": true, + "tags": [ + "_aggregatetimeout" + ] +} +---------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-howitworks"] +==== How it works +* the filter needs a "task_id" to correlate events (log lines) of a same task +* at the task beggining, filter creates a map, attached to task_id +* for each event, you can execute code using 'event' and 'map' (for instance, copy an event field to map) +* in the final event, you can execute a last code (for instance, add map data to final event) +* after the final event, the map attached to task is deleted (thanks to `end_of_task => true`) +* an aggregate map is tied to one task_id value which is tied to one task_id pattern. So if you have 2 filters with different task_id patterns, even if you have same task_id value, they won't share the same aggregate map. +* in one filter configuration, it is recommanded to define a timeout option to protect the feature against unterminated tasks. 
It tells the filter to delete expired maps +* if no timeout is defined, by default, all maps older than 1800 seconds are automatically deleted +* all timeout options have to be defined in only one aggregate filter per task_id pattern. Timeout options are : timeout, inactivity_timeout, timeout_code, push_map_as_event_on_timeout, push_previous_map_as_event, timeout_task_id_field, timeout_tags +* if `code` execution raises an exception, the error is logged and event is tagged '_aggregateexception' + + +[id="{version}-plugins-{type}s-{plugin}-usecases"] +==== Use Cases +* extract some cool metrics from task logs and push them into task final log event (like in example #1 and #2) +* extract error information in any task log line, and push it in final task event (to get a final event with all error information if any) +* extract all back-end calls as a list, and push this list in final task event (to get a task profile) +* extract all http headers logged in several lines to push this list in final task event (complete http request info) +* for every back-end call, collect call details available on several lines, analyse it and finally tag final back-end call log line (error, timeout, business-warning, ...) +* Finally, task id can be any correlation id matching your need : it can be a session id, a file path, ... + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Aggregate Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-aggregate_maps_path>> |<>, a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-code>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-end_of_task>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-inactivity_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-map_action>> |<>, one of `["create", "update", "create_or_update"]`|No +| <<{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-push_previous_map_as_event>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-task_id>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout_code>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout_tags>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout_task_id_field>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-aggregate_maps_path"] +===== `aggregate_maps_path` + + * Value type is <> + * There is no default value for this setting. + +The path to file where aggregate maps are stored when Logstash stops +and are loaded from when Logstash starts. + +If not defined, aggregate maps will not be stored at Logstash stop and will be lost. +Must be defined in only one aggregate filter (as aggregate maps are global). + +Example: +[source,ruby] + filter { + aggregate { + aggregate_maps_path => "/path/to/.aggregate_maps" + } + } + +[id="{version}-plugins-{type}s-{plugin}-code"] +===== `code` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The code to execute to update map, using current event. 
+ +Or on the contrary, the code to execute to update event, using current map. + +You will have a 'map' variable and an 'event' variable available (that is the event itself). + +Example: +[source,ruby] + filter { + aggregate { + code => "map['sql_duration'] += event.get('duration')" + } + } + +[id="{version}-plugins-{type}s-{plugin}-end_of_task"] +===== `end_of_task` + + * Value type is <> + * Default value is `false` + +Tell the filter that task is ended, and therefore, to delete aggregate map after code execution. + +[id="{version}-plugins-{type}s-{plugin}-inactivity_timeout"] +===== `inactivity_timeout` + + * Value type is <> + * There is no default value for this setting. + +The amount of seconds (since the last event) after which a task is considered as expired. + +When timeout occurs for a task, its aggregate map is evicted. + +If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event. + +`inactivity_timeout` can be defined for each "task_id" pattern. + +`inactivity_timeout` must be lower than `timeout`. + +[id="{version}-plugins-{type}s-{plugin}-map_action"] +===== `map_action` + + * Value type is <> + * Default value is `"create_or_update"` + +Tell the filter what to do with aggregate map. + +`"create"`: create the map, and execute the code only if map wasn't created before + +`"update"`: doesn't create the map, and execute the code only if map was created before + +`"create_or_update"`: create the map if it wasn't created before, execute the code in all cases + +[id="{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout"] +===== `push_map_as_event_on_timeout` + + * Value type is <> + * Default value is `false` + +When this option is enabled, each time a task timeout is detected, it pushes task aggregation map as a new Logstash event. +This enables to detect and process task timeouts in Logstash, but also to manage tasks that have no explicit end event. + +[id="{version}-plugins-{type}s-{plugin}-push_previous_map_as_event"] +===== `push_previous_map_as_event` + + * Value type is <> + * Default value is `false` + +When this option is enabled, each time aggregate plugin detects a new task id, it pushes previous aggregate map as a new Logstash event, +and then creates a new empty map for the next task. + +WARNING: this option works fine only if tasks come one after the other. It means : all task1 events, then all task2 events, etc... + +[id="{version}-plugins-{type}s-{plugin}-task_id"] +===== `task_id` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The expression defining task ID to correlate logs. + +This value must uniquely identify the task. + +Example: +[source,ruby] + filter { + aggregate { + task_id => "%{type}%{my_task_id}" + } + } + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `1800` + +The amount of seconds (since the first event) after which a task is considered as expired. + +When timeout occurs for a task, its aggregate map is evicted. + +If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event. + +Timeout can be defined for each "task_id" pattern. + +[id="{version}-plugins-{type}s-{plugin}-timeout_code"] +===== `timeout_code` + + * Value type is <> + * There is no default value for this setting. 
+
+The code to execute to complete the timeout-generated event, when 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true.
+The code block will have access to the newly generated timeout event that is pre-populated with the aggregation map.
+
+If 'timeout_task_id_field' is set, the event is also populated with the task_id value.
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        timeout_code => "event.set('state', 'timeout')"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_tags"]
+===== `timeout_tags`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Defines tags to add when a timeout event is generated and yielded.
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        timeout_tags => ["aggregate_timeout"]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_task_id_field"]
+===== `timeout_task_id_field`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+This option indicates the timeout-generated event's field where the "task_id" value is stored.
+The task id will then be set into the timeout event. This can help correlate which tasks have been timed out.
+
+For example, with the option `timeout_task_id_field => "my_id"`, when the timeout task id is `"12345"`, the generated timeout event will contain `'my_id' => '12345'`.
+
+By default, if this option is not set, the task id value won't be set into the timeout-generated event.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/aggregate-v2.6.3.asciidoc b/docs/versioned-plugins/filters/aggregate-v2.6.3.asciidoc
new file mode 100644
index 000000000..6c50f8c70
--- /dev/null
+++ b/docs/versioned-plugins/filters/aggregate-v2.6.3.asciidoc
@@ -0,0 +1,542 @@
+:plugin: aggregate
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.6.3
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-aggregate/blob/v2.6.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Aggregate filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+
+[id="{version}-plugins-{type}s-{plugin}-description"]
+==== Description
+
+
+The aim of this filter is to aggregate information available among several events (typically log lines) belonging to the same task,
+and finally push the aggregated information into a final task event.
+
+You should be very careful to set Logstash filter workers to 1 (`-w 1` flag) for this filter to work correctly;
+otherwise, events may be processed out of sequence and unexpected results will occur.
+
+
+[id="{version}-plugins-{type}s-{plugin}-example1"]
+==== Example #1
+
+* with these given logs:
+
+[source,ruby]
+----------------------------------
+ INFO - 12345 - TASK_START - start
+ INFO - 12345 - SQL - sqlQuery1 - 12
+ INFO - 12345 - SQL - sqlQuery2 - 34
+ INFO - 12345 - TASK_END - end
+----------------------------------
+
+* you can aggregate "sql duration" for the whole task with this configuration:
+
+[source,ruby]
+----------------------------------
+ filter {
+   grok {
+     match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?"
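+       # e.g. "INFO - 12345 - SQL - sqlQuery1 - 12" yields loglevel=INFO,
+       # taskid=12345 (the task correlation key), logger=SQL, label=sqlQuery1,
+       # and duration=12; the duration capture is optional and cast to integer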
] + } + + if [logger] == "TASK_START" { + aggregate { + task_id => "%{taskid}" + code => "map['sql_duration'] = 0" + map_action => "create" + } + } + + if [logger] == "SQL" { + aggregate { + task_id => "%{taskid}" + code => "map['sql_duration'] += event.get('duration')" + map_action => "update" + } + } + + if [logger] == "TASK_END" { + aggregate { + task_id => "%{taskid}" + code => "event.set('sql_duration', map['sql_duration'])" + map_action => "update" + end_of_task => true + timeout => 120 + } + } + } +---------------------------------- + +* the final event then looks like : + +[source,ruby] +---------------------------------- +{ + "message" => "INFO - 12345 - TASK_END - end message", + "sql_duration" => 46 +} +---------------------------------- + +the field `sql_duration` is added and contains the sum of all sql queries durations. + + +[id="{version}-plugins-{type}s-{plugin}-example2"] +==== Example #2 : no start event + +* If you have the same logs than example #1, but without a start log : + +[source,ruby] +---------------------------------- + INFO - 12345 - SQL - sqlQuery1 - 12 + INFO - 12345 - SQL - sqlQuery2 - 34 + INFO - 12345 - TASK_END - end +---------------------------------- + +* you can also aggregate "sql duration" with a slightly different configuration : + +[source,ruby] +---------------------------------- + filter { + grok { + match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ] + } + + if [logger] == "SQL" { + aggregate { + task_id => "%{taskid}" + code => "map['sql_duration'] ||= 0 ; map['sql_duration'] += event.get('duration')" + } + } + + if [logger] == "TASK_END" { + aggregate { + task_id => "%{taskid}" + code => "event.set('sql_duration', map['sql_duration'])" + end_of_task => true + timeout => 120 + } + } + } +---------------------------------- + +* the final event is exactly the same than example #1 +* the key point is the "||=" ruby operator. It allows to initialize 'sql_duration' map entry to 0 only if this map entry is not already initialized + + +[id="{version}-plugins-{type}s-{plugin}-example3"] +==== Example #3 : no end event + +Third use case: You have no specific end event. + +A typical case is aggregating or tracking user behaviour. We can track a user by its ID through the events, however once the user stops interacting, the events stop coming in. There is no specific event indicating the end of the user's interaction. + +In this case, we can enable the option 'push_map_as_event_on_timeout' to enable pushing the aggregation map as a new event when a timeout occurs. +In addition, we can enable 'timeout_code' to execute code on the populated timeout event. +We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID. 
+ +* Given these logs: + +[source,ruby] +---------------------------------- +INFO - 12345 - Clicked One +INFO - 12345 - Clicked Two +INFO - 12345 - Clicked Three +---------------------------------- + +* You can aggregate the amount of clicks the user did like this: + +[source,ruby] +---------------------------------- +filter { + grok { + match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ] + } + + aggregate { + task_id => "%{user_id}" + code => "map['clicks'] ||= 0; map['clicks'] += 1;" + push_map_as_event_on_timeout => true + timeout_task_id_field => "user_id" + timeout => 600 # 10 minutes timeout + timeout_tags => ['_aggregatetimeout'] + timeout_code => "event.set('several_clicks', event.get('clicks') > 1)" + } +} +---------------------------------- + +* After ten minutes, this will yield an event like: + +[source,json] +---------------------------------- +{ + "user_id": "12345", + "clicks": 3, + "several_clicks": true, + "tags": [ + "_aggregatetimeout" + ] +} +---------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-example4"] +==== Example #4 : no end event and tasks come one after the other + +Fourth use case : like example #3, you have no specific end event, but also, tasks come one after the other. + +That is to say : tasks are not interlaced. All task1 events come, then all task2 events come, ... + +In that case, you don't want to wait task timeout to flush aggregation map. + + +* A typical case is aggregating results from jdbc input plugin. +* Given that you have this SQL query : `SELECT country_name, town_name FROM town` +* Using jdbc input plugin, you get these 3 events from : + +[source,json] +---------------------------------- + { "country_name": "France", "town_name": "Paris" } + { "country_name": "France", "town_name": "Marseille" } + { "country_name": "USA", "town_name": "New-York" } +---------------------------------- + +* And you would like these 2 result events to push them into elasticsearch : + +[source,json] +---------------------------------- + { "country_name": "France", "towns": [ {"town_name": "Paris"}, {"town_name": "Marseille"} ] } + { "country_name": "USA", "towns": [ {"town_name": "New-York"} ] } +---------------------------------- + +* You can do that using `push_previous_map_as_event` aggregate plugin option : + +[source,ruby] +---------------------------------- + filter { + aggregate { + task_id => "%{country_name}" + code => " + map['country_name'] = event.get('country_name') + map['towns'] ||= [] + map['towns'] << {'town_name' => event.get('town_name')} + event.cancel() + " + push_previous_map_as_event => true + timeout => 3 + } + } +---------------------------------- + +* The key point is that each time aggregate plugin detects a new `country_name`, it pushes previous aggregate map as a new Logstash event, and then creates a new empty map for the next country +* When 5s timeout comes, the last aggregate map is pushed as a new event +* Finally, initial events (which are not aggregated) are dropped because useless (thanks to `event.cancel()`) + + +[id="{version}-plugins-{type}s-{plugin}-example5"] +==== Example #5 : no end event and push events as soon as possible + +Fifth use case: like example #3, there is no end event. + +Events keep comming for an indefinite time and you want to push the aggregation map as soon as possible after the last user interaction without waiting for the `timeout`. + +This allows to have the aggregated events pushed closer to real time. 
+ + +A typical case is aggregating or tracking user behaviour. + +We can track a user by its ID through the events, however once the user stops interacting, the events stop coming in. + +There is no specific event indicating the end of the user's interaction. + +The user ineraction will be considered as ended when no events for the specified user (task_id) arrive after the specified inactivity_timeout`. + +If the user continues interacting for longer than `timeout` seconds (since first event), the aggregation map will still be deleted and pushed as a new event when timeout occurs. + +The difference with example #3 is that the events will be pushed as soon as the user stops interacting for `inactivity_timeout` seconds instead of waiting for the end of `timeout` seconds since first event. + +In this case, we can enable the option 'push_map_as_event_on_timeout' to enable pushing the aggregation map as a new event when inactivity timeout occurs. + +In addition, we can enable 'timeout_code' to execute code on the populated timeout event. + +We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID. + + +* Given these logs: + +[source,ruby] +---------------------------------- +INFO - 12345 - Clicked One +INFO - 12345 - Clicked Two +INFO - 12345 - Clicked Three +---------------------------------- + +* You can aggregate the amount of clicks the user did like this: + +[source,ruby] +---------------------------------- +filter { + grok { + match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ] + } + aggregate { + task_id => "%{user_id}" + code => "map['clicks'] ||= 0; map['clicks'] += 1;" + push_map_as_event_on_timeout => true + timeout_task_id_field => "user_id" + timeout => 3600 # 1 hour timeout, user activity will be considered finished one hour after the first event, even if events keep comming + inactivity_timeout => 300 # 5 minutes timeout, user activity will be considered finished if no new events arrive 5 minutes after the last event + timeout_tags => ['_aggregatetimeout'] + timeout_code => "event.set('several_clicks', event.get('clicks') > 1)" + } +} +---------------------------------- + +* After five minutes of inactivity or one hour since first event, this will yield an event like: + +[source,json] +---------------------------------- +{ + "user_id": "12345", + "clicks": 3, + "several_clicks": true, + "tags": [ + "_aggregatetimeout" + ] +} +---------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-howitworks"] +==== How it works +* the filter needs a "task_id" to correlate events (log lines) of a same task +* at the task beginning, filter creates a map, attached to task_id +* for each event, you can execute code using 'event' and 'map' (for instance, copy an event field to map) +* in the final event, you can execute a last code (for instance, add map data to final event) +* after the final event, the map attached to task is deleted (thanks to `end_of_task => true`) +* an aggregate map is tied to one task_id value which is tied to one task_id pattern. So if you have 2 filters with different task_id patterns, even if you have same task_id value, they won't share the same aggregate map. +* in one filter configuration, it is recommanded to define a timeout option to protect the feature against unterminated tasks. 
It tells the filter to delete expired maps +* if no timeout is defined, by default, all maps older than 1800 seconds are automatically deleted +* all timeout options have to be defined in only one aggregate filter per task_id pattern. Timeout options are : timeout, inactivity_timeout, timeout_code, push_map_as_event_on_timeout, push_previous_map_as_event, timeout_task_id_field, timeout_tags +* if `code` execution raises an exception, the error is logged and event is tagged '_aggregateexception' + + +[id="{version}-plugins-{type}s-{plugin}-usecases"] +==== Use Cases +* extract some cool metrics from task logs and push them into task final log event (like in example #1 and #2) +* extract error information in any task log line, and push it in final task event (to get a final event with all error information if any) +* extract all back-end calls as a list, and push this list in final task event (to get a task profile) +* extract all http headers logged in several lines to push this list in final task event (complete http request info) +* for every back-end call, collect call details available on several lines, analyse it and finally tag final back-end call log line (error, timeout, business-warning, ...) +* Finally, task id can be any correlation id matching your need : it can be a session id, a file path, ... + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Aggregate Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-aggregate_maps_path>> |<>, a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-code>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-end_of_task>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-inactivity_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-map_action>> |<>, one of `["create", "update", "create_or_update"]`|No +| <<{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-push_previous_map_as_event>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-task_id>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout_code>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout_tags>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout_task_id_field>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-aggregate_maps_path"] +===== `aggregate_maps_path` + + * Value type is <> + * There is no default value for this setting. + +The path to file where aggregate maps are stored when Logstash stops +and are loaded from when Logstash starts. + +If not defined, aggregate maps will not be stored at Logstash stop and will be lost. +Must be defined in only one aggregate filter (as aggregate maps are global). + +Example: +[source,ruby] + filter { + aggregate { + aggregate_maps_path => "/path/to/.aggregate_maps" + } + } + +[id="{version}-plugins-{type}s-{plugin}-code"] +===== `code` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The code to execute to update map, using current event. 
+
+[id="{version}-plugins-{type}s-{plugin}-end_of_task"]
+===== `end_of_task`
+
+ * Value type is <>
+ * Default value is `false`
+
+Tell the filter that the task is ended, and therefore, to delete the aggregate map after code execution.
+
+[id="{version}-plugins-{type}s-{plugin}-inactivity_timeout"]
+===== `inactivity_timeout`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The number of seconds (since the last event) after which a task is considered as expired.
+
+When the timeout occurs for a task, its aggregate map is evicted.
+
+If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event.
+
+`inactivity_timeout` can be defined for each "task_id" pattern.
+
+`inactivity_timeout` must be lower than `timeout`.
+
+[id="{version}-plugins-{type}s-{plugin}-map_action"]
+===== `map_action`
+
+ * Value type is <>
+ * Default value is `"create_or_update"`
+
+Tell the filter what to do with the aggregate map.
+
+`"create"`: create the map, and execute the code only if the map wasn't created before
+
+`"update"`: don't create the map, and execute the code only if the map was created before
+
+`"create_or_update"`: create the map if it wasn't created before, and execute the code in all cases
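+
+For instance, a minimal sketch (reusing the `taskid` field from example #1) where the code runs only on the first event of a task, because no map exists yet:
+
+[source,ruby]
+    filter {
+      aggregate {
+        task_id => "%{taskid}"
+        # executed only if no map exists yet for this task_id
+        code => "map['sql_duration'] = 0"
+        map_action => "create"
+      }
+    }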
+
+[id="{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout"]
+===== `push_map_as_event_on_timeout`
+
+ * Value type is <>
+ * Default value is `false`
+
+When this option is enabled, each time a task timeout is detected, the task aggregation map is pushed as a new Logstash event.
+This makes it possible to detect and process task timeouts in Logstash, but also to manage tasks that have no explicit end event.
+
+[id="{version}-plugins-{type}s-{plugin}-push_previous_map_as_event"]
+===== `push_previous_map_as_event`
+
+ * Value type is <>
+ * Default value is `false`
+
+When this option is enabled, each time the aggregate plugin detects a new task id, it pushes the previous aggregate map as a new Logstash event,
+and then creates a new empty map for the next task.
+
+WARNING: this option works fine only if tasks come one after the other. It means: all task1 events, then all task2 events, etc.
+
+[id="{version}-plugins-{type}s-{plugin}-task_id"]
+===== `task_id`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The expression defining the task ID used to correlate logs.
+
+This value must uniquely identify the task.
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        task_id => "%{type}%{my_task_id}"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is <>
+ * Default value is `1800`
+
+The number of seconds (since the first event) after which a task is considered as expired.
+
+When the timeout occurs for a task, its aggregate map is evicted.
+
+If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event.
+
+The timeout can be defined for each "task_id" pattern.
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_code"]
+===== `timeout_code`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The code to execute to complete the timeout-generated event, when `push_map_as_event_on_timeout` or `push_previous_map_as_event` is set to true.
+The code block will have access to the newly generated timeout event, which is pre-populated with the aggregation map.
+
+If `timeout_task_id_field` is set, the event is also populated with the task_id value.
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        timeout_code => "event.set('state', 'timeout')"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_tags"]
+===== `timeout_tags`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Defines tags to add when a timeout event is generated and yielded.
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        timeout_tags => ["aggregate_timeout"]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_task_id_field"]
+===== `timeout_task_id_field`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+This option indicates the field of the timeout-generated event where the "task_id" value is stored.
+The task id will then be set into the timeout event. This can help correlate which tasks have timed out.
+
+For example, with the option `timeout_task_id_field => "my_id"`, when the timeout task id is `"12345"`, the generated timeout event will contain `'my_id' => '12345'`.
+
+By default, if this option is not set, the task id value won't be set into the timeout-generated event.
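+
+A minimal sketch, combining the hypothetical `my_id` field above with the `user_id` pattern from example #3:
+
+[source,ruby]
+    filter {
+      aggregate {
+        task_id => "%{user_id}"
+        code => "map['clicks'] ||= 0; map['clicks'] += 1"
+        push_map_as_event_on_timeout => true
+        # the generated timeout event will contain 'my_id' => '<user_id value>'
+        timeout_task_id_field => "my_id"
+      }
+    }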
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/aggregate-v2.6.4.asciidoc b/docs/versioned-plugins/filters/aggregate-v2.6.4.asciidoc
new file mode 100644
index 000000000..c4e816ba0
--- /dev/null
+++ b/docs/versioned-plugins/filters/aggregate-v2.6.4.asciidoc
@@ -0,0 +1,542 @@
+:plugin: aggregate
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.6.4
+:release_date: 2017-10-10
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-aggregate/blob/v2.6.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Aggregate filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+
+[id="{version}-plugins-{type}s-{plugin}-description"]
+==== Description
+
+
+The aim of this filter is to aggregate information available among several events (typically log lines) belonging to the same task,
+and finally push the aggregated information into the final task event.
+
+You should be very careful to set Logstash filter workers to 1 (`-w 1` flag) for this filter to work correctly,
+otherwise events may be processed out of sequence and unexpected results will occur.
+
+
+[id="{version}-plugins-{type}s-{plugin}-example1"]
+==== Example #1
+
+* with these given logs:
+
+[source,ruby]
+----------------------------------
+ INFO - 12345 - TASK_START - start
+ INFO - 12345 - SQL - sqlQuery1 - 12
+ INFO - 12345 - SQL - sqlQuery2 - 34
+ INFO - 12345 - TASK_END - end
+----------------------------------
+
+* you can aggregate "sql duration" for the whole task with this configuration:
+
+[source,ruby]
+----------------------------------
+ filter {
+   grok {
+     match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
+   }
+
+   if [logger] == "TASK_START" {
+     aggregate {
+       task_id => "%{taskid}"
+       code => "map['sql_duration'] = 0"
+       map_action => "create"
+     }
+   }
+
+   if [logger] == "SQL" {
+     aggregate {
+       task_id => "%{taskid}"
+       code => "map['sql_duration'] += event.get('duration')"
+       map_action => "update"
+     }
+   }
+
+   if [logger] == "TASK_END" {
+     aggregate {
+       task_id => "%{taskid}"
+       code => "event.set('sql_duration', map['sql_duration'])"
+       map_action => "update"
+       end_of_task => true
+       timeout => 120
+     }
+   }
+ }
+----------------------------------
+
+* the final event then looks like:
+
+[source,ruby]
+----------------------------------
+{
+  "message" => "INFO - 12345 - TASK_END - end message",
+  "sql_duration" => 46
+}
+----------------------------------
+
+the field `sql_duration` is added and contains the sum of all sql query durations.
+
+
+[id="{version}-plugins-{type}s-{plugin}-example2"]
+==== Example #2: no start event
+
+* If you have the same logs as in example #1, but without a start log:
+
+[source,ruby]
+----------------------------------
+ INFO - 12345 - SQL - sqlQuery1 - 12
+ INFO - 12345 - SQL - sqlQuery2 - 34
+ INFO - 12345 - TASK_END - end
+----------------------------------
+
+* you can also aggregate "sql duration" with a slightly different configuration:
+
+[source,ruby]
+----------------------------------
+ filter {
+   grok {
+     match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
+   }
+
+   if [logger] == "SQL" {
+     aggregate {
+       task_id => "%{taskid}"
+       code => "map['sql_duration'] ||= 0 ; map['sql_duration'] += event.get('duration')"
+     }
+   }
+
+   if [logger] == "TASK_END" {
+     aggregate {
+       task_id => "%{taskid}"
+       code => "event.set('sql_duration', map['sql_duration'])"
+       end_of_task => true
+       timeout => 120
+     }
+   }
+ }
+----------------------------------
+
+* the final event is exactly the same as in example #1
+* the key point is the "||=" ruby operator. It initializes the 'sql_duration' map entry to 0 only if this map entry is not already initialized (see the short illustration below)
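+
+For readers new to that operator, a tiny plain-Ruby illustration:
+
+[source,ruby]
+    sql_duration = nil
+    sql_duration ||= 0   # sql_duration was nil, so it is set to 0
+    sql_duration ||= 99  # already set, so this line changes nothing
+    sql_duration += 12   # now safe to accumulate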
+
+
+[id="{version}-plugins-{type}s-{plugin}-example3"]
+==== Example #3: no end event
+
+Third use case: You have no specific end event.
+
+A typical case is aggregating or tracking user behaviour. We can track a user by its ID through the events; however, once the user stops interacting, the events stop coming in. There is no specific event indicating the end of the user's interaction.
+
+In this case, we can enable the option 'push_map_as_event_on_timeout' to push the aggregation map as a new event when a timeout occurs.
+In addition, we can enable 'timeout_code' to execute code on the populated timeout event.
+We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID.
+
+* Given these logs:
+
+[source,ruby]
+----------------------------------
+INFO - 12345 - Clicked One
+INFO - 12345 - Clicked Two
+INFO - 12345 - Clicked Three
+----------------------------------
+
+* You can aggregate the amount of clicks the user did like this:
+
+[source,ruby]
+----------------------------------
+filter {
+  grok {
+    match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ]
+  }
+
+  aggregate {
+    task_id => "%{user_id}"
+    code => "map['clicks'] ||= 0; map['clicks'] += 1;"
+    push_map_as_event_on_timeout => true
+    timeout_task_id_field => "user_id"
+    timeout => 600 # 10 minutes timeout
+    timeout_tags => ['_aggregatetimeout']
+    timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
+  }
+}
+----------------------------------
+
+* After ten minutes, this will yield an event like:
+
+[source,json]
+----------------------------------
+{
+  "user_id": "12345",
+  "clicks": 3,
+  "several_clicks": true,
+  "tags": [
+     "_aggregatetimeout"
+  ]
+}
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-example4"]
+==== Example #4: no end event and tasks come one after the other
+
+Fourth use case: like example #3, you have no specific end event, but also, tasks come one after the other.
+
+That is to say: tasks are not interleaved. All task1 events come, then all task2 events come, ...
+
+In that case, you don't want to wait for the task timeout to flush the aggregation map.
+
+* A typical case is aggregating results from the jdbc input plugin.
+* Given that you have this SQL query: `SELECT country_name, town_name FROM town`
+* Using the jdbc input plugin, you get these 3 events (a sketch of such an input appears after this example):
+
+[source,json]
+----------------------------------
+ { "country_name": "France", "town_name": "Paris" }
+ { "country_name": "France", "town_name": "Marseille" }
+ { "country_name": "USA", "town_name": "New-York" }
+----------------------------------
+
+* And you would like to push these 2 result events into elasticsearch:
+
+[source,json]
+----------------------------------
+ { "country_name": "France", "towns": [ {"town_name": "Paris"}, {"town_name": "Marseille"} ] }
+ { "country_name": "USA", "towns": [ {"town_name": "New-York"} ] }
+----------------------------------
+
+* You can do that using the `push_previous_map_as_event` aggregate plugin option:
+
+[source,ruby]
+----------------------------------
+ filter {
+   aggregate {
+     task_id => "%{country_name}"
+     code => "
+       map['country_name'] = event.get('country_name')
+       map['towns'] ||= []
+       map['towns'] << {'town_name' => event.get('town_name')}
+       event.cancel()
+     "
+     push_previous_map_as_event => true
+     timeout => 3
+   }
+ }
+----------------------------------
+
+* The key point is that each time the aggregate plugin detects a new `country_name`, it pushes the previous aggregate map as a new Logstash event, and then creates a new empty map for the next country
+* When the 3s timeout expires, the last aggregate map is pushed as a new event
+* Finally, the initial events (which are not aggregated) are dropped because they are no longer useful (thanks to `event.cancel()`)
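+
+For reference, a sketch of a jdbc input that could produce such events; the connection settings are illustrative placeholders, not part of this example:
+
+[source,ruby]
+----------------------------------
+ input {
+   jdbc {
+     # placeholder connection settings
+     jdbc_connection_string => "jdbc:postgresql://localhost:5432/mydb"
+     jdbc_user => "logstash"
+     jdbc_driver_class => "org.postgresql.Driver"
+     statement => "SELECT country_name, town_name FROM town"
+   }
+ }
+----------------------------------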
+
+
+[id="{version}-plugins-{type}s-{plugin}-example5"]
+==== Example #5: no end event and push events as soon as possible
+
+Fifth use case: like example #3, there is no end event.
+
+Events keep coming for an indefinite time, and you want to push the aggregation map as soon as possible after the last user interaction, without waiting for the `timeout`.
+
+This allows the aggregated events to be pushed closer to real time.
+
+
+A typical case is aggregating or tracking user behaviour.
+
+We can track a user by its ID through the events; however, once the user stops interacting, the events stop coming in.
+
+There is no specific event indicating the end of the user's interaction.
+
+The user interaction will be considered as ended when no events for the specified user (task_id) arrive within the specified `inactivity_timeout`.
+
+If the user continues interacting for longer than `timeout` seconds (since the first event), the aggregation map will still be deleted and pushed as a new event when the timeout occurs.
+
+The difference with example #3 is that the events will be pushed as soon as the user stops interacting for `inactivity_timeout` seconds, instead of waiting for the end of `timeout` seconds since the first event.
+
+In this case, we can enable the option 'push_map_as_event_on_timeout' to push the aggregation map as a new event when the inactivity timeout occurs.
+
+In addition, we can enable 'timeout_code' to execute code on the populated timeout event.
+
+We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID.
+
+
+* Given these logs:
+
+[source,ruby]
+----------------------------------
+INFO - 12345 - Clicked One
+INFO - 12345 - Clicked Two
+INFO - 12345 - Clicked Three
+----------------------------------
+
+* You can aggregate the amount of clicks the user did like this:
+
+[source,ruby]
+----------------------------------
+filter {
+  grok {
+    match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ]
+  }
+  aggregate {
+    task_id => "%{user_id}"
+    code => "map['clicks'] ||= 0; map['clicks'] += 1;"
+    push_map_as_event_on_timeout => true
+    timeout_task_id_field => "user_id"
+    timeout => 3600 # 1 hour timeout, user activity will be considered finished one hour after the first event, even if events keep coming
+    inactivity_timeout => 300 # 5 minutes timeout, user activity will be considered finished if no new events arrive 5 minutes after the last event
+    timeout_tags => ['_aggregatetimeout']
+    timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
+  }
+}
+----------------------------------
+
+* After five minutes of inactivity or one hour after the first event, this will yield an event like:
+
+[source,json]
+----------------------------------
+{
+  "user_id": "12345",
+  "clicks": 3,
+  "several_clicks": true,
+  "tags": [
+     "_aggregatetimeout"
+  ]
+}
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-howitworks"]
+==== How it works
+* the filter needs a "task_id" to correlate events (log lines) of the same task
+* at the beginning of the task, the filter creates a map attached to the task_id
+* for each event, you can execute code using 'event' and 'map' (for instance, copy an event field to the map)
+* on the final event, you can execute a last piece of code (for instance, add map data to the final event)
+* after the final event, the map attached to the task is deleted (thanks to `end_of_task => true`)
+* an aggregate map is tied to one task_id value, which is tied to one task_id pattern. So if you have 2 filters with different task_id patterns, even if you have the same task_id value, they won't share the same aggregate map.
+* in one filter configuration, it is recommended to define a timeout option to protect the feature against unterminated tasks. It tells the filter to delete expired maps.
+* if no timeout is defined, by default, all maps older than 1800 seconds are automatically deleted
+* all timeout options have to be defined in only one aggregate filter per task_id pattern. Timeout options are: timeout, inactivity_timeout, timeout_code, push_map_as_event_on_timeout, push_previous_map_as_event, timeout_task_id_field, timeout_tags
+* if `code` execution raises an exception, the error is logged and the event is tagged '_aggregateexception'
+
+
+[id="{version}-plugins-{type}s-{plugin}-usecases"]
+==== Use Cases
+* extract some cool metrics from task logs and push them into the final task event (like in examples #1 and #2)
+* extract error information from any task log line, and push it into the final task event (to get a final event with all error information, if any)
+* extract all back-end calls as a list, and push this list into the final task event (to get a task profile)
+* extract all http headers logged across several lines and push this list into the final task event (complete http request info)
+* for every back-end call, collect call details available across several lines, analyse them, and finally tag the final back-end call log line (error, timeout, business-warning, ...)
+* Finally, the task id can be any correlation id matching your needs: it can be a session id, a file path, ...
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Aggregate Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-aggregate_maps_path>> |<>, a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-code>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-end_of_task>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-inactivity_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-map_action>> |<>, one of `["create", "update", "create_or_update"]`|No
+| <<{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-push_previous_map_as_event>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-task_id>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout_code>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout_tags>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout_task_id_field>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-aggregate_maps_path"]
+===== `aggregate_maps_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The path to the file where aggregate maps are stored when Logstash stops,
+and from which they are loaded when Logstash starts.
+
+If not defined, aggregate maps will not be stored when Logstash stops and will be lost.
+Must be defined in only one aggregate filter (as aggregate maps are global).
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        aggregate_maps_path => "/path/to/.aggregate_maps"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-code"]
+===== `code`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The code to execute to update the map, using the current event.
+
+Or, conversely, the code to execute to update the event, using the current map.
+
+You will have a 'map' variable and an 'event' variable available (that is, the event itself).
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        code => "map['sql_duration'] += event.get('duration')"
+      }
+    }
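+
+As a complementary illustration, here is a minimal sketch of both directions, reusing the `logger`, `taskid`, and `duration` fields from example #2: the first block updates the map using the current event, and the second updates the final event using the map.
+
+[source,ruby]
+    filter {
+      if [logger] == "SQL" {
+        aggregate {
+          task_id => "%{taskid}"
+          # direction 1: update the map, using the current event
+          code => "map['sql_duration'] ||= 0; map['sql_duration'] += event.get('duration')"
+        }
+      }
+      if [logger] == "TASK_END" {
+        aggregate {
+          task_id => "%{taskid}"
+          # direction 2: update the event, using the current map
+          code => "event.set('sql_duration', map['sql_duration'])"
+          end_of_task => true
+        }
+      }
+    }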
+
+[id="{version}-plugins-{type}s-{plugin}-end_of_task"]
+===== `end_of_task`
+
+ * Value type is <>
+ * Default value is `false`
+
+Tell the filter that the task is ended, and therefore, to delete the aggregate map after code execution.
+
+[id="{version}-plugins-{type}s-{plugin}-inactivity_timeout"]
+===== `inactivity_timeout`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The number of seconds (since the last event) after which a task is considered as expired.
+
+When the timeout occurs for a task, its aggregate map is evicted.
+
+If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event.
+
+`inactivity_timeout` can be defined for each "task_id" pattern.
+
+`inactivity_timeout` must be lower than `timeout`.
+
+[id="{version}-plugins-{type}s-{plugin}-map_action"]
+===== `map_action`
+
+ * Value type is <>
+ * Default value is `"create_or_update"`
+
+Tell the filter what to do with the aggregate map.
+
+`"create"`: create the map, and execute the code only if the map wasn't created before
+
+`"update"`: don't create the map, and execute the code only if the map was created before
+
+`"create_or_update"`: create the map if it wasn't created before, and execute the code in all cases
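+
+For instance, a minimal sketch (reusing the `taskid` field from example #1) where the code runs only on the first event of a task, because no map exists yet:
+
+[source,ruby]
+    filter {
+      aggregate {
+        task_id => "%{taskid}"
+        # executed only if no map exists yet for this task_id
+        code => "map['sql_duration'] = 0"
+        map_action => "create"
+      }
+    }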
+
+[id="{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout"]
+===== `push_map_as_event_on_timeout`
+
+ * Value type is <>
+ * Default value is `false`
+
+When this option is enabled, each time a task timeout is detected, the task aggregation map is pushed as a new Logstash event.
+This makes it possible to detect and process task timeouts in Logstash, but also to manage tasks that have no explicit end event.
+
+[id="{version}-plugins-{type}s-{plugin}-push_previous_map_as_event"]
+===== `push_previous_map_as_event`
+
+ * Value type is <>
+ * Default value is `false`
+
+When this option is enabled, each time the aggregate plugin detects a new task id, it pushes the previous aggregate map as a new Logstash event,
+and then creates a new empty map for the next task.
+
+WARNING: this option works fine only if tasks come one after the other. It means: all task1 events, then all task2 events, etc.
+
+[id="{version}-plugins-{type}s-{plugin}-task_id"]
+===== `task_id`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The expression defining the task ID used to correlate logs.
+
+This value must uniquely identify the task.
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        task_id => "%{type}%{my_task_id}"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is <>
+ * Default value is `1800`
+
+The number of seconds (since the first event) after which a task is considered as expired.
+
+When the timeout occurs for a task, its aggregate map is evicted.
+
+If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event.
+
+The timeout can be defined for each "task_id" pattern.
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_code"]
+===== `timeout_code`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The code to execute to complete the timeout-generated event, when `push_map_as_event_on_timeout` or `push_previous_map_as_event` is set to true.
+The code block will have access to the newly generated timeout event, which is pre-populated with the aggregation map.
+
+If `timeout_task_id_field` is set, the event is also populated with the task_id value.
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        timeout_code => "event.set('state', 'timeout')"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_tags"]
+===== `timeout_tags`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Defines tags to add when a timeout event is generated and yielded.
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        timeout_tags => ["aggregate_timeout"]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_task_id_field"]
+===== `timeout_task_id_field`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+This option indicates the field of the timeout-generated event where the "task_id" value is stored.
+The task id will then be set into the timeout event. This can help correlate which tasks have timed out.
+
+For example, with the option `timeout_task_id_field => "my_id"`, when the timeout task id is `"12345"`, the generated timeout event will contain `'my_id' => '12345'`.
+
+By default, if this option is not set, the task id value won't be set into the timeout-generated event.
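+
+A minimal sketch, combining the hypothetical `my_id` field above with the `user_id` pattern from example #3:
+
+[source,ruby]
+    filter {
+      aggregate {
+        task_id => "%{user_id}"
+        code => "map['clicks'] ||= 0; map['clicks'] += 1"
+        push_map_as_event_on_timeout => true
+        # the generated timeout event will contain 'my_id' => '<user_id value>'
+        timeout_task_id_field => "my_id"
+      }
+    }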
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/aggregate-v2.7.0.asciidoc b/docs/versioned-plugins/filters/aggregate-v2.7.0.asciidoc
new file mode 100644
index 000000000..2ed7e5eb5
--- /dev/null
+++ b/docs/versioned-plugins/filters/aggregate-v2.7.0.asciidoc
@@ -0,0 +1,555 @@
+:plugin: aggregate
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.7.0
+:release_date: 2017-11-03
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-aggregate/blob/v2.7.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Aggregate filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+
+[id="{version}-plugins-{type}s-{plugin}-description"]
+==== Description
+
+
+The aim of this filter is to aggregate information available among several events (typically log lines) belonging to the same task,
+and finally push the aggregated information into the final task event.
+
+You should be very careful to set Logstash filter workers to 1 (`-w 1` flag) for this filter to work correctly,
+otherwise events may be processed out of sequence and unexpected results will occur.
+
+
+[id="{version}-plugins-{type}s-{plugin}-example1"]
+==== Example #1
+
+* with these given logs:
+
+[source,ruby]
+----------------------------------
+ INFO - 12345 - TASK_START - start
+ INFO - 12345 - SQL - sqlQuery1 - 12
+ INFO - 12345 - SQL - sqlQuery2 - 34
+ INFO - 12345 - TASK_END - end
+----------------------------------
+
+* you can aggregate "sql duration" for the whole task with this configuration:
+
+[source,ruby]
+----------------------------------
+ filter {
+   grok {
+     match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
+   }
+
+   if [logger] == "TASK_START" {
+     aggregate {
+       task_id => "%{taskid}"
+       code => "map['sql_duration'] = 0"
+       map_action => "create"
+     }
+   }
+
+   if [logger] == "SQL" {
+     aggregate {
+       task_id => "%{taskid}"
+       code => "map['sql_duration'] += event.get('duration')"
+       map_action => "update"
+     }
+   }
+
+   if [logger] == "TASK_END" {
+     aggregate {
+       task_id => "%{taskid}"
+       code => "event.set('sql_duration', map['sql_duration'])"
+       map_action => "update"
+       end_of_task => true
+       timeout => 120
+     }
+   }
+ }
+----------------------------------
+
+* the final event then looks like:
+
+[source,ruby]
+----------------------------------
+{
+  "message" => "INFO - 12345 - TASK_END - end message",
+  "sql_duration" => 46
+}
+----------------------------------
+
+the field `sql_duration` is added and contains the sum of all sql query durations.
+
+
+[id="{version}-plugins-{type}s-{plugin}-example2"]
+==== Example #2: no start event
+
+* If you have the same logs as in example #1, but without a start log:
+
+[source,ruby]
+----------------------------------
+ INFO - 12345 - SQL - sqlQuery1 - 12
+ INFO - 12345 - SQL - sqlQuery2 - 34
+ INFO - 12345 - TASK_END - end
+----------------------------------
+
+* you can also aggregate "sql duration" with a slightly different configuration:
+
+[source,ruby]
+----------------------------------
+ filter {
+   grok {
+     match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
+   }
+
+   if [logger] == "SQL" {
+     aggregate {
+       task_id => "%{taskid}"
+       code => "map['sql_duration'] ||= 0 ; map['sql_duration'] += event.get('duration')"
+     }
+   }
+
+   if [logger] == "TASK_END" {
+     aggregate {
+       task_id => "%{taskid}"
+       code => "event.set('sql_duration', map['sql_duration'])"
+       end_of_task => true
+       timeout => 120
+     }
+   }
+ }
+----------------------------------
+
+* the final event is exactly the same as in example #1
+* the key point is the "||=" ruby operator. It initializes the 'sql_duration' map entry to 0 only if this map entry is not already initialized (see the short illustration below)
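+
+For readers new to that operator, a tiny plain-Ruby illustration:
+
+[source,ruby]
+    sql_duration = nil
+    sql_duration ||= 0   # sql_duration was nil, so it is set to 0
+    sql_duration ||= 99  # already set, so this line changes nothing
+    sql_duration += 12   # now safe to accumulate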
+
+
+[id="{version}-plugins-{type}s-{plugin}-example3"]
+==== Example #3: no end event
+
+Third use case: You have no specific end event.
+
+A typical case is aggregating or tracking user behaviour. We can track a user by its ID through the events; however, once the user stops interacting, the events stop coming in. There is no specific event indicating the end of the user's interaction.
+
+In this case, we can enable the option 'push_map_as_event_on_timeout' to push the aggregation map as a new event when a timeout occurs.
+In addition, we can enable 'timeout_code' to execute code on the populated timeout event.
+We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID.
+
+* Given these logs:
+
+[source,ruby]
+----------------------------------
+INFO - 12345 - Clicked One
+INFO - 12345 - Clicked Two
+INFO - 12345 - Clicked Three
+----------------------------------
+
+* You can aggregate the amount of clicks the user did like this:
+
+[source,ruby]
+----------------------------------
+filter {
+  grok {
+    match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ]
+  }
+
+  aggregate {
+    task_id => "%{user_id}"
+    code => "map['clicks'] ||= 0; map['clicks'] += 1;"
+    push_map_as_event_on_timeout => true
+    timeout_task_id_field => "user_id"
+    timeout => 600 # 10 minutes timeout
+    timeout_tags => ['_aggregatetimeout']
+    timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
+  }
+}
+----------------------------------
+
+* After ten minutes, this will yield an event like:
+
+[source,json]
+----------------------------------
+{
+  "user_id": "12345",
+  "clicks": 3,
+  "several_clicks": true,
+  "tags": [
+     "_aggregatetimeout"
+  ]
+}
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-example4"]
+==== Example #4: no end event and tasks come one after the other
+
+Fourth use case: like example #3, you have no specific end event, but also, tasks come one after the other.
+
+That is to say: tasks are not interleaved. All task1 events come, then all task2 events come, ...
+
+In that case, you don't want to wait for the task timeout to flush the aggregation map.
+
+* A typical case is aggregating results from the jdbc input plugin.
+* Given that you have this SQL query: `SELECT country_name, town_name FROM town`
+* Using the jdbc input plugin, you get these 3 events (a sketch of such an input appears after this example):
+
+[source,json]
+----------------------------------
+ { "country_name": "France", "town_name": "Paris" }
+ { "country_name": "France", "town_name": "Marseille" }
+ { "country_name": "USA", "town_name": "New-York" }
+----------------------------------
+
+* And you would like to push these 2 result events into elasticsearch:
+
+[source,json]
+----------------------------------
+ { "country_name": "France", "towns": [ {"town_name": "Paris"}, {"town_name": "Marseille"} ] }
+ { "country_name": "USA", "towns": [ {"town_name": "New-York"} ] }
+----------------------------------
+
+* You can do that using the `push_previous_map_as_event` aggregate plugin option:
+
+[source,ruby]
+----------------------------------
+ filter {
+   aggregate {
+     task_id => "%{country_name}"
+     code => "
+       map['country_name'] = event.get('country_name')
+       map['towns'] ||= []
+       map['towns'] << {'town_name' => event.get('town_name')}
+       event.cancel()
+     "
+     push_previous_map_as_event => true
+     timeout => 3
+   }
+ }
+----------------------------------
+
+* The key point is that each time the aggregate plugin detects a new `country_name`, it pushes the previous aggregate map as a new Logstash event, and then creates a new empty map for the next country
+* When the 3s timeout expires, the last aggregate map is pushed as a new event
+* Finally, the initial events (which are not aggregated) are dropped because they are no longer useful (thanks to `event.cancel()`)
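+
+For reference, a sketch of a jdbc input that could produce such events; the connection settings are illustrative placeholders, not part of this example:
+
+[source,ruby]
+----------------------------------
+ input {
+   jdbc {
+     # placeholder connection settings
+     jdbc_connection_string => "jdbc:postgresql://localhost:5432/mydb"
+     jdbc_user => "logstash"
+     jdbc_driver_class => "org.postgresql.Driver"
+     statement => "SELECT country_name, town_name FROM town"
+   }
+ }
+----------------------------------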
+
+
+[id="{version}-plugins-{type}s-{plugin}-example5"]
+==== Example #5: no end event and push events as soon as possible
+
+Fifth use case: like example #3, there is no end event.
+
+Events keep coming for an indefinite time, and you want to push the aggregation map as soon as possible after the last user interaction, without waiting for the `timeout`.
+
+This allows the aggregated events to be pushed closer to real time.
+
+
+A typical case is aggregating or tracking user behaviour.
+
+We can track a user by its ID through the events; however, once the user stops interacting, the events stop coming in.
+
+There is no specific event indicating the end of the user's interaction.
+
+The user interaction will be considered as ended when no events for the specified user (task_id) arrive within the specified `inactivity_timeout`.
+
+If the user continues interacting for longer than `timeout` seconds (since the first event), the aggregation map will still be deleted and pushed as a new event when the timeout occurs.
+
+The difference with example #3 is that the events will be pushed as soon as the user stops interacting for `inactivity_timeout` seconds, instead of waiting for the end of `timeout` seconds since the first event.
+
+In this case, we can enable the option 'push_map_as_event_on_timeout' to push the aggregation map as a new event when the inactivity timeout occurs.
+
+In addition, we can enable 'timeout_code' to execute code on the populated timeout event.
+
+We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID.
+
+
+* Given these logs:
+
+[source,ruby]
+----------------------------------
+INFO - 12345 - Clicked One
+INFO - 12345 - Clicked Two
+INFO - 12345 - Clicked Three
+----------------------------------
+
+* You can aggregate the amount of clicks the user did like this:
+
+[source,ruby]
+----------------------------------
+filter {
+  grok {
+    match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ]
+  }
+  aggregate {
+    task_id => "%{user_id}"
+    code => "map['clicks'] ||= 0; map['clicks'] += 1;"
+    push_map_as_event_on_timeout => true
+    timeout_task_id_field => "user_id"
+    timeout => 3600 # 1 hour timeout, user activity will be considered finished one hour after the first event, even if events keep coming
+    inactivity_timeout => 300 # 5 minutes timeout, user activity will be considered finished if no new events arrive 5 minutes after the last event
+    timeout_tags => ['_aggregatetimeout']
+    timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
+  }
+}
+----------------------------------
+
+* After five minutes of inactivity or one hour after the first event, this will yield an event like:
+
+[source,json]
+----------------------------------
+{
+  "user_id": "12345",
+  "clicks": 3,
+  "several_clicks": true,
+  "tags": [
+     "_aggregatetimeout"
+  ]
+}
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-howitworks"]
+==== How it works
+* the filter needs a "task_id" to correlate events (log lines) of the same task
+* at the beginning of the task, the filter creates a map attached to the task_id
+* for each event, you can execute code using 'event' and 'map' (for instance, copy an event field to the map)
+* on the final event, you can execute a last piece of code (for instance, add map data to the final event)
+* after the final event, the map attached to the task is deleted (thanks to `end_of_task => true`)
+* an aggregate map is tied to one task_id value, which is tied to one task_id pattern. So if you have 2 filters with different task_id patterns, even if you have the same task_id value, they won't share the same aggregate map.
+* in one filter configuration, it is recommended to define a timeout option to protect the feature against unterminated tasks. It tells the filter to delete expired maps.
+* if no timeout is defined, by default, all maps older than 1800 seconds are automatically deleted
+* all timeout options have to be defined in only one aggregate filter per task_id pattern (per pipeline). Timeout options are: timeout, inactivity_timeout, timeout_code, push_map_as_event_on_timeout, push_previous_map_as_event, timeout_task_id_field, timeout_tags
+* if `code` execution raises an exception, the error is logged and the event is tagged '_aggregateexception'
+
+
+[id="{version}-plugins-{type}s-{plugin}-usecases"]
+==== Use Cases
+* extract some cool metrics from task logs and push them into the final task event (like in examples #1 and #2)
+* extract error information from any task log line, and push it into the final task event (to get a final event with all error information, if any)
+* extract all back-end calls as a list, and push this list into the final task event (to get a task profile)
+* extract all http headers logged across several lines and push this list into the final task event (complete http request info)
+* for every back-end call, collect call details available across several lines, analyse them, and finally tag the final back-end call log line (error, timeout, business-warning, ...)
+* Finally, the task id can be any correlation id matching your needs: it can be a session id, a file path, ...
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Aggregate Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-aggregate_maps_path>> |<>, a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-code>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-end_of_task>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-inactivity_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-map_action>> |<>, one of `["create", "update", "create_or_update"]`|No
+| <<{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-push_previous_map_as_event>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-task_id>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout_code>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout_tags>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout_task_id_field>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-aggregate_maps_path"]
+===== `aggregate_maps_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The path to the file where aggregate maps are stored when Logstash stops,
+and from which they are loaded when Logstash starts.
+
+If not defined, aggregate maps will not be stored when Logstash stops and will be lost.
+Must be defined in only one aggregate filter per pipeline (as aggregate maps are shared at pipeline level).
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        aggregate_maps_path => "/path/to/.aggregate_maps"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-code"]
+===== `code`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The code to execute to update the map, using the current event.
+
+Or, conversely, the code to execute to update the event, using the current map.
+
+You will have a 'map' variable and an 'event' variable available (that is, the event itself).
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        code => "map['sql_duration'] += event.get('duration')"
+      }
+    }
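+
+As a complementary illustration, here is a minimal sketch of both directions, reusing the `logger`, `taskid`, and `duration` fields from example #2: the first block updates the map using the current event, and the second updates the final event using the map.
+
+[source,ruby]
+    filter {
+      if [logger] == "SQL" {
+        aggregate {
+          task_id => "%{taskid}"
+          # direction 1: update the map, using the current event
+          code => "map['sql_duration'] ||= 0; map['sql_duration'] += event.get('duration')"
+        }
+      }
+      if [logger] == "TASK_END" {
+        aggregate {
+          task_id => "%{taskid}"
+          # direction 2: update the event, using the current map
+          code => "event.set('sql_duration', map['sql_duration'])"
+          end_of_task => true
+        }
+      }
+    }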
+
+[id="{version}-plugins-{type}s-{plugin}-end_of_task"]
+===== `end_of_task`
+
+ * Value type is <>
+ * Default value is `false`
+
+Tell the filter that the task is ended, and therefore, to delete the aggregate map after code execution.
+
+[id="{version}-plugins-{type}s-{plugin}-inactivity_timeout"]
+===== `inactivity_timeout`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The number of seconds (since the last event) after which a task is considered as expired.
+
+When the timeout occurs for a task, its aggregate map is evicted.
+
+If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event.
+
+`inactivity_timeout` can be defined for each "task_id" pattern.
+
+`inactivity_timeout` must be lower than `timeout`.
+
+[id="{version}-plugins-{type}s-{plugin}-map_action"]
+===== `map_action`
+
+ * Value type is <>
+ * Default value is `"create_or_update"`
+
+Tell the filter what to do with the aggregate map.
+
+`"create"`: create the map, and execute the code only if the map wasn't created before
+
+`"update"`: don't create the map, and execute the code only if the map was created before
+
+`"create_or_update"`: create the map if it wasn't created before, and execute the code in all cases
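+
+For instance, a minimal sketch (reusing the `taskid` field from example #1) where the code runs only on the first event of a task, because no map exists yet:
+
+[source,ruby]
+    filter {
+      aggregate {
+        task_id => "%{taskid}"
+        # executed only if no map exists yet for this task_id
+        code => "map['sql_duration'] = 0"
+        map_action => "create"
+      }
+    }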
+
+[id="{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout"]
+===== `push_map_as_event_on_timeout`
+
+ * Value type is <>
+ * Default value is `false`
+
+When this option is enabled, each time a task timeout is detected, the task aggregation map is pushed as a new Logstash event.
+This makes it possible to detect and process task timeouts in Logstash, but also to manage tasks that have no explicit end event.
+
+[id="{version}-plugins-{type}s-{plugin}-push_previous_map_as_event"]
+===== `push_previous_map_as_event`
+
+ * Value type is <>
+ * Default value is `false`
+
+When this option is enabled, each time the aggregate plugin detects a new task id, it pushes the previous aggregate map as a new Logstash event,
+and then creates a new empty map for the next task.
+
+WARNING: this option works fine only if tasks come one after the other. It means: all task1 events, then all task2 events, etc.
+
+[id="{version}-plugins-{type}s-{plugin}-task_id"]
+===== `task_id`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The expression defining the task ID used to correlate logs.
+
+This value must uniquely identify the task.
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        task_id => "%{type}%{my_task_id}"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is <>
+ * Default value is `1800`
+
+The number of seconds (since the first event) after which a task is considered as expired.
+
+When the timeout occurs for a task, its aggregate map is evicted.
+
+If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event.
+
+The timeout can be defined for each "task_id" pattern.
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_code"]
+===== `timeout_code`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The code to execute to complete the timeout-generated event, when `push_map_as_event_on_timeout` or `push_previous_map_as_event` is set to true.
+The code block will have access to the newly generated timeout event, which is pre-populated with the aggregation map.
+
+If `timeout_task_id_field` is set, the event is also populated with the task_id value.
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        timeout_code => "event.set('state', 'timeout')"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_tags"]
+===== `timeout_tags`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Defines tags to add when a timeout event is generated and yielded.
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        timeout_tags => ["aggregate_timeout"]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_task_id_field"]
+===== `timeout_task_id_field`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+This option indicates the field of the timeout-generated event where the "task_id" value is stored.
+The task id will then be set into the timeout event. This can help correlate which tasks have timed out.
+
+For example, with the option `timeout_task_id_field => "my_id"`, when the timeout task id is `"12345"`, the generated timeout event will contain `'my_id' => '12345'`.
+
+By default, if this option is not set, the task id value won't be set into the timeout-generated event.
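+
+A minimal sketch, combining the hypothetical `my_id` field above with the `user_id` pattern from example #3:
+
+[source,ruby]
+    filter {
+      aggregate {
+        task_id => "%{user_id}"
+        code => "map['clicks'] ||= 0; map['clicks'] += 1"
+        push_map_as_event_on_timeout => true
+        # the generated timeout event will contain 'my_id' => '<user_id value>'
+        timeout_task_id_field => "my_id"
+      }
+    }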
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/aggregate-v2.7.1.asciidoc b/docs/versioned-plugins/filters/aggregate-v2.7.1.asciidoc
new file mode 100644
index 000000000..b3f64704d
--- /dev/null
+++ b/docs/versioned-plugins/filters/aggregate-v2.7.1.asciidoc
@@ -0,0 +1,555 @@
+:plugin: aggregate
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.7.1
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-aggregate/blob/v2.7.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Aggregate filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+
+[id="{version}-plugins-{type}s-{plugin}-description"]
+==== Description
+
+
+The aim of this filter is to aggregate information available among several events (typically log lines) belonging to the same task,
+and finally push the aggregated information into the final task event.
+
+You should be very careful to set Logstash filter workers to 1 (`-w 1` flag) for this filter to work correctly,
+otherwise events may be processed out of sequence and unexpected results will occur.
+
+
+[id="{version}-plugins-{type}s-{plugin}-example1"]
+==== Example #1
+
+* with these given logs:
+
+[source,ruby]
+----------------------------------
+ INFO - 12345 - TASK_START - start
+ INFO - 12345 - SQL - sqlQuery1 - 12
+ INFO - 12345 - SQL - sqlQuery2 - 34
+ INFO - 12345 - TASK_END - end
+----------------------------------
+
+* you can aggregate "sql duration" for the whole task with this configuration:
+
+[source,ruby]
+----------------------------------
+ filter {
+   grok {
+     match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
+   }
+
+   if [logger] == "TASK_START" {
+     aggregate {
+       task_id => "%{taskid}"
+       code => "map['sql_duration'] = 0"
+       map_action => "create"
+     }
+   }
+
+   if [logger] == "SQL" {
+     aggregate {
+       task_id => "%{taskid}"
+       code => "map['sql_duration'] += event.get('duration')"
+       map_action => "update"
+     }
+   }
+
+   if [logger] == "TASK_END" {
+     aggregate {
+       task_id => "%{taskid}"
+       code => "event.set('sql_duration', map['sql_duration'])"
+       map_action => "update"
+       end_of_task => true
+       timeout => 120
+     }
+   }
+ }
+----------------------------------
+
+* the final event then looks like:
+
+[source,ruby]
+----------------------------------
+{
+  "message" => "INFO - 12345 - TASK_END - end message",
+  "sql_duration" => 46
+}
+----------------------------------
+
+the field `sql_duration` is added and contains the sum of all sql query durations.
+
+
+[id="{version}-plugins-{type}s-{plugin}-example2"]
+==== Example #2: no start event
+
+* If you have the same logs as in example #1, but without a start log:
+
+[source,ruby]
+----------------------------------
+ INFO - 12345 - SQL - sqlQuery1 - 12
+ INFO - 12345 - SQL - sqlQuery2 - 34
+ INFO - 12345 - TASK_END - end
+----------------------------------
+
+* you can also aggregate "sql duration" with a slightly different configuration:
+
+[source,ruby]
+----------------------------------
+ filter {
+   grok {
+     match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
+   }
+
+   if [logger] == "SQL" {
+     aggregate {
+       task_id => "%{taskid}"
+       code => "map['sql_duration'] ||= 0 ; map['sql_duration'] += event.get('duration')"
+     }
+   }
+
+   if [logger] == "TASK_END" {
+     aggregate {
+       task_id => "%{taskid}"
+       code => "event.set('sql_duration', map['sql_duration'])"
+       end_of_task => true
+       timeout => 120
+     }
+   }
+ }
+----------------------------------
+
+* the final event is exactly the same as in example #1
+* the key point is the "||=" ruby operator. It initializes the 'sql_duration' map entry to 0 only if this map entry is not already initialized (see the short illustration below)
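+
+For readers new to that operator, a tiny plain-Ruby illustration:
+
+[source,ruby]
+    sql_duration = nil
+    sql_duration ||= 0   # sql_duration was nil, so it is set to 0
+    sql_duration ||= 99  # already set, so this line changes nothing
+    sql_duration += 12   # now safe to accumulate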
+
+
+[id="{version}-plugins-{type}s-{plugin}-example3"]
+==== Example #3: no end event
+
+Third use case: You have no specific end event.
+
+A typical case is aggregating or tracking user behaviour. We can track a user by its ID through the events; however, once the user stops interacting, the events stop coming in. There is no specific event indicating the end of the user's interaction.
+
+In this case, we can enable the option 'push_map_as_event_on_timeout' to push the aggregation map as a new event when a timeout occurs.
+In addition, we can enable 'timeout_code' to execute code on the populated timeout event.
+We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID.
+
+* Given these logs:
+
+[source,ruby]
+----------------------------------
+INFO - 12345 - Clicked One
+INFO - 12345 - Clicked Two
+INFO - 12345 - Clicked Three
+----------------------------------
+
+* You can aggregate the amount of clicks the user did like this:
+
+[source,ruby]
+----------------------------------
+filter {
+  grok {
+    match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ]
+  }
+
+  aggregate {
+    task_id => "%{user_id}"
+    code => "map['clicks'] ||= 0; map['clicks'] += 1;"
+    push_map_as_event_on_timeout => true
+    timeout_task_id_field => "user_id"
+    timeout => 600 # 10 minutes timeout
+    timeout_tags => ['_aggregatetimeout']
+    timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
+  }
+}
+----------------------------------
+
+* After ten minutes, this will yield an event like:
+
+[source,json]
+----------------------------------
+{
+  "user_id": "12345",
+  "clicks": 3,
+  "several_clicks": true,
+  "tags": [
+     "_aggregatetimeout"
+  ]
+}
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-example4"]
+==== Example #4: no end event and tasks come one after the other
+
+Fourth use case: like example #3, you have no specific end event, but also, tasks come one after the other.
+
+That is to say: tasks are not interleaved. All task1 events come, then all task2 events come, ...
+
+In that case, you don't want to wait for the task timeout to flush the aggregation map.
+
+* A typical case is aggregating results from the jdbc input plugin.
+* Given that you have this SQL query: `SELECT country_name, town_name FROM town`
+* Using the jdbc input plugin, you get these 3 events (a sketch of such an input appears after this example):
+
+[source,json]
+----------------------------------
+ { "country_name": "France", "town_name": "Paris" }
+ { "country_name": "France", "town_name": "Marseille" }
+ { "country_name": "USA", "town_name": "New-York" }
+----------------------------------
+
+* And you would like to push these 2 result events into elasticsearch:
+
+[source,json]
+----------------------------------
+ { "country_name": "France", "towns": [ {"town_name": "Paris"}, {"town_name": "Marseille"} ] }
+ { "country_name": "USA", "towns": [ {"town_name": "New-York"} ] }
+----------------------------------
+
+* You can do that using the `push_previous_map_as_event` aggregate plugin option:
+
+[source,ruby]
+----------------------------------
+ filter {
+   aggregate {
+     task_id => "%{country_name}"
+     code => "
+       map['country_name'] = event.get('country_name')
+       map['towns'] ||= []
+       map['towns'] << {'town_name' => event.get('town_name')}
+       event.cancel()
+     "
+     push_previous_map_as_event => true
+     timeout => 3
+   }
+ }
+----------------------------------
+
+* The key point is that each time the aggregate plugin detects a new `country_name`, it pushes the previous aggregate map as a new Logstash event, and then creates a new empty map for the next country
+* When the 3s timeout expires, the last aggregate map is pushed as a new event
+* Finally, the initial events (which are not aggregated) are dropped because they are no longer useful (thanks to `event.cancel()`)
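+
+For reference, a sketch of a jdbc input that could produce such events; the connection settings are illustrative placeholders, not part of this example:
+
+[source,ruby]
+----------------------------------
+ input {
+   jdbc {
+     # placeholder connection settings
+     jdbc_connection_string => "jdbc:postgresql://localhost:5432/mydb"
+     jdbc_user => "logstash"
+     jdbc_driver_class => "org.postgresql.Driver"
+     statement => "SELECT country_name, town_name FROM town"
+   }
+ }
+----------------------------------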
+
+
+[id="{version}-plugins-{type}s-{plugin}-example5"]
+==== Example #5: no end event and push events as soon as possible
+
+Fifth use case: like example #3, there is no end event.
+
+Events keep coming for an indefinite time, and you want to push the aggregation map as soon as possible after the last user interaction, without waiting for the `timeout`.
+
+This allows the aggregated events to be pushed closer to real time.
+
+
+A typical case is aggregating or tracking user behaviour.
+
+We can track a user by its ID through the events; however, once the user stops interacting, the events stop coming in.
+
+There is no specific event indicating the end of the user's interaction.
+
+The user interaction will be considered as ended when no events for the specified user (task_id) arrive within the specified `inactivity_timeout`.
+
+If the user continues interacting for longer than `timeout` seconds (since the first event), the aggregation map will still be deleted and pushed as a new event when the timeout occurs.
+
+The difference with example #3 is that the events will be pushed as soon as the user stops interacting for `inactivity_timeout` seconds, instead of waiting for the end of `timeout` seconds since the first event.
+
+In this case, we can enable the option 'push_map_as_event_on_timeout' to push the aggregation map as a new event when the inactivity timeout occurs.
+
+In addition, we can enable 'timeout_code' to execute code on the populated timeout event.
+
+We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID.
+
+
+* Given these logs:
+
+[source,ruby]
+----------------------------------
+INFO - 12345 - Clicked One
+INFO - 12345 - Clicked Two
+INFO - 12345 - Clicked Three
+----------------------------------
+
+* You can aggregate the amount of clicks the user did like this:
+
+[source,ruby]
+----------------------------------
+filter {
+  grok {
+    match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ]
+  }
+  aggregate {
+    task_id => "%{user_id}"
+    code => "map['clicks'] ||= 0; map['clicks'] += 1;"
+    push_map_as_event_on_timeout => true
+    timeout_task_id_field => "user_id"
+    timeout => 3600 # 1 hour timeout, user activity will be considered finished one hour after the first event, even if events keep coming
+    inactivity_timeout => 300 # 5 minutes timeout, user activity will be considered finished if no new events arrive 5 minutes after the last event
+    timeout_tags => ['_aggregatetimeout']
+    timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
+  }
+}
+----------------------------------
+
+* After five minutes of inactivity or one hour after the first event, this will yield an event like:
+
+[source,json]
+----------------------------------
+{
+  "user_id": "12345",
+  "clicks": 3,
+  "several_clicks": true,
+  "tags": [
+     "_aggregatetimeout"
+  ]
+}
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-howitworks"]
+==== How it works
+* the filter needs a "task_id" to correlate events (log lines) of the same task
+* at the beginning of the task, the filter creates a map attached to the task_id
+* for each event, you can execute code using 'event' and 'map' (for instance, copy an event field to the map)
+* on the final event, you can execute a last piece of code (for instance, add map data to the final event)
+* after the final event, the map attached to the task is deleted (thanks to `end_of_task => true`)
+* an aggregate map is tied to one task_id value, which is tied to one task_id pattern. So if you have 2 filters with different task_id patterns, even if you have the same task_id value, they won't share the same aggregate map.
+* in one filter configuration, it is recommended to define a timeout option to protect the feature against unterminated tasks. It tells the filter to delete expired maps.
+* if no timeout is defined, by default, all maps older than 1800 seconds are automatically deleted
+* all timeout options have to be defined in only one aggregate filter per task_id pattern (per pipeline). Timeout options are: timeout, inactivity_timeout, timeout_code, push_map_as_event_on_timeout, push_previous_map_as_event, timeout_task_id_field, timeout_tags
+* if `code` execution raises an exception, the error is logged and the event is tagged '_aggregateexception'
+
+
+[id="{version}-plugins-{type}s-{plugin}-usecases"]
+==== Use Cases
+* extract some useful metrics from task logs and push them into the final task event (like in examples #1 and #2)
+* extract error information from any task log line, and push it into the final task event (to get a final event with all error information, if any)
+* extract all back-end calls as a list, and push this list into the final task event (to get a task profile)
+* extract all http headers logged on several lines, and push this list into the final task event (complete http request info)
+* for every back-end call, collect the call details available on several lines, analyse them, and finally tag the final back-end call log line (error, timeout, business-warning, ...)
+* Finally, the task id can be any correlation id matching your needs: a session id, a file path, ...
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Aggregate Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-aggregate_maps_path>> |<>, a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-code>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-end_of_task>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-inactivity_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-map_action>> |<>, one of `["create", "update", "create_or_update"]`|No
+| <<{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-push_previous_map_as_event>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-task_id>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout_code>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout_tags>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout_task_id_field>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-aggregate_maps_path"]
+===== `aggregate_maps_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The path to the file where aggregate maps are stored when Logstash stops,
+and loaded from when Logstash starts.
+
+If not defined, aggregate maps will not be stored when Logstash stops and will be lost.
+Must be defined in only one aggregate filter per pipeline (as aggregate maps are shared at pipeline level).
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        aggregate_maps_path => "/path/to/.aggregate_maps"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-code"]
+===== `code`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The code to execute to update the map, using the current event.
+
+Or conversely, the code to execute to update the event, using the current map.
+
+You will have a 'map' variable and an 'event' variable available (that is the event itself).
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        code => "map['sql_duration'] += event.get('duration')"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-end_of_task"]
+===== `end_of_task`
+
+ * Value type is <>
+ * Default value is `false`
+
+Tells the filter that the task is ended and, therefore, that the aggregate map should be deleted after code execution.
+
+[id="{version}-plugins-{type}s-{plugin}-inactivity_timeout"]
+===== `inactivity_timeout`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The number of seconds (since the last event) after which a task is considered expired.
+
+When a timeout occurs for a task, its aggregate map is evicted.
+
+If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event.
+
+`inactivity_timeout` can be defined for each "task_id" pattern.
+
+`inactivity_timeout` must be lower than `timeout`.
+
+[id="{version}-plugins-{type}s-{plugin}-map_action"]
+===== `map_action`
+
+ * Value type is <>
+ * Default value is `"create_or_update"`
+
+Tells the filter what to do with the aggregate map.
+
+`"create"`: creates the map, and executes the code only if the map wasn't created before
+
+`"update"`: doesn't create the map, and executes the code only if the map was created before
+
+`"create_or_update"`: creates the map if it wasn't created before, and executes the code in all cases
+
+[id="{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout"]
+===== `push_map_as_event_on_timeout`
+
+ * Value type is <>
+ * Default value is `false`
+
+When this option is enabled, each time a task timeout is detected, the task aggregation map is pushed as a new Logstash event.
+This makes it possible to detect and process task timeouts in Logstash, but also to manage tasks that have no explicit end event.
+
+[id="{version}-plugins-{type}s-{plugin}-push_previous_map_as_event"]
+===== `push_previous_map_as_event`
+
+ * Value type is <>
+ * Default value is `false`
+
+When this option is enabled, each time the aggregate plugin detects a new task id, it pushes the previous aggregate map as a new Logstash event,
+and then creates a new empty map for the next task.
+
+WARNING: this option works fine only if tasks come one after the other. That is: all task1 events, then all task2 events, and so on.
+
+[id="{version}-plugins-{type}s-{plugin}-task_id"]
+===== `task_id`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The expression defining the task ID used to correlate logs.
+
+This value must uniquely identify the task.
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        task_id => "%{type}%{my_task_id}"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is <>
+ * Default value is `1800`
+
+The number of seconds (since the first event) after which a task is considered expired.
+
+When a timeout occurs for a task, its aggregate map is evicted.
+
+If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event.
+
+The timeout can be defined for each "task_id" pattern.
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_code"]
+===== `timeout_code`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The code to execute to complete the timeout-generated event, when `push_map_as_event_on_timeout` or `push_previous_map_as_event` is set to true.
+The code block will have access to the newly generated timeout event that is pre-populated with the aggregation map.
+
+If `timeout_task_id_field` is set, the event is also populated with the task_id value.
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        timeout_code => "event.set('state', 'timeout')"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_tags"]
+===== `timeout_tags`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Defines tags to add when a timeout event is generated and yielded.
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        timeout_tags => ["aggregate_timeout"]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_task_id_field"]
+===== `timeout_task_id_field`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+This option indicates the timeout-generated event's field for the "task_id" value.
+The task id will then be set into the timeout event. This can help correlate which tasks have been timed out.
+
+For example, with the option `timeout_task_id_field => "my_id"`, when the timeout task id is `"12345"`, the generated timeout event will contain `'my_id' => '12345'`.
+
+By default, if this option is not set, the task id value won't be set into the timeout-generated event.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/aggregate-v2.7.2.asciidoc b/docs/versioned-plugins/filters/aggregate-v2.7.2.asciidoc
new file mode 100644
index 000000000..a81646ccc
--- /dev/null
+++ b/docs/versioned-plugins/filters/aggregate-v2.7.2.asciidoc
@@ -0,0 +1,555 @@
+:plugin: aggregate
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.7.2
+:release_date: 2017-11-16
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-aggregate/blob/v2.7.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Aggregate filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+
+[id="{version}-plugins-{type}s-{plugin}-description"]
+==== Description
+
+
+The aim of this filter is to aggregate information available among several events (typically log lines) belonging to the same task,
+and finally push the aggregated information into the final task event.
+
+You should be very careful to set Logstash filter workers to 1 (`-w 1` flag) for this filter to work correctly;
+otherwise events may be processed out of sequence and unexpected results will occur.
+
+
+[id="{version}-plugins-{type}s-{plugin}-example1"]
+==== Example #1
+
+* with these given logs:
+
+[source,ruby]
+----------------------------------
+    INFO - 12345 - TASK_START - start
+    INFO - 12345 - SQL - sqlQuery1 - 12
+    INFO - 12345 - SQL - sqlQuery2 - 34
+    INFO - 12345 - TASK_END - end
+----------------------------------
+
+* you can aggregate the "sql duration" for the whole task with this configuration:
+
+[source,ruby]
+----------------------------------
+    filter {
+      grok {
+        match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
+      }
+
+      if [logger] == "TASK_START" {
+        aggregate {
+          task_id => "%{taskid}"
+          code => "map['sql_duration'] = 0"
+          map_action => "create"
+        }
+      }
+
+      if [logger] == "SQL" {
+        aggregate {
+          task_id => "%{taskid}"
+          code => "map['sql_duration'] += event.get('duration')"
+          map_action => "update"
+        }
+      }
+
+      if [logger] == "TASK_END" {
+        aggregate {
+          task_id => "%{taskid}"
+          code => "event.set('sql_duration', map['sql_duration'])"
+          map_action => "update"
+          end_of_task => true
+          timeout => 120
+        }
+      }
+    }
+----------------------------------
+
+* the final event then looks like:
+
+[source,ruby]
+----------------------------------
+{
+         "message" => "INFO - 12345 - TASK_END - end message",
+    "sql_duration" => 46
+}
+----------------------------------
+
+The field `sql_duration` is added and contains the sum of all SQL query durations.
+
+
+[id="{version}-plugins-{type}s-{plugin}-example2"]
+==== Example #2: no start event
+
+* If you have the same logs as in example #1, but without a start log:
+
+[source,ruby]
+----------------------------------
+    INFO - 12345 - SQL - sqlQuery1 - 12
+    INFO - 12345 - SQL - sqlQuery2 - 34
+    INFO - 12345 - TASK_END - end
+----------------------------------
+
+* you can also aggregate the "sql duration" with a slightly different configuration:
+
+[source,ruby]
+----------------------------------
+    filter {
+      grok {
+        match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
+      }
+
+      if [logger] == "SQL" {
+        aggregate {
+          task_id => "%{taskid}"
+          code => "map['sql_duration'] ||= 0 ; map['sql_duration'] += event.get('duration')"
+        }
+      }
+
+      if [logger] == "TASK_END" {
+        aggregate {
+          task_id => "%{taskid}"
+          code => "event.set('sql_duration', map['sql_duration'])"
+          end_of_task => true
+          timeout => 120
+        }
+      }
+    }
+----------------------------------
+
+* the final event is exactly the same as in example #1
+* the key point is the `||=` Ruby operator, which initializes the 'sql_duration' map entry to 0 only if this map entry is not already initialized
+
+
+[id="{version}-plugins-{type}s-{plugin}-example3"]
+==== Example #3: no end event
+
+Third use case: you have no specific end event.
+
+A typical case is aggregating or tracking user behaviour. We can track a user by their ID through the events; however, once the user stops interacting, the events stop coming in. There is no specific event indicating the end of the user's interaction.
+
+In this case, we can enable the option 'push_map_as_event_on_timeout' to push the aggregation map as a new event when a timeout occurs.
+In addition, we can enable 'timeout_code' to execute code on the populated timeout event.
+We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID.
+
+* Given these logs:
+
+[source,ruby]
+----------------------------------
+INFO - 12345 - Clicked One
+INFO - 12345 - Clicked Two
+INFO - 12345 - Clicked Three
+----------------------------------
+
+* You can aggregate the number of clicks the user made like this:
+
+[source,ruby]
+----------------------------------
+filter {
+  grok {
+    match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ]
+  }
+
+  aggregate {
+    task_id => "%{user_id}"
+    code => "map['clicks'] ||= 0; map['clicks'] += 1;"
+    push_map_as_event_on_timeout => true
+    timeout_task_id_field => "user_id"
+    timeout => 600 # 10 minutes timeout
+    timeout_tags => ['_aggregatetimeout']
+    timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
+  }
+}
+----------------------------------
+
+* After ten minutes, this will yield an event like:
+
+[source,json]
+----------------------------------
+{
+  "user_id": "12345",
+  "clicks": 3,
+  "several_clicks": true,
+  "tags": [
+     "_aggregatetimeout"
+  ]
+}
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-example4"]
+==== Example #4: no end event and tasks come one after the other
+
+Fourth use case: like example #3, you have no specific end event, but also, tasks come one after the other.
+
+That is to say: tasks are not interleaved. All task1 events come, then all task2 events come, and so on.
+
+In that case, you don't want to wait for the task timeout to flush the aggregation map.
+
+* A typical case is aggregating results from the jdbc input plugin.
+* Given that you have this SQL query: `SELECT country_name, town_name FROM town`
+* Using the jdbc input plugin, you get these 3 events:
+
+[source,json]
+----------------------------------
+  { "country_name": "France", "town_name": "Paris" }
+  { "country_name": "France", "town_name": "Marseille" }
+  { "country_name": "USA", "town_name": "New-York" }
+----------------------------------
+
+* And you would like to push these 2 aggregated events into Elasticsearch:
+
+[source,json]
+----------------------------------
+  { "country_name": "France", "towns": [ {"town_name": "Paris"}, {"town_name": "Marseille"} ] }
+  { "country_name": "USA", "towns": [ {"town_name": "New-York"} ] }
+----------------------------------
+
+* You can do that using the `push_previous_map_as_event` option:
+
+[source,ruby]
+----------------------------------
+   filter {
+     aggregate {
+       task_id => "%{country_name}"
+       code => "
+         map['country_name'] = event.get('country_name')
+         map['towns'] ||= []
+         map['towns'] << {'town_name' => event.get('town_name')}
+         event.cancel()
+       "
+       push_previous_map_as_event => true
+       timeout => 3
+     }
+   }
+----------------------------------
+
+* The key point is that each time the aggregate plugin detects a new `country_name`, it pushes the previous aggregate map as a new Logstash event, and then creates a new empty map for the next country
+* When the 3s timeout expires, the last aggregate map is pushed as a new event
+* Finally, the initial events (which are not aggregated) are dropped because they are no longer useful (thanks to `event.cancel()`)
+
+
+[id="{version}-plugins-{type}s-{plugin}-example5"]
+==== Example #5: no end event and push events as soon as possible
+
+Fifth use case: like example #3, there is no end event.
+
+Events keep coming for an indefinite time and you want to push the aggregation map as soon as possible after the last user interaction, without waiting for the `timeout`.
+
+This allows the aggregated events to be pushed closer to real time.
+
+
+A typical case is aggregating or tracking user behaviour.
+
+We can track a user by their ID through the events; however, once the user stops interacting, the events stop coming in.
+
+There is no specific event indicating the end of the user's interaction.
+
+The user interaction is considered ended when no events for the specified user (task_id) arrive within the specified `inactivity_timeout`.
+
+If the user continues interacting for longer than `timeout` seconds (since the first event), the aggregation map will still be deleted and pushed as a new event when the timeout occurs.
+
+The difference with example #3 is that the events will be pushed as soon as the user stops interacting for `inactivity_timeout` seconds, instead of waiting for the end of `timeout` seconds since the first event.
+
+In this case, we can enable the option 'push_map_as_event_on_timeout' to push the aggregation map as a new event when the inactivity timeout occurs.
+
+In addition, we can enable 'timeout_code' to execute code on the populated timeout event.
+
+We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID.
+
+
+* Given these logs:
+
+[source,ruby]
+----------------------------------
+INFO - 12345 - Clicked One
+INFO - 12345 - Clicked Two
+INFO - 12345 - Clicked Three
+----------------------------------
+
+* You can aggregate the number of clicks the user made like this:
+
+[source,ruby]
+----------------------------------
+filter {
+  grok {
+    match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ]
+  }
+  aggregate {
+    task_id => "%{user_id}"
+    code => "map['clicks'] ||= 0; map['clicks'] += 1;"
+    push_map_as_event_on_timeout => true
+    timeout_task_id_field => "user_id"
+    timeout => 3600 # 1 hour timeout, user activity will be considered finished one hour after the first event, even if events keep coming
+    inactivity_timeout => 300 # 5 minutes timeout, user activity will be considered finished if no new events arrive for 5 minutes after the last event
+    timeout_tags => ['_aggregatetimeout']
+    timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
+  }
+}
+----------------------------------
+
+* After five minutes of inactivity or one hour since the first event, this will yield an event like:
+
+[source,json]
+----------------------------------
+{
+  "user_id": "12345",
+  "clicks": 3,
+  "several_clicks": true,
+  "tags": [
+     "_aggregatetimeout"
+  ]
+}
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-howitworks"]
+==== How it works
+* the filter needs a "task_id" to correlate events (log lines) of the same task
+* at the task beginning, the filter creates a map attached to the task_id
+* for each event, you can execute code using 'event' and 'map' (for instance, copy an event field to the map)
+* in the final event, you can execute a final piece of code (for instance, add map data to the final event)
+* after the final event, the map attached to the task is deleted (thanks to `end_of_task => true`)
+* an aggregate map is tied to one task_id value, which is tied to one task_id pattern. So if you have 2 filters with different task_id patterns, even if you have the same task_id value, they won't share the same aggregate map.
+* in one filter configuration, it is recommended to define a timeout option to protect the feature against unterminated tasks. It tells the filter to delete expired maps.
+* if no timeout is defined, by default, all maps older than 1800 seconds are automatically deleted
+* all timeout options have to be defined in only one aggregate filter per task_id pattern (per pipeline). Timeout options are: timeout, inactivity_timeout, timeout_code, push_map_as_event_on_timeout, push_previous_map_as_event, timeout_task_id_field, timeout_tags
+* if `code` execution raises an exception, the error is logged and the event is tagged '_aggregateexception'
+
+
+[id="{version}-plugins-{type}s-{plugin}-usecases"]
+==== Use Cases
+* extract some useful metrics from task logs and push them into the final task event (like in examples #1 and #2)
+* extract error information from any task log line, and push it into the final task event (to get a final event with all error information, if any)
+* extract all back-end calls as a list, and push this list into the final task event (to get a task profile)
+* extract all http headers logged on several lines, and push this list into the final task event (complete http request info)
+* for every back-end call, collect the call details available on several lines, analyse them, and finally tag the final back-end call log line (error, timeout, business-warning, ...)
+* Finally, the task id can be any correlation id matching your needs: a session id, a file path, ...
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Aggregate Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-aggregate_maps_path>> |<>, a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-code>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-end_of_task>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-inactivity_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-map_action>> |<>, one of `["create", "update", "create_or_update"]`|No
+| <<{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-push_previous_map_as_event>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-task_id>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout_code>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout_tags>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout_task_id_field>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-aggregate_maps_path"]
+===== `aggregate_maps_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The path to the file where aggregate maps are stored when Logstash stops,
+and loaded from when Logstash starts.
+
+If not defined, aggregate maps will not be stored when Logstash stops and will be lost.
+Must be defined in only one aggregate filter per pipeline (as aggregate maps are shared at pipeline level).
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        aggregate_maps_path => "/path/to/.aggregate_maps"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-code"]
+===== `code`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The code to execute to update the map, using the current event.
+
+Or conversely, the code to execute to update the event, using the current map.
+
+You will have a 'map' variable and an 'event' variable available (that is the event itself).
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        code => "map['sql_duration'] += event.get('duration')"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-end_of_task"]
+===== `end_of_task`
+
+ * Value type is <>
+ * Default value is `false`
+
+Tells the filter that the task is ended and, therefore, that the aggregate map should be deleted after code execution.
+
+[id="{version}-plugins-{type}s-{plugin}-inactivity_timeout"]
+===== `inactivity_timeout`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The number of seconds (since the last event) after which a task is considered expired.
+
+When a timeout occurs for a task, its aggregate map is evicted.
+
+If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event.
+
+`inactivity_timeout` can be defined for each "task_id" pattern.
+
+`inactivity_timeout` must be lower than `timeout`.
+
+[id="{version}-plugins-{type}s-{plugin}-map_action"]
+===== `map_action`
+
+ * Value type is <>
+ * Default value is `"create_or_update"`
+
+Tells the filter what to do with the aggregate map.
+
+`"create"`: creates the map, and executes the code only if the map wasn't created before
+
+`"update"`: doesn't create the map, and executes the code only if the map was created before
+
+`"create_or_update"`: creates the map if it wasn't created before, and executes the code in all cases
+
+[id="{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout"]
+===== `push_map_as_event_on_timeout`
+
+ * Value type is <>
+ * Default value is `false`
+
+When this option is enabled, each time a task timeout is detected, the task aggregation map is pushed as a new Logstash event.
+This makes it possible to detect and process task timeouts in Logstash, but also to manage tasks that have no explicit end event.
+
+[id="{version}-plugins-{type}s-{plugin}-push_previous_map_as_event"]
+===== `push_previous_map_as_event`
+
+ * Value type is <>
+ * Default value is `false`
+
+When this option is enabled, each time the aggregate plugin detects a new task id, it pushes the previous aggregate map as a new Logstash event,
+and then creates a new empty map for the next task.
+
+WARNING: this option works fine only if tasks come one after the other. That is: all task1 events, then all task2 events, and so on.
+
+[id="{version}-plugins-{type}s-{plugin}-task_id"]
+===== `task_id`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The expression defining the task ID used to correlate logs.
+
+This value must uniquely identify the task.
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        task_id => "%{type}%{my_task_id}"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is <>
+ * Default value is `1800`
+
+The number of seconds (since the first event) after which a task is considered expired.
+
+When a timeout occurs for a task, its aggregate map is evicted.
+
+If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event.
+
+The timeout can be defined for each "task_id" pattern.
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_code"]
+===== `timeout_code`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The code to execute to complete the timeout-generated event, when `push_map_as_event_on_timeout` or `push_previous_map_as_event` is set to true.
+The code block will have access to the newly generated timeout event that is pre-populated with the aggregation map.
+
+If `timeout_task_id_field` is set, the event is also populated with the task_id value.
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        timeout_code => "event.set('state', 'timeout')"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_tags"]
+===== `timeout_tags`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Defines tags to add when a timeout event is generated and yielded.
+
+Example:
+[source,ruby]
+    filter {
+      aggregate {
+        timeout_tags => ["aggregate_timeout"]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_task_id_field"]
+===== `timeout_task_id_field`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+This option indicates the timeout-generated event's field for the "task_id" value.
+The task id will then be set into the timeout event. This can help correlate which tasks have been timed out.
+
+For example, with the option `timeout_task_id_field => "my_id"`, when the timeout task id is `"12345"`, the generated timeout event will contain `'my_id' => '12345'`.
+
+By default, if this option is not set, the task id value won't be set into the timeout-generated event.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/alter-index.asciidoc b/docs/versioned-plugins/filters/alter-index.asciidoc
new file mode 100644
index 000000000..3aaefc801
--- /dev/null
+++ b/docs/versioned-plugins/filters/alter-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: alter
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::alter-v3.0.3.asciidoc[]
+include::alter-v3.0.2.asciidoc[]
+include::alter-v3.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/alter-v3.0.1.asciidoc b/docs/versioned-plugins/filters/alter-v3.0.1.asciidoc
new file mode 100644
index 000000000..ff8373857
--- /dev/null
+++ b/docs/versioned-plugins/filters/alter-v3.0.1.asciidoc
@@ -0,0 +1,111 @@
+:plugin: alter
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-alter/blob/v3.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Alter filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The alter filter allows you to do general alterations to fields
+that are not included in the normal mutate filter.
+
+
+NOTE: The functionality provided by this plugin is likely to
+be merged into the 'mutate' filter in future versions.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Alter Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-coalesce>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-condrewrite>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-condrewriteother>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-coalesce"] +===== `coalesce` + + * Value type is <> + * There is no default value for this setting. + +Sets the value of field_name to the first nonnull expression among its arguments. + +Example: +[source,ruby] + filter { + alter { + coalesce => [ + "field_name", "value1", "value2", "value3", ... + ] + } + } + +[id="{version}-plugins-{type}s-{plugin}-condrewrite"] +===== `condrewrite` + + * Value type is <> + * There is no default value for this setting. + +Change the content of the field to the specified value +if the actual content is equal to the expected one. + +Example: +[source,ruby] + filter { + alter { + condrewrite => [ + "field_name", "expected_value", "new_value", + "field_name2", "expected_value2", "new_value2", + .... + ] + } + } + +[id="{version}-plugins-{type}s-{plugin}-condrewriteother"] +===== `condrewriteother` + + * Value type is <> + * There is no default value for this setting. + +Change the content of the field to the specified value +if the content of another field is equal to the expected one. + +Example: +[source,ruby] + filter { + alter { + condrewriteother => [ + "field_name", "expected_value", "field_name_to_change", "value", + "field_name2", "expected_value2", "field_name_to_change2", "value2", + .... + ] + } + } + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/alter-v3.0.2.asciidoc b/docs/versioned-plugins/filters/alter-v3.0.2.asciidoc new file mode 100644 index 000000000..278317b0a --- /dev/null +++ b/docs/versioned-plugins/filters/alter-v3.0.2.asciidoc @@ -0,0 +1,111 @@ +:plugin: alter +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.2 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-alter/blob/v3.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Alter filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The alter filter allows you to do general alterations to fields +that are not included in the normal mutate filter. + + +NOTE: The functionality provided by this plugin is likely to +be merged into the 'mutate' filter in future versions. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Alter Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-coalesce>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-condrewrite>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-condrewriteother>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-coalesce"] +===== `coalesce` + + * Value type is <> + * There is no default value for this setting. + +Sets the value of field_name to the first nonnull expression among its arguments. + +Example: +[source,ruby] + filter { + alter { + coalesce => [ + "field_name", "value1", "value2", "value3", ... + ] + } + } + +[id="{version}-plugins-{type}s-{plugin}-condrewrite"] +===== `condrewrite` + + * Value type is <> + * There is no default value for this setting. + +Change the content of the field to the specified value +if the actual content is equal to the expected one. + +Example: +[source,ruby] + filter { + alter { + condrewrite => [ + "field_name", "expected_value", "new_value", + "field_name2", "expected_value2", "new_value2", + .... + ] + } + } + +[id="{version}-plugins-{type}s-{plugin}-condrewriteother"] +===== `condrewriteother` + + * Value type is <> + * There is no default value for this setting. + +Change the content of the field to the specified value +if the content of another field is equal to the expected one. + +Example: +[source,ruby] + filter { + alter { + condrewriteother => [ + "field_name", "expected_value", "field_name_to_change", "value", + "field_name2", "expected_value2", "field_name_to_change2", "value2", + .... + ] + } + } + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/alter-v3.0.3.asciidoc b/docs/versioned-plugins/filters/alter-v3.0.3.asciidoc new file mode 100644 index 000000000..fe83eb721 --- /dev/null +++ b/docs/versioned-plugins/filters/alter-v3.0.3.asciidoc @@ -0,0 +1,111 @@ +:plugin: alter +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-alter/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Alter filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The alter filter allows you to do general alterations to fields +that are not included in the normal mutate filter. + + +NOTE: The functionality provided by this plugin is likely to +be merged into the 'mutate' filter in future versions. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Alter Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-coalesce>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-condrewrite>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-condrewriteother>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-coalesce"] +===== `coalesce` + + * Value type is <> + * There is no default value for this setting. + +Sets the value of field_name to the first nonnull expression among its arguments. + +Example: +[source,ruby] + filter { + alter { + coalesce => [ + "field_name", "value1", "value2", "value3", ... + ] + } + } + +[id="{version}-plugins-{type}s-{plugin}-condrewrite"] +===== `condrewrite` + + * Value type is <> + * There is no default value for this setting. + +Change the content of the field to the specified value +if the actual content is equal to the expected one. + +Example: +[source,ruby] + filter { + alter { + condrewrite => [ + "field_name", "expected_value", "new_value", + "field_name2", "expected_value2", "new_value2", + .... + ] + } + } + +[id="{version}-plugins-{type}s-{plugin}-condrewriteother"] +===== `condrewriteother` + + * Value type is <> + * There is no default value for this setting. + +Change the content of the field to the specified value +if the content of another field is equal to the expected one. + +Example: +[source,ruby] + filter { + alter { + condrewriteother => [ + "field_name", "expected_value", "field_name_to_change", "value", + "field_name2", "expected_value2", "field_name_to_change2", "value2", + .... + ] + } + } + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/anonymize-index.asciidoc b/docs/versioned-plugins/filters/anonymize-index.asciidoc new file mode 100644 index 000000000..222460c1f --- /dev/null +++ b/docs/versioned-plugins/filters/anonymize-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: anonymize +:type: filter + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::anonymize-v3.0.6.asciidoc[] +include::anonymize-v3.0.5.asciidoc[] +include::anonymize-v3.0.4.asciidoc[] + diff --git a/docs/versioned-plugins/filters/anonymize-v3.0.4.asciidoc b/docs/versioned-plugins/filters/anonymize-v3.0.4.asciidoc new file mode 100644 index 000000000..38a4d91bf --- /dev/null +++ b/docs/versioned-plugins/filters/anonymize-v3.0.4.asciidoc @@ -0,0 +1,77 @@ +:plugin: anonymize +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-anonymize/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Anonymize filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+deprecated[3.0.3,We recommend that you use the <> instead.]
+
+Anonymize fields by replacing values with a consistent hash.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Anonymize Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-algorithm>> |<>, one of `["SHA1", "SHA256", "SHA384", "SHA512", "MD5", "MURMUR3", "IPV4_NETWORK"]`|Yes
+| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-algorithm"]
+===== `algorithm`
+
+ * This is a required setting.
+ * Value can be any of: `SHA1`, `SHA256`, `SHA384`, `SHA512`, `MD5`, `MURMUR3`, `IPV4_NETWORK`
+ * Default value is `"SHA1"`
+
+The digest/hash type to use.
+
+[id="{version}-plugins-{type}s-{plugin}-fields"]
+===== `fields`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The fields to be anonymized.
+
+[id="{version}-plugins-{type}s-{plugin}-key"]
+===== `key`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The hashing key.
+When using MURMUR3, the key is ignored but must still be set.
+When using IPV4_NETWORK, the key is the subnet prefix length.
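+
+For illustration, here is a minimal sketch of a configuration using these options (the field names `user_email` and `clientip` are made up for this example):
+
+[source,ruby]
+    filter {
+      anonymize {
+        algorithm => "SHA256"
+        fields => ["user_email"]   # hypothetical field name
+        key => "my_secret_key"     # hypothetical key
+      }
+      anonymize {
+        algorithm => "IPV4_NETWORK"
+        fields => ["clientip"]     # hypothetical field name
+        key => "24"                # here the key is the subnet prefix length
+      }
+    }
+
+The first block replaces `user_email` with its keyed SHA256 hash; the second masks `clientip` to its /24 network, since with `IPV4_NETWORK` the key is the subnet prefix length.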
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/anonymize-v3.0.5.asciidoc b/docs/versioned-plugins/filters/anonymize-v3.0.5.asciidoc
new file mode 100644
index 000000000..71b548a86
--- /dev/null
+++ b/docs/versioned-plugins/filters/anonymize-v3.0.5.asciidoc
@@ -0,0 +1,77 @@
+:plugin: anonymize
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-anonymize/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Anonymize filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+deprecated[3.0.3,We recommend that you use the <> instead.]
+
+Anonymize fields by replacing values with a consistent hash.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Anonymize Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-algorithm>> |<>, one of `["SHA1", "SHA256", "SHA384", "SHA512", "MD5", "MURMUR3", "IPV4_NETWORK"]`|Yes
+| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-algorithm"]
+===== `algorithm`
+
+ * This is a required setting.
+ * Value can be any of: `SHA1`, `SHA256`, `SHA384`, `SHA512`, `MD5`, `MURMUR3`, `IPV4_NETWORK`
+ * Default value is `"SHA1"`
+
+The digest/hash type to use.
+
+[id="{version}-plugins-{type}s-{plugin}-fields"]
+===== `fields`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The fields to be anonymized.
+
+[id="{version}-plugins-{type}s-{plugin}-key"]
+===== `key`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The hashing key.
+When using MURMUR3, the key is ignored but must still be set.
+When using IPV4_NETWORK, the key is the subnet prefix length.
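+
+For illustration, here is a minimal sketch of a configuration using these options (the field names `user_email` and `clientip` are made up for this example):
+
+[source,ruby]
+    filter {
+      anonymize {
+        algorithm => "SHA256"
+        fields => ["user_email"]   # hypothetical field name
+        key => "my_secret_key"     # hypothetical key
+      }
+      anonymize {
+        algorithm => "IPV4_NETWORK"
+        fields => ["clientip"]     # hypothetical field name
+        key => "24"                # here the key is the subnet prefix length
+      }
+    }
+
+The first block replaces `user_email` with its keyed SHA256 hash; the second masks `clientip` to its /24 network, since with `IPV4_NETWORK` the key is the subnet prefix length.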
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/anonymize-v3.0.6.asciidoc b/docs/versioned-plugins/filters/anonymize-v3.0.6.asciidoc
new file mode 100644
index 000000000..1d5a9d329
--- /dev/null
+++ b/docs/versioned-plugins/filters/anonymize-v3.0.6.asciidoc
@@ -0,0 +1,77 @@
+:plugin: anonymize
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.6
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-anonymize/blob/v3.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Anonymize filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+deprecated[3.0.3,We recommend that you use the <> instead.]
+
+Anonymize fields by replacing values with a consistent hash.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Anonymize Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-algorithm>> |<>, one of `["SHA1", "SHA256", "SHA384", "SHA512", "MD5", "MURMUR3", "IPV4_NETWORK"]`|Yes
+| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-algorithm"]
+===== `algorithm`
+
+ * This is a required setting.
+ * Value can be any of: `SHA1`, `SHA256`, `SHA384`, `SHA512`, `MD5`, `MURMUR3`, `IPV4_NETWORK`
+ * Default value is `"SHA1"`
+
+The digest/hash type to use.
+
+[id="{version}-plugins-{type}s-{plugin}-fields"]
+===== `fields`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The fields to be anonymized.
+
+[id="{version}-plugins-{type}s-{plugin}-key"]
+===== `key`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The hashing key.
+When using MURMUR3, the key is ignored but must still be set.
+When using IPV4_NETWORK, the key is the subnet prefix length.
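+
+For illustration, here is a minimal sketch of a configuration using these options (the field names `user_email` and `clientip` are made up for this example):
+
+[source,ruby]
+    filter {
+      anonymize {
+        algorithm => "SHA256"
+        fields => ["user_email"]   # hypothetical field name
+        key => "my_secret_key"     # hypothetical key
+      }
+      anonymize {
+        algorithm => "IPV4_NETWORK"
+        fields => ["clientip"]     # hypothetical field name
+        key => "24"                # here the key is the subnet prefix length
+      }
+    }
+
+The first block replaces `user_email` with its keyed SHA256 hash; the second masks `clientip` to its /24 network, since with `IPV4_NETWORK` the key is the subnet prefix length.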
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/bytesize-index.asciidoc b/docs/versioned-plugins/filters/bytesize-index.asciidoc
new file mode 100644
index 000000000..efc9f5967
--- /dev/null
+++ b/docs/versioned-plugins/filters/bytesize-index.asciidoc
@@ -0,0 +1,10 @@
+:plugin: bytesize
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+|=======================================================================
+
+
diff --git a/docs/versioned-plugins/filters/checksum-index.asciidoc b/docs/versioned-plugins/filters/checksum-index.asciidoc
new file mode 100644
index 000000000..bfa48d370
--- /dev/null
+++ b/docs/versioned-plugins/filters/checksum-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: checksum
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::checksum-v3.0.4.asciidoc[]
+include::checksum-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/checksum-v3.0.3.asciidoc b/docs/versioned-plugins/filters/checksum-v3.0.3.asciidoc
new file mode 100644
index 000000000..777ce08de
--- /dev/null
+++ b/docs/versioned-plugins/filters/checksum-v3.0.3.asciidoc
@@ -0,0 +1,69 @@
+:plugin: checksum
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-checksum/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Checksum filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This filter lets you create a checksum based on various parts
+of the Logstash event.
+This can be useful for deduplication of messages or simply to provide
+a custom unique identifier.
+
+This is VERY experimental and is largely a proof-of-concept.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Checksum Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-algorithm>> |<>, one of `["md5", "sha", "sha1", "sha256", "sha384"]`|No
+| <<{version}-plugins-{type}s-{plugin}-keys>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-algorithm"]
+===== `algorithm`
+
+ * Value can be any of: `md5`, `sha`, `sha1`, `sha256`, `sha384`
+ * Default value is `"sha256"`
+
+The hash algorithm to use.
+
+[id="{version}-plugins-{type}s-{plugin}-keys"]
+===== `keys`
+
+ * Value type is <>
+ * Default value is `["message", "@timestamp", "type"]`
+
+A list of keys to use in creating the string to checksum.
+Keys will be sorted before building the string; keys and values
+will then be concatenated with pipe delimiters and checksummed.
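+
+For illustration, a minimal sketch of a checksum configuration (the values shown here are just the documented defaults made explicit):
+
+[source,ruby]
+    filter {
+      checksum {
+        algorithm => "sha256"
+        keys => ["message", "@timestamp", "type"]  # the documented defaults, shown explicitly
+      }
+    }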
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/checksum-v3.0.4.asciidoc b/docs/versioned-plugins/filters/checksum-v3.0.4.asciidoc
new file mode 100644
index 000000000..987435a6c
--- /dev/null
+++ b/docs/versioned-plugins/filters/checksum-v3.0.4.asciidoc
@@ -0,0 +1,69 @@
+:plugin: checksum
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-checksum/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Checksum filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This filter lets you create a checksum based on various parts
+of the Logstash event.
+This can be useful for deduplication of messages or simply to provide
+a custom unique identifier.
+
+This is VERY experimental and is largely a proof-of-concept.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Checksum Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-algorithm>> |<>, one of `["md5", "sha", "sha1", "sha256", "sha384"]`|No
+| <<{version}-plugins-{type}s-{plugin}-keys>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-algorithm"]
+===== `algorithm`
+
+ * Value can be any of: `md5`, `sha`, `sha1`, `sha256`, `sha384`
+ * Default value is `"sha256"`
+
+The hash algorithm to use.
+
+[id="{version}-plugins-{type}s-{plugin}-keys"]
+===== `keys`
+
+ * Value type is <>
+ * Default value is `["message", "@timestamp", "type"]`
+
+A list of keys to use in creating the string to checksum.
+Keys will be sorted before building the string; keys and values
+will then be concatenated with pipe delimiters and checksummed.
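+
+For illustration, a minimal sketch of a checksum configuration (the values shown here are just the documented defaults made explicit):
+
+[source,ruby]
+    filter {
+      checksum {
+        algorithm => "sha256"
+        keys => ["message", "@timestamp", "type"]  # the documented defaults, shown explicitly
+      }
+    }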
Example: +[source,ruby] + filter { + cidr { + add_tag => [ "testnet" ] + address => [ "%{src_ip}", "%{dst_ip}" ] + network => [ "192.0.2.0/24" ] + } + } + +[id="{version}-plugins-{type}s-{plugin}-network"] +===== `network` + + * Value type is <> + * Default value is `[]` + +The IP network(s) to check against. Example: +[source,ruby] + filter { + cidr { + add_tag => [ "linklocal" ] + address => [ "%{clientip}" ] + network => [ "169.254.0.0/16", "fe80::/64" ] + } + } + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/cidr-v3.1.1.asciidoc b/docs/versioned-plugins/filters/cidr-v3.1.1.asciidoc new file mode 100644 index 000000000..03ebc90a4 --- /dev/null +++ b/docs/versioned-plugins/filters/cidr-v3.1.1.asciidoc @@ -0,0 +1,114 @@ +:plugin: cidr +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.1 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-cidr/blob/v3.1.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Cidr filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The CIDR filter is for checking IP addresses in events against a list of +network blocks that might contain it. Multiple addresses can be checked +against multiple networks, any match succeeds. Upon success additional tags +and/or fields can be added to the event. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Cidr Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-address>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-network>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-network_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-refresh_interval>>| <>|No +| <<{version}-plugins-{type}s-{plugin}-separator>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-address"] +===== `address` + + * Value type is <> + * Default value is `[]` + +The IP address(es) to check with. Example: +[source,ruby] + filter { + cidr { + add_tag => [ "testnet" ] + address => [ "%{src_ip}", "%{dst_ip}" ] + network => [ "192.0.2.0/24" ] + } + } + +[id="{version}-plugins-{type}s-{plugin}-network"] +===== `network` + + * Value type is <> + * Default value is `[]` + +The IP network(s) to check against. Example: +[source,ruby] + filter { + cidr { + add_tag => [ "linklocal" ] + address => [ "%{clientip}" ] + network => [ "169.254.0.0/16", "fe80::/64" ] + } + } + + +[id="{version}-plugins-{type}s-{plugin}-network_path"] +===== `network_path` + + * Value type is <> + * There is no default value for this setting. + +The full path of the external file containing the networks the filter should check with. 
+Networks are separated by a separator character defined in `separator`. +[source,ruby] + 192.168.1.0/24 + 192.167.0.0/16 +NOTE: It is an error to specify both `network` and `network_path`. + +[id="{version}-plugins-{type}s-{plugin}-refresh_interval"] +===== `refresh_interval` + + * Value type is <> + * Default value is `600` + +When using an external file, this setting will indicate how frequently +(in seconds) Logstash will check the file for updates. + + +[id="{version}-plugins-{type}s-{plugin}-separator"] +===== `separator` + + * Value type is <> + * Default value is `\n` + +Separator character used for parsing networks from the external file +specified by `network_path`. Defaults to newline `\n` character. + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/cidr-v3.1.2.asciidoc b/docs/versioned-plugins/filters/cidr-v3.1.2.asciidoc new file mode 100644 index 000000000..5b0ad6d78 --- /dev/null +++ b/docs/versioned-plugins/filters/cidr-v3.1.2.asciidoc @@ -0,0 +1,114 @@ +:plugin: cidr +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.2 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-cidr/blob/v3.1.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Cidr filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The CIDR filter is for checking IP addresses in events against a list of +network blocks that might contain it. Multiple addresses can be checked +against multiple networks, any match succeeds. Upon success additional tags +and/or fields can be added to the event. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Cidr Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-address>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-network>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-network_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-refresh_interval>>| <>|No +| <<{version}-plugins-{type}s-{plugin}-separator>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-address"] +===== `address` + + * Value type is <> + * Default value is `[]` + +The IP address(es) to check with. Example: +[source,ruby] + filter { + cidr { + add_tag => [ "testnet" ] + address => [ "%{src_ip}", "%{dst_ip}" ] + network => [ "192.0.2.0/24" ] + } + } + +[id="{version}-plugins-{type}s-{plugin}-network"] +===== `network` + + * Value type is <> + * Default value is `[]` + +The IP network(s) to check against. 
Example: +[source,ruby] + filter { + cidr { + add_tag => [ "linklocal" ] + address => [ "%{clientip}" ] + network => [ "169.254.0.0/16", "fe80::/64" ] + } + } + + +[id="{version}-plugins-{type}s-{plugin}-network_path"] +===== `network_path` + + * Value type is <> + * There is no default value for this setting. + +The full path of the external file containing the networks the filter should check with. +Networks are separated by a separator character defined in `separator`. +[source,ruby] + 192.168.1.0/24 + 192.167.0.0/16 +NOTE: It is an error to specify both `network` and `network_path`. + +[id="{version}-plugins-{type}s-{plugin}-refresh_interval"] +===== `refresh_interval` + + * Value type is <> + * Default value is `600` + +When using an external file, this setting will indicate how frequently +(in seconds) Logstash will check the file for updates. + + +[id="{version}-plugins-{type}s-{plugin}-separator"] +===== `separator` + + * Value type is <> + * Default value is `\n` + +Separator character used for parsing networks from the external file +specified by `network_path`. Defaults to newline `\n` character. + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/cipher-index.asciidoc b/docs/versioned-plugins/filters/cipher-index.asciidoc new file mode 100644 index 000000000..872886b79 --- /dev/null +++ b/docs/versioned-plugins/filters/cipher-index.asciidoc @@ -0,0 +1,18 @@ +:plugin: cipher +:type: filter + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-10-02 +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::cipher-v3.0.1.asciidoc[] +include::cipher-v3.0.0.asciidoc[] +include::cipher-v2.0.7.asciidoc[] +include::cipher-v2.0.6.asciidoc[] + diff --git a/docs/versioned-plugins/filters/cipher-v2.0.6.asciidoc b/docs/versioned-plugins/filters/cipher-v2.0.6.asciidoc new file mode 100644 index 000000000..93f0b4854 --- /dev/null +++ b/docs/versioned-plugins/filters/cipher-v2.0.6.asciidoc @@ -0,0 +1,243 @@ +:plugin: cipher +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v2.0.6 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-cipher/blob/v2.0.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Cipher filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This filter parses a source and apply a cipher or decipher before +storing it in the target. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Cipher Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-algorithm>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-base64>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cipher_padding>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-iv_random_length>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key_pad>> |<<,>>|No +| <<{version}-plugins-{type}s-{plugin}-key_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_cipher_reuse>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-algorithm"] +===== `algorithm` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The cipher algorithm + +A list of supported algorithms can be obtained by +[source,ruby] + puts OpenSSL::Cipher.ciphers + +[id="{version}-plugins-{type}s-{plugin}-base64"] +===== `base64` + + * Value type is <> + * Default value is `true` + +Do we have to perform a `base64` decode or encode? + +If we are decrypting, `base64` decode will be done before. +If we are encrypting, `base64` will be done after. + + +[id="{version}-plugins-{type}s-{plugin}-cipher_padding"] +===== `cipher_padding` + + * Value type is <> + * There is no default value for this setting. + +Cipher padding to use. Enables or disables padding. + +By default encryption operations are padded using standard block padding +and the padding is checked and removed when decrypting. If the pad +parameter is zero then no padding is performed, the total amount of data +encrypted or decrypted must then be a multiple of the block size or an +error will occur. + +See EVP_CIPHER_CTX_set_padding for further information. + +We are using Openssl jRuby which uses default padding to PKCS5Padding +If you want to change it, set this parameter. If you want to disable +it, Set this parameter to 0 +[source,ruby] + filter { cipher { cipher_padding => 0 }} + +[id="{version}-plugins-{type}s-{plugin}-iv"] +===== `iv` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + +The initialization vector to use (statically hard-coded). For +a random IV see the iv_random_length property + +NOTE: If iv_random_length is set, it takes precedence over any value set for "iv" + +The cipher modes CBC, CFB, OFB and CTR all need an "initialization +vector", or short, IV. ECB mode is the only mode that does not require +an IV, but there is almost no legitimate use case for this mode +because of the fact that it does not sufficiently hide plaintext patterns. + +For AES algorithms set this to a 16 byte string. +[source,ruby] + filter { cipher { iv => "1234567890123456" }} + +Deprecated: Please use `iv_random_length` instead + +[id="{version}-plugins-{type}s-{plugin}-iv_random_length"] +===== `iv_random_length` + + * Value type is <> + * There is no default value for this setting. 
+
+Force a random IV to be used per encryption invocation and specify
+the length of the random IV that will be generated via:
+
+ OpenSSL::Random.random_bytes(int_length)
+
+If `iv_random_length` is set, it takes precedence over any value set for `iv`.
+
+Enabling this will force the plugin to generate a unique
+random IV for each encryption call. This random IV will be prepended to the
+encrypted result bytes and then base64 encoded. On decryption, `iv_random_length` must
+also be set to utilize this feature. Random IVs are better than statically
+hardcoded IVs.
+
+For AES algorithms, set this to 16:
+[source,ruby]
+ filter { cipher { iv_random_length => 16 }}
+
+[id="{version}-plugins-{type}s-{plugin}-key"]
+===== `key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The key to use.
+
+NOTE: If you encounter an error message at runtime containing the following:
+
+"java.security.InvalidKeyException: Illegal key size: possibly you need to install
+Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy Files for your JRE"
+
+Please read the following: https://github.com/jruby/jruby/wiki/UnlimitedStrengthCrypto
+
+
+[id="{version}-plugins-{type}s-{plugin}-key_pad"]
+===== `key_pad`
+
+ * Value type is <>
+ * Default value is `"\u0000"`
+
+The character used to pad the key.
+
+[id="{version}-plugins-{type}s-{plugin}-key_size"]
+===== `key_size`
+
+ * Value type is <>
+ * Default value is `16`
+
+The key size to pad the key to.
+
+It depends on the cipher algorithm. If your key doesn't need
+padding, don't set this parameter.
+
+For example, AES-128 requires a 16-character key and AES-256 a
+32-character key:
+[source,ruby]
+ filter { cipher { key_size => 16 }}
+
+
+[id="{version}-plugins-{type}s-{plugin}-max_cipher_reuse"]
+===== `max_cipher_reuse`
+
+ * Value type is <>
+ * Default value is `1`
+
+If this is set, the internal Cipher instance will be
+re-used up to @max_cipher_reuse times before being
+reset() and re-created from scratch. This is an option
+for efficiency where lots of data is being encrypted
+and decrypted using this filter. This lets the filter
+avoid creating new Cipher instances over and over
+for each encrypt/decrypt operation.
+
+This is optional; the default of `max_cipher_reuse = 1` means the
+Cipher instance is not re-used.
+[source,ruby]
+ filter { cipher { max_cipher_reuse => 1000 }}
+
+[id="{version}-plugins-{type}s-{plugin}-mode"]
+===== `mode`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Whether to encrypt or decrypt the data.
+
+Valid values are `encrypt` or `decrypt`.
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * Value type is <>
+ * Default value is `"message"`
+
+The field to perform the filter on.
+
+For example, to use the @message field (default):
+[source,ruby]
+ filter { cipher { source => "message" } }
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * Default value is `"message"`
+
+The name of the field in which to store the result.
+
+For example, to place the result into crypt:
+[source,ruby]
+ filter { cipher { target => "crypt" } }
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/cipher-v2.0.7.asciidoc b/docs/versioned-plugins/filters/cipher-v2.0.7.asciidoc
new file mode 100644
index 000000000..0c4f34c40
--- /dev/null
+++ b/docs/versioned-plugins/filters/cipher-v2.0.7.asciidoc
@@ -0,0 +1,243 @@
+:plugin: cipher
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.7
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-cipher/blob/v2.0.7/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Cipher filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This filter parses a source field and applies a cipher or decipher before
+storing the result in the target field.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Cipher Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-algorithm>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-base64>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cipher_padding>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-iv_random_length>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-key_pad>> |<<,>>|No
+| <<{version}-plugins-{type}s-{plugin}-key_size>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-max_cipher_reuse>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-mode>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-algorithm"]
+===== `algorithm`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The cipher algorithm.
+
+A list of supported algorithms can be obtained by:
+[source,ruby]
+ puts OpenSSL::Cipher.ciphers
+
+[id="{version}-plugins-{type}s-{plugin}-base64"]
+===== `base64`
+
+ * Value type is <>
+ * Default value is `true`
+
+Do we have to perform a `base64` decode or encode?
+
+If we are decrypting, `base64` decode will be done before.
+If we are encrypting, `base64` will be done after.
+
+
+[id="{version}-plugins-{type}s-{plugin}-cipher_padding"]
+===== `cipher_padding`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Cipher padding to use. Enables or disables padding.
+
+By default encryption operations are padded using standard block padding,
+and the padding is checked and removed when decrypting. If the pad
+parameter is zero, then no padding is performed, and the total amount of data
+encrypted or decrypted must then be a multiple of the block size or an
+error will occur.
+
+See EVP_CIPHER_CTX_set_padding for further information.
+
+We are using OpenSSL under JRuby, which defaults to PKCS5Padding.
+If you want to change the padding, set this parameter. If you want to
+disable padding, set this parameter to 0:
+[source,ruby]
+ filter { cipher { cipher_padding => 0 }}
+
+[id="{version}-plugins-{type}s-{plugin}-iv"]
+===== `iv` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The initialization vector to use (statically hard-coded). For
+a random IV, see the `iv_random_length` option.
+
+NOTE: If `iv_random_length` is set, it takes precedence over any value set for `iv`.
+
+The cipher modes CBC, CFB, OFB and CTR all need an "initialization
+vector", or IV for short. ECB mode is the only mode that does not require
+an IV, but there is almost no legitimate use case for this mode
+because it does not sufficiently hide plaintext patterns.
+
+For AES algorithms set this to a 16 byte string.
+[source,ruby]
+ filter { cipher { iv => "1234567890123456" }}
+
+Deprecated: Please use `iv_random_length` instead.
+
+[id="{version}-plugins-{type}s-{plugin}-iv_random_length"]
+===== `iv_random_length`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Force a random IV to be used per encryption invocation and specify
+the length of the random IV that will be generated via:
+
+ OpenSSL::Random.random_bytes(int_length)
+
+If `iv_random_length` is set, it takes precedence over any value set for `iv`.
+
+Enabling this will force the plugin to generate a unique
+random IV for each encryption call. This random IV will be prepended to the
+encrypted result bytes and then base64 encoded. On decryption, `iv_random_length` must
+also be set to utilize this feature. Random IVs are better than statically
+hardcoded IVs.
+
+For AES algorithms, set this to 16:
+[source,ruby]
+ filter { cipher { iv_random_length => 16 }}
+
+[id="{version}-plugins-{type}s-{plugin}-key"]
+===== `key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The key to use.
+
+NOTE: If you encounter an error message at runtime containing the following:
+
+"java.security.InvalidKeyException: Illegal key size: possibly you need to install
+Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy Files for your JRE"
+
+Please read the following: https://github.com/jruby/jruby/wiki/UnlimitedStrengthCrypto
+
+
+[id="{version}-plugins-{type}s-{plugin}-key_pad"]
+===== `key_pad`
+
+ * Value type is <>
+ * Default value is `"\u0000"`
+
+The character used to pad the key.
+
+[id="{version}-plugins-{type}s-{plugin}-key_size"]
+===== `key_size`
+
+ * Value type is <>
+ * Default value is `16`
+
+The key size to pad the key to.
+
+It depends on the cipher algorithm. If your key doesn't need
+padding, don't set this parameter.
+
+For example, AES-128 requires a 16-character key and AES-256 a
+32-character key:
+[source,ruby]
+ filter { cipher { key_size => 16 }}
+
+
+[id="{version}-plugins-{type}s-{plugin}-max_cipher_reuse"]
+===== `max_cipher_reuse`
+
+ * Value type is <>
+ * Default value is `1`
+
+If this is set, the internal Cipher instance will be
+re-used up to @max_cipher_reuse times before being
+reset() and re-created from scratch. This is an option
+for efficiency where lots of data is being encrypted
+and decrypted using this filter. This lets the filter
+avoid creating new Cipher instances over and over
+for each encrypt/decrypt operation.
+
+This is optional; the default of `max_cipher_reuse = 1` means the
+Cipher instance is not re-used.
+[source,ruby]
+ filter { cipher { max_cipher_reuse => 1000 }}
+
+[id="{version}-plugins-{type}s-{plugin}-mode"]
+===== `mode`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Whether to encrypt or decrypt the data.
+
+Valid values are `encrypt` or `decrypt`.
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * Value type is <>
+ * Default value is `"message"`
+
+The field to perform the filter on.
+
+For example, to use the @message field (default):
+[source,ruby]
+ filter { cipher { source => "message" } }
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * Default value is `"message"`
+
+The name of the field in which to store the result.
+
+For example, to place the result into crypt:
+[source,ruby]
+ filter { cipher { target => "crypt" } }
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/cipher-v3.0.0.asciidoc b/docs/versioned-plugins/filters/cipher-v3.0.0.asciidoc
new file mode 100644
index 000000000..b207b1333
--- /dev/null
+++ b/docs/versioned-plugins/filters/cipher-v3.0.0.asciidoc
@@ -0,0 +1,220 @@
+:plugin: cipher
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.0
+:release_date: 2017-10-02
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-cipher/blob/v3.0.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Cipher filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This filter parses a source field and applies a cipher or decipher before
+storing the result in the target field.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Cipher Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-algorithm>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-base64>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cipher_padding>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-iv_random_length>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-key_pad>> |<<,>>|No
+| <<{version}-plugins-{type}s-{plugin}-key_size>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-max_cipher_reuse>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-mode>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-algorithm"]
+===== `algorithm`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The cipher algorithm.
+
+A list of supported algorithms can be obtained by:
+[source,ruby]
+ puts OpenSSL::Cipher.ciphers
+
+[id="{version}-plugins-{type}s-{plugin}-base64"]
+===== `base64`
+
+ * Value type is <>
+ * Default value is `true`
+
+Do we have to perform a `base64` decode or encode?
+
+If we are decrypting, `base64` decode will be done before.
+If we are encrypting, `base64` will be done after.
+
+
+[id="{version}-plugins-{type}s-{plugin}-cipher_padding"]
+===== `cipher_padding`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Cipher padding to use. Enables or disables padding.
+
+By default encryption operations are padded using standard block padding,
+and the padding is checked and removed when decrypting. If the pad
+parameter is zero, then no padding is performed, and the total amount of data
+encrypted or decrypted must then be a multiple of the block size or an
+error will occur.
+
+See EVP_CIPHER_CTX_set_padding for further information.
+
+We are using OpenSSL under JRuby, which defaults to PKCS5Padding.
+If you want to change the padding, set this parameter. If you want to
+disable padding, set this parameter to 0:
+[source,ruby]
+ filter { cipher { cipher_padding => 0 }}
+
+[id="{version}-plugins-{type}s-{plugin}-iv_random_length"]
+===== `iv_random_length`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Force a random IV to be used per encryption invocation and specify
+the length of the random IV that will be generated via:
+
+ OpenSSL::Random.random_bytes(int_length)
+
+If `iv_random_length` is set, it takes precedence over any value set for `iv`.
+
+Enabling this will force the plugin to generate a unique
+random IV for each encryption call. This random IV will be prepended to the
+encrypted result bytes and then base64 encoded. On decryption, `iv_random_length` must
+also be set to utilize this feature. Random IVs are better than statically
+hardcoded IVs.
+
+For AES algorithms, set this to 16:
+[source,ruby]
+ filter { cipher { iv_random_length => 16 }}
+
+[id="{version}-plugins-{type}s-{plugin}-key"]
+===== `key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The key to use.
+
+NOTE: If you encounter an error message at runtime containing the following:
+
+"java.security.InvalidKeyException: Illegal key size: possibly you need to install
+Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy Files for your JRE"
+
+Please read the following: https://github.com/jruby/jruby/wiki/UnlimitedStrengthCrypto
+
+
+[id="{version}-plugins-{type}s-{plugin}-key_pad"]
+===== `key_pad`
+
+ * Value type is <>
+ * Default value is `"\u0000"`
+
+The character used to pad the key.
+
+[id="{version}-plugins-{type}s-{plugin}-key_size"]
+===== `key_size`
+
+ * Value type is <>
+ * Default value is `16`
+
+The key size to pad the key to.
+
+It depends on the cipher algorithm. If your key doesn't need
+padding, don't set this parameter.
+
+For example, AES-128 requires a 16-character key and AES-256 a
+32-character key:
+[source,ruby]
+ filter { cipher { key_size => 16 }}
+
+
+[id="{version}-plugins-{type}s-{plugin}-max_cipher_reuse"]
+===== `max_cipher_reuse`
+
+ * Value type is <>
+ * Default value is `1`
+
+If this is set, the internal Cipher instance will be
+re-used up to @max_cipher_reuse times before being
+reset() and re-created from scratch. This is an option
+for efficiency where lots of data is being encrypted
+and decrypted using this filter. This lets the filter
+avoid creating new Cipher instances over and over
+for each encrypt/decrypt operation.
+
+This is optional; the default of `max_cipher_reuse = 1` means the
+Cipher instance is not re-used.
+[source,ruby]
+ filter { cipher { max_cipher_reuse => 1000 }}
+
+[id="{version}-plugins-{type}s-{plugin}-mode"]
+===== `mode`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Whether to encrypt or decrypt the data.
+
+Valid values are `encrypt` or `decrypt`.
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * Value type is <>
+ * Default value is `"message"`
+
+The field to perform the filter on.
+
+For example, to use the @message field (default):
+[source,ruby]
+ filter { cipher { source => "message" } }
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * Default value is `"message"`
+
+The name of the field in which to store the result.
+
+For example, to place the result into crypt:
+[source,ruby]
+ filter { cipher { target => "crypt" } }
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/cipher-v3.0.1.asciidoc b/docs/versioned-plugins/filters/cipher-v3.0.1.asciidoc
new file mode 100644
index 000000000..009f95aaa
--- /dev/null
+++ b/docs/versioned-plugins/filters/cipher-v3.0.1.asciidoc
@@ -0,0 +1,220 @@
+:plugin: cipher
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.1
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-cipher/blob/v3.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Cipher filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This filter parses a source field and applies a cipher or decipher before
+storing the result in the target field.
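+
+For example, a minimal encryption sketch (the key, algorithm choice, and
+field names shown here are illustrative, not defaults):
+[source,ruby]
+ filter {
+   cipher {
+     mode => "encrypt"
+     algorithm => "aes-256-cbc"
+     key => "12345678901234567890123456789012"
+     key_size => 32
+     iv_random_length => 16
+     source => "message"
+     target => "message_crypted"
+   }
+ }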
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Cipher Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-algorithm>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-base64>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cipher_padding>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-iv_random_length>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-key_pad>> |<<,>>|No
+| <<{version}-plugins-{type}s-{plugin}-key_size>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-max_cipher_reuse>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-mode>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-algorithm"]
+===== `algorithm`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The cipher algorithm.
+
+A list of supported algorithms can be obtained by:
+[source,ruby]
+ puts OpenSSL::Cipher.ciphers
+
+[id="{version}-plugins-{type}s-{plugin}-base64"]
+===== `base64`
+
+ * Value type is <>
+ * Default value is `true`
+
+Do we have to perform a `base64` decode or encode?
+
+If we are decrypting, `base64` decode will be done before.
+If we are encrypting, `base64` will be done after.
+
+
+[id="{version}-plugins-{type}s-{plugin}-cipher_padding"]
+===== `cipher_padding`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Cipher padding to use. Enables or disables padding.
+
+By default encryption operations are padded using standard block padding,
+and the padding is checked and removed when decrypting. If the pad
+parameter is zero, then no padding is performed, and the total amount of data
+encrypted or decrypted must then be a multiple of the block size or an
+error will occur.
+
+See EVP_CIPHER_CTX_set_padding for further information.
+
+We are using OpenSSL under JRuby, which defaults to PKCS5Padding.
+If you want to change the padding, set this parameter. If you want to
+disable padding, set this parameter to 0:
+[source,ruby]
+ filter { cipher { cipher_padding => 0 }}
+
+[id="{version}-plugins-{type}s-{plugin}-iv_random_length"]
+===== `iv_random_length`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Force a random IV to be used per encryption invocation and specify
+the length of the random IV that will be generated via:
+
+ OpenSSL::Random.random_bytes(int_length)
+
+If `iv_random_length` is set, it takes precedence over any value set for `iv`.
+
+Enabling this will force the plugin to generate a unique
+random IV for each encryption call. This random IV will be prepended to the
+encrypted result bytes and then base64 encoded. On decryption, `iv_random_length` must
+also be set to utilize this feature. Random IVs are better than statically
+hardcoded IVs.
+
+For AES algorithms, set this to 16:
+[source,ruby]
+ filter { cipher { iv_random_length => 16 }}
+
+[id="{version}-plugins-{type}s-{plugin}-key"]
+===== `key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The key to use.
+
+NOTE: If you encounter an error message at runtime containing the following:
+
+"java.security.InvalidKeyException: Illegal key size: possibly you need to install
+Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy Files for your JRE"
+
+Please read the following: https://github.com/jruby/jruby/wiki/UnlimitedStrengthCrypto
+
+
+[id="{version}-plugins-{type}s-{plugin}-key_pad"]
+===== `key_pad`
+
+ * Value type is <>
+ * Default value is `"\u0000"`
+
+The character used to pad the key.
+
+[id="{version}-plugins-{type}s-{plugin}-key_size"]
+===== `key_size`
+
+ * Value type is <>
+ * Default value is `16`
+
+The key size to pad the key to.
+
+It depends on the cipher algorithm. If your key doesn't need
+padding, don't set this parameter.
+
+For example, AES-128 requires a 16-character key and AES-256 a
+32-character key:
+[source,ruby]
+ filter { cipher { key_size => 16 }}
+
+
+[id="{version}-plugins-{type}s-{plugin}-max_cipher_reuse"]
+===== `max_cipher_reuse`
+
+ * Value type is <>
+ * Default value is `1`
+
+If this is set, the internal Cipher instance will be
+re-used up to @max_cipher_reuse times before being
+reset() and re-created from scratch. This is an option
+for efficiency where lots of data is being encrypted
+and decrypted using this filter. This lets the filter
+avoid creating new Cipher instances over and over
+for each encrypt/decrypt operation.
+
+This is optional; the default of `max_cipher_reuse = 1` means the
+Cipher instance is not re-used.
+[source,ruby]
+ filter { cipher { max_cipher_reuse => 1000 }}
+
+[id="{version}-plugins-{type}s-{plugin}-mode"]
+===== `mode`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Whether to encrypt or decrypt the data.
+
+Valid values are `encrypt` or `decrypt`.
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * Value type is <>
+ * Default value is `"message"`
+
+The field to perform the filter on.
+
+For example, to use the @message field (default):
+[source,ruby]
+ filter { cipher { source => "message" } }
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * Default value is `"message"`
+
+The name of the field in which to store the result.
+
+For example, to place the result into crypt:
+[source,ruby]
+ filter { cipher { target => "crypt" } }
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/clone-index.asciidoc b/docs/versioned-plugins/filters/clone-index.asciidoc
new file mode 100644
index 000000000..e411c7f54
--- /dev/null
+++ b/docs/versioned-plugins/filters/clone-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: clone
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::clone-v3.0.5.asciidoc[]
+include::clone-v3.0.4.asciidoc[]
+include::clone-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/clone-v3.0.3.asciidoc b/docs/versioned-plugins/filters/clone-v3.0.3.asciidoc
new file mode 100644
index 000000000..1feaab8e7
--- /dev/null
+++ b/docs/versioned-plugins/filters/clone-v3.0.3.asciidoc
@@ -0,0 +1,57 @@
+:plugin: clone
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-clone/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="plugins-{type}-{plugin}"]
+
+=== Clone filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The clone filter is for duplicating events.
+A clone will be created for each type in the clone list.
+The original event is left unchanged.
+Created events are inserted into the pipeline
+as normal events and will be processed by the remaining pipeline configuration
+starting from the filter that generated them (i.e. this plugin).
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Clone Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-clones>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-clones"]
+===== `clones`
+
+ * Value type is <>
+ * Default value is `[]`
+
+A new clone will be created with the given type for each type in this list.
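+
+For example, a sketch that emits two additional copies of each event, typed
+`app-log` and `audit-log` (the type names are illustrative):
+[source,ruby]
+ filter {
+   clone {
+     clones => [ "app-log", "audit-log" ]
+   }
+ }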
+ + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/clone-v3.0.4.asciidoc b/docs/versioned-plugins/filters/clone-v3.0.4.asciidoc new file mode 100644 index 000000000..96e3bc45c --- /dev/null +++ b/docs/versioned-plugins/filters/clone-v3.0.4.asciidoc @@ -0,0 +1,57 @@ +:plugin: clone +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-clone/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Clone filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The clone filter is for duplicating events. +A clone will be created for each type in the clone list. +The original event is left unchanged. +Created events are inserted into the pipeline +as normal events and will be processed by the remaining pipeline configuration +starting from the filter that generated them (i.e. this plugin). + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Clone Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-clones>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-clones"] +===== `clones` + + * Value type is <> + * Default value is `[]` + +A new clone will be created with the given type for each type in this list. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/clone-v3.0.5.asciidoc b/docs/versioned-plugins/filters/clone-v3.0.5.asciidoc new file mode 100644 index 000000000..00a235bde --- /dev/null +++ b/docs/versioned-plugins/filters/clone-v3.0.5.asciidoc @@ -0,0 +1,57 @@ +:plugin: clone +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-clone/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Clone filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The clone filter is for duplicating events. +A clone will be created for each type in the clone list. +The original event is left unchanged. 
+Created events are inserted into the pipeline
+as normal events and will be processed by the remaining pipeline configuration
+starting from the filter that generated them (i.e. this plugin).
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Clone Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-clones>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-clones"]
+===== `clones`
+
+ * Value type is <>
+ * Default value is `[]`
+
+A new clone will be created with the given type for each type in this list.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/cloudfoundry-index.asciidoc b/docs/versioned-plugins/filters/cloudfoundry-index.asciidoc
new file mode 100644
index 000000000..70c2b846c
--- /dev/null
+++ b/docs/versioned-plugins/filters/cloudfoundry-index.asciidoc
@@ -0,0 +1,10 @@
+:plugin: cloudfoundry
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+|=======================================================================
+
+
diff --git a/docs/versioned-plugins/filters/collate-index.asciidoc b/docs/versioned-plugins/filters/collate-index.asciidoc
new file mode 100644
index 000000000..0998a1c97
--- /dev/null
+++ b/docs/versioned-plugins/filters/collate-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: collate
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::collate-v2.0.6.asciidoc[]
+include::collate-v2.0.5.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/collate-v2.0.5.asciidoc b/docs/versioned-plugins/filters/collate-v2.0.5.asciidoc
new file mode 100644
index 000000000..6aa1bc021
--- /dev/null
+++ b/docs/versioned-plugins/filters/collate-v2.0.5.asciidoc
@@ -0,0 +1,84 @@
+:plugin: collate
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.5
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-collate/blob/v2.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="plugins-{type}-{plugin}"]
+
+=== Collate filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Collate events by time or count.
+
+The original goal of this filter was to merge logs from different sources
+by log time; for example, in real-time log collection, logs can be
+collated in batches of 3000 or collated every 30 seconds.
+
+The config looks like this:
+[source,ruby]
+ filter {
+ collate {
+ count => 3000
+ interval => "30s"
+ order => "ascending"
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Collate Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-count>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-order>> |<>, one of `["ascending", "descending"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-count"]
+===== `count`
+
+ * Value type is <>
+ * Default value is `1000`
+
+How many logs should be collated.
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+ * Value type is <>
+ * Default value is `"1m"`
+
+The `interval` is the time window over which the logs will be collated (default `1m`).
+
+[id="{version}-plugins-{type}s-{plugin}-order"]
+===== `order`
+
+ * Value can be any of: `ascending`, `descending`
+ * Default value is `"ascending"`
+
+The `order` collated events should appear in.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/collate-v2.0.6.asciidoc b/docs/versioned-plugins/filters/collate-v2.0.6.asciidoc
new file mode 100644
index 000000000..bf5e4303d
--- /dev/null
+++ b/docs/versioned-plugins/filters/collate-v2.0.6.asciidoc
@@ -0,0 +1,84 @@
+:plugin: collate
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.6
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-collate/blob/v2.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Collate filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Collate events by time or count.
+
+The original goal of this filter was to merge logs from different sources
+by log time; for example, in real-time log collection, logs can be
+collated in batches of 3000 or collated every 30 seconds.
+
+The config looks like this:
+[source,ruby]
+ filter {
+ collate {
+ count => 3000
+ interval => "30s"
+ order => "ascending"
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Collate Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-count>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-order>> |<>, one of `["ascending", "descending"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-count"]
+===== `count`
+
+ * Value type is <>
+ * Default value is `1000`
+
+How many logs should be collated.
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+ * Value type is <>
+ * Default value is `"1m"`
+
+The `interval` is the time window over which the logs will be collated (default `1m`).
+
+[id="{version}-plugins-{type}s-{plugin}-order"]
+===== `order`
+
+ * Value can be any of: `ascending`, `descending`
+ * Default value is `"ascending"`
+
+The `order` collated events should appear in.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/csv-index.asciidoc b/docs/versioned-plugins/filters/csv-index.asciidoc
new file mode 100644
index 000000000..6e0c3d04f
--- /dev/null
+++ b/docs/versioned-plugins/filters/csv-index.asciidoc
@@ -0,0 +1,20 @@
+:plugin: csv
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-11-03
+| <> | 2017-08-15
+| <> | 2017-06-23
+| <> | 2017-05-24
+|=======================================================================
+
+include::csv-v3.0.7.asciidoc[]
+include::csv-v3.0.6.asciidoc[]
+include::csv-v3.0.5.asciidoc[]
+include::csv-v3.0.4.asciidoc[]
+include::csv-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/csv-v3.0.3.asciidoc b/docs/versioned-plugins/filters/csv-v3.0.3.asciidoc
new file mode 100644
index 000000000..2b6578204
--- /dev/null
+++ b/docs/versioned-plugins/filters/csv-v3.0.3.asciidoc
@@ -0,0 +1,152 @@
+:plugin: csv
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-05-24
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-csv/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="plugins-{type}-{plugin}"]
+
+=== Csv filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The CSV filter takes an event field containing CSV data, parses it,
+and stores it as individual fields (can optionally specify the names).
+This filter can also parse data with any separator, not just commas.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Csv Filter Configuration Options
+
+This plugin supports the following configuration options plus the <> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-autodetect_column_names>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-autogenerate_column_names>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-columns>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-convert>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-quote_char>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-separator>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-skip_empty_columns>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+|=======================================================================
+
+Also see <> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-autodetect_column_names"]
+===== `autodetect_column_names`
+
+ * Value type is <>
+ * Default value is `false`
+
+Define whether column names should be auto-detected from the header column or not.
+Defaults to false.
+
+[id="{version}-plugins-{type}s-{plugin}-autogenerate_column_names"]
+===== `autogenerate_column_names`
+
+ * Value type is <>
+ * Default value is `true`
+
+Define whether column names should be autogenerated or not.
+Defaults to true. If set to false, columns not having a header specified will not be parsed.
+
+[id="{version}-plugins-{type}s-{plugin}-columns"]
+===== `columns`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Define a list of column names (in the order they appear in the CSV,
+as if it were a header line). If `columns` is not configured, or there
+are not enough columns specified, the default column names are
+"column1", "column2", etc. In the case that there are more columns
+in the data than specified in this column list, extra columns will be auto-numbered:
+(e.g. "user_defined_1", "user_defined_2", "column3", "column4", etc.)
+
+[id="{version}-plugins-{type}s-{plugin}-convert"]
+===== `convert`
+
+ * Value type is <>
+ * Default value is `{}`
+
+Define a set of datatype conversions to be applied to columns.
+Possible conversions are integer, float, date, date_time, boolean.
+
+Example:
+[source,ruby]
+ filter {
+ csv {
+ convert => {
+ "column1" => "integer"
+ "column2" => "boolean"
+ }
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-quote_char"]
+===== `quote_char`
+
+ * Value type is <>
+ * Default value is `"\""`
+
+Define the character used to quote CSV fields. If this is not specified,
+the default is a double quote `"`.
+Optional.
+
+[id="{version}-plugins-{type}s-{plugin}-separator"]
+===== `separator`
+
+ * Value type is <>
+ * Default value is `","`
+
+Define the column separator value. If this is not specified, the default
+is a comma `,`. If you want to define a tabulation as a separator, you need
+to set the value to the actual tab character and not `\t`.
+Optional.
+
+[id="{version}-plugins-{type}s-{plugin}-skip_empty_columns"]
+===== `skip_empty_columns`
+
+ * Value type is <>
+ * Default value is `false`
+
+Define whether empty columns should be skipped.
+Defaults to false. If set to true, columns containing no value will not get set.
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * Value type is <>
+ * Default value is `"message"`
+
+The CSV data in the value of the `source` field will be expanded into a
+data structure.
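+
+For example, a sketch that parses semicolon-separated data from a
+hypothetical `payload` field into named columns (the field and column
+names are illustrative):
+[source,ruby]
+ filter {
+   csv {
+     source => "payload"
+     separator => ";"
+     columns => [ "timestamp", "level", "message" ]
+   }
+ }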
+ +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +Define target field for placing the data. +Defaults to writing to the root of the event. + + + +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/csv-v3.0.4.asciidoc b/docs/versioned-plugins/filters/csv-v3.0.4.asciidoc new file mode 100644 index 000000000..7ce6f5168 --- /dev/null +++ b/docs/versioned-plugins/filters/csv-v3.0.4.asciidoc @@ -0,0 +1,153 @@ +:plugin: csv +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-csv/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Csv filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The CSV filter takes an event field containing CSV data, parses it, +and stores it as individual fields (can optionally specify the names). +This filter can also parse data with any separator, not just commas. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Csv Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-autodetect_column_names>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-autogenerate_column_names>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-columns>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-convert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-quote_char>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-separator>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-skip_empty_columns>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-autodetect_column_names"] +===== `autodetect_column_names` + + * Value type is <> + * Default value is `false` + +Define whether column names should be auto-detected from the header column or not. +Defaults to false. + +[id="{version}-plugins-{type}s-{plugin}-autogenerate_column_names"] +===== `autogenerate_column_names` + + * Value type is <> + * Default value is `true` + +Define whether column names should autogenerated or not. +Defaults to true. If set to false, columns not having a header specified will not be parsed. + +[id="{version}-plugins-{type}s-{plugin}-columns"] +===== `columns` + + * Value type is <> + * Default value is `[]` + +Define a list of column names (in the order they appear in the CSV, +as if it were a header line). If `columns` is not configured, or there +are not enough columns specified, the default column names are +"column1", "column2", etc. 
In the case that there are more columns +in the data than specified in this column list, extra columns will be auto-numbered: +(e.g. "user_defined_1", "user_defined_2", "column3", "column4", etc.) + +[id="{version}-plugins-{type}s-{plugin}-convert"] +===== `convert` + + * Value type is <> + * Default value is `{}` + +Define a set of datatype conversions to be applied to columns. +Possible conversions are integer, float, date, date_time, boolean + +# Example: +[source,ruby] + filter { + csv { + convert => { + "column1" => "integer" + "column2" => "boolean" + } + } + } + +[id="{version}-plugins-{type}s-{plugin}-quote_char"] +===== `quote_char` + + * Value type is <> + * Default value is `"\""` + +Define the character used to quote CSV fields. If this is not specified +the default is a double quote `"`. +Optional. + +[id="{version}-plugins-{type}s-{plugin}-separator"] +===== `separator` + + * Value type is <> + * Default value is `","` + +Define the column separator value. If this is not specified, the default +is a comma `,`. If you want to define a tabulation as a separator, you need +to set the value to the actual tab character and not `\t`. +Optional. + +[id="{version}-plugins-{type}s-{plugin}-skip_empty_columns"] +===== `skip_empty_columns` + + * Value type is <> + * Default value is `false` + +Define whether empty columns should be skipped. +Defaults to false. If set to true, columns containing no value will not get set. + +[id="{version}-plugins-{type}s-{plugin}-source"] +===== `source` + + * Value type is <> + * Default value is `"message"` + +The CSV data in the value of the `source` field will be expanded into a +data structure. + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +Define target field for placing the data. +Defaults to writing to the root of the event. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/csv-v3.0.5.asciidoc b/docs/versioned-plugins/filters/csv-v3.0.5.asciidoc new file mode 100644 index 000000000..01767cdaa --- /dev/null +++ b/docs/versioned-plugins/filters/csv-v3.0.5.asciidoc @@ -0,0 +1,153 @@ +:plugin: csv +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-csv/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Csv filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The CSV filter takes an event field containing CSV data, parses it, +and stores it as individual fields (can optionally specify the names). +This filter can also parse data with any separator, not just commas. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Csv Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-autodetect_column_names>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-autogenerate_column_names>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-columns>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-convert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-quote_char>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-separator>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-skip_empty_columns>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-autodetect_column_names"] +===== `autodetect_column_names` + + * Value type is <> + * Default value is `false` + +Define whether column names should be auto-detected from the header column or not. +Defaults to false. + +[id="{version}-plugins-{type}s-{plugin}-autogenerate_column_names"] +===== `autogenerate_column_names` + + * Value type is <> + * Default value is `true` + +Define whether column names should autogenerated or not. +Defaults to true. If set to false, columns not having a header specified will not be parsed. + +[id="{version}-plugins-{type}s-{plugin}-columns"] +===== `columns` + + * Value type is <> + * Default value is `[]` + +Define a list of column names (in the order they appear in the CSV, +as if it were a header line). If `columns` is not configured, or there +are not enough columns specified, the default column names are +"column1", "column2", etc. In the case that there are more columns +in the data than specified in this column list, extra columns will be auto-numbered: +(e.g. "user_defined_1", "user_defined_2", "column3", "column4", etc.) + +[id="{version}-plugins-{type}s-{plugin}-convert"] +===== `convert` + + * Value type is <> + * Default value is `{}` + +Define a set of datatype conversions to be applied to columns. +Possible conversions are integer, float, date, date_time, boolean + +# Example: +[source,ruby] + filter { + csv { + convert => { + "column1" => "integer" + "column2" => "boolean" + } + } + } + +[id="{version}-plugins-{type}s-{plugin}-quote_char"] +===== `quote_char` + + * Value type is <> + * Default value is `"\""` + +Define the character used to quote CSV fields. If this is not specified +the default is a double quote `"`. +Optional. + +[id="{version}-plugins-{type}s-{plugin}-separator"] +===== `separator` + + * Value type is <> + * Default value is `","` + +Define the column separator value. If this is not specified, the default +is a comma `,`. If you want to define a tabulation as a separator, you need +to set the value to the actual tab character and not `\t`. +Optional. + +[id="{version}-plugins-{type}s-{plugin}-skip_empty_columns"] +===== `skip_empty_columns` + + * Value type is <> + * Default value is `false` + +Define whether empty columns should be skipped. +Defaults to false. If set to true, columns containing no value will not get set. + +[id="{version}-plugins-{type}s-{plugin}-source"] +===== `source` + + * Value type is <> + * Default value is `"message"` + +The CSV data in the value of the `source` field will be expanded into a +data structure. 
+ +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +Define target field for placing the data. +Defaults to writing to the root of the event. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/csv-v3.0.6.asciidoc b/docs/versioned-plugins/filters/csv-v3.0.6.asciidoc new file mode 100644 index 000000000..441d2a0fd --- /dev/null +++ b/docs/versioned-plugins/filters/csv-v3.0.6.asciidoc @@ -0,0 +1,153 @@ +:plugin: csv +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.6 +:release_date: 2017-11-03 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-csv/blob/v3.0.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Csv filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The CSV filter takes an event field containing CSV data, parses it, +and stores it as individual fields (can optionally specify the names). +This filter can also parse data with any separator, not just commas. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Csv Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-autodetect_column_names>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-autogenerate_column_names>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-columns>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-convert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-quote_char>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-separator>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-skip_empty_columns>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-autodetect_column_names"] +===== `autodetect_column_names` + + * Value type is <> + * Default value is `false` + +Define whether column names should be auto-detected from the header column or not. +Defaults to false. + +[id="{version}-plugins-{type}s-{plugin}-autogenerate_column_names"] +===== `autogenerate_column_names` + + * Value type is <> + * Default value is `true` + +Define whether column names should autogenerated or not. +Defaults to true. If set to false, columns not having a header specified will not be parsed. + +[id="{version}-plugins-{type}s-{plugin}-columns"] +===== `columns` + + * Value type is <> + * Default value is `[]` + +Define a list of column names (in the order they appear in the CSV, +as if it were a header line). If `columns` is not configured, or there +are not enough columns specified, the default column names are +"column1", "column2", etc. 
In the case that there are more columns +in the data than specified in this column list, extra columns will be auto-numbered: +(e.g. "user_defined_1", "user_defined_2", "column3", "column4", etc.) + +[id="{version}-plugins-{type}s-{plugin}-convert"] +===== `convert` + + * Value type is <> + * Default value is `{}` + +Define a set of datatype conversions to be applied to columns. +Possible conversions are integer, float, date, date_time, boolean + +# Example: +[source,ruby] + filter { + csv { + convert => { + "column1" => "integer" + "column2" => "boolean" + } + } + } + +[id="{version}-plugins-{type}s-{plugin}-quote_char"] +===== `quote_char` + + * Value type is <> + * Default value is `"\""` + +Define the character used to quote CSV fields. If this is not specified +the default is a double quote `"`. +Optional. + +[id="{version}-plugins-{type}s-{plugin}-separator"] +===== `separator` + + * Value type is <> + * Default value is `","` + +Define the column separator value. If this is not specified, the default +is a comma `,`. If you want to define a tabulation as a separator, you need +to set the value to the actual tab character and not `\t`. +Optional. + +[id="{version}-plugins-{type}s-{plugin}-skip_empty_columns"] +===== `skip_empty_columns` + + * Value type is <> + * Default value is `false` + +Define whether empty columns should be skipped. +Defaults to false. If set to true, columns containing no value will not get set. + +[id="{version}-plugins-{type}s-{plugin}-source"] +===== `source` + + * Value type is <> + * Default value is `"message"` + +The CSV data in the value of the `source` field will be expanded into a +data structure. + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +Define target field for placing the data. +Defaults to writing to the root of the event. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/csv-v3.0.7.asciidoc b/docs/versioned-plugins/filters/csv-v3.0.7.asciidoc new file mode 100644 index 000000000..a6e1c05f2 --- /dev/null +++ b/docs/versioned-plugins/filters/csv-v3.0.7.asciidoc @@ -0,0 +1,153 @@ +:plugin: csv +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.7 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-csv/blob/v3.0.7/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Csv filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The CSV filter takes an event field containing CSV data, parses it, +and stores it as individual fields (can optionally specify the names). +This filter can also parse data with any separator, not just commas. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Csv Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-autodetect_column_names>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-autogenerate_column_names>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-columns>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-convert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-quote_char>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-separator>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-skip_empty_columns>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-autodetect_column_names"] +===== `autodetect_column_names` + + * Value type is <> + * Default value is `false` + +Define whether column names should be auto-detected from the header column or not. +Defaults to false. + +[id="{version}-plugins-{type}s-{plugin}-autogenerate_column_names"] +===== `autogenerate_column_names` + + * Value type is <> + * Default value is `true` + +Define whether column names should autogenerated or not. +Defaults to true. If set to false, columns not having a header specified will not be parsed. + +[id="{version}-plugins-{type}s-{plugin}-columns"] +===== `columns` + + * Value type is <> + * Default value is `[]` + +Define a list of column names (in the order they appear in the CSV, +as if it were a header line). If `columns` is not configured, or there +are not enough columns specified, the default column names are +"column1", "column2", etc. In the case that there are more columns +in the data than specified in this column list, extra columns will be auto-numbered: +(e.g. "user_defined_1", "user_defined_2", "column3", "column4", etc.) + +[id="{version}-plugins-{type}s-{plugin}-convert"] +===== `convert` + + * Value type is <> + * Default value is `{}` + +Define a set of datatype conversions to be applied to columns. +Possible conversions are integer, float, date, date_time, boolean + +# Example: +[source,ruby] + filter { + csv { + convert => { + "column1" => "integer" + "column2" => "boolean" + } + } + } + +[id="{version}-plugins-{type}s-{plugin}-quote_char"] +===== `quote_char` + + * Value type is <> + * Default value is `"\""` + +Define the character used to quote CSV fields. If this is not specified +the default is a double quote `"`. +Optional. + +[id="{version}-plugins-{type}s-{plugin}-separator"] +===== `separator` + + * Value type is <> + * Default value is `","` + +Define the column separator value. If this is not specified, the default +is a comma `,`. If you want to define a tabulation as a separator, you need +to set the value to the actual tab character and not `\t`. +Optional. + +[id="{version}-plugins-{type}s-{plugin}-skip_empty_columns"] +===== `skip_empty_columns` + + * Value type is <> + * Default value is `false` + +Define whether empty columns should be skipped. +Defaults to false. If set to true, columns containing no value will not get set. + +[id="{version}-plugins-{type}s-{plugin}-source"] +===== `source` + + * Value type is <> + * Default value is `"message"` + +The CSV data in the value of the `source` field will be expanded into a +data structure. 
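+
+For instance, a sketch that names three columns up front and types one of them (the
+column names are hypothetical examples, not defaults):
+[source,ruby]
+    filter {
+      csv {
+        columns => ["timestamp", "user", "bytes"]   # names for column1..column3
+        convert => { "bytes" => "integer" }         # cast the third column to an integer
+      }
+    }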
+ +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +Define target field for placing the data. +Defaults to writing to the root of the event. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/date-index.asciidoc b/docs/versioned-plugins/filters/date-index.asciidoc new file mode 100644 index 000000000..de442d9a2 --- /dev/null +++ b/docs/versioned-plugins/filters/date-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: date +:type: filter + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::date-v3.1.9.asciidoc[] +include::date-v3.1.8.asciidoc[] +include::date-v3.1.7.asciidoc[] + diff --git a/docs/versioned-plugins/filters/date-v3.1.7.asciidoc b/docs/versioned-plugins/filters/date-v3.1.7.asciidoc new file mode 100644 index 000000000..c8a6c75fe --- /dev/null +++ b/docs/versioned-plugins/filters/date-v3.1.7.asciidoc @@ -0,0 +1,215 @@ +:plugin: date +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.7 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-date/blob/v3.1.7/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Date filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The date filter is used for parsing dates from fields, and then using that +date or timestamp as the logstash timestamp for the event. + +For example, syslog events usually have timestamps like this: +[source,ruby] + "Apr 17 09:32:01" + +You would use the date format `MMM dd HH:mm:ss` to parse this. + +The date filter is especially important for sorting events and for +backfilling old data. If you don't get the date correct in your +event, then searching for them later will likely sort out of order. + +In the absence of this filter, logstash will choose a timestamp based on the +first time it sees the event (at input time), if the timestamp is not already +set in the event. For example, with file input, the timestamp is set to the +time of each read. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Date Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-locale>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-match>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timezone>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. 
+ +  + +[id="{version}-plugins-{type}s-{plugin}-locale"] +===== `locale` + + * Value type is <> + * There is no default value for this setting. + +Specify a locale to be used for date parsing using either IETF-BCP47 or POSIX language tag. +Simple examples are `en`,`en-US` for BCP47 or `en_US` for POSIX. + +The locale is mostly necessary to be set for parsing month names (pattern with `MMM`) and +weekday names (pattern with `EEE`). + +If not specified, the platform default will be used but for non-english platform default +an english parser will also be used as a fallback mechanism. + +[id="{version}-plugins-{type}s-{plugin}-match"] +===== `match` + + * Value type is <> + * Default value is `[]` + +An array with field name first, and format patterns following, `[ field, +formats... ]` + +If your time field has multiple possible formats, you can do this: +[source,ruby] + match => [ "logdate", "MMM dd yyyy HH:mm:ss", + "MMM d yyyy HH:mm:ss", "ISO8601" ] + +The above will match a syslog (rfc3164) or `iso8601` timestamp. + +There are a few special exceptions. The following format literals exist +to help you save time and ensure correctness of date parsing. + +* `ISO8601` - should parse any valid ISO8601 timestamp, such as + `2011-04-19T03:44:01.103Z` +* `UNIX` - will parse *float or int* value expressing unix time in seconds since epoch like 1326149001.132 as well as 1326149001 +* `UNIX_MS` - will parse **int** value expressing unix time in milliseconds since epoch like 1366125117000 +* `TAI64N` - will parse tai64n time values + +For example, if you have a field `logdate`, with a value that looks like +`Aug 13 2010 00:03:44`, you would use this configuration: +[source,ruby] + filter { + date { + match => [ "logdate", "MMM dd yyyy HH:mm:ss" ] + } + } + +If your field is nested in your structure, you can use the nested +syntax `[foo][bar]` to match its value. For more information, please refer to +<> + +*More details on the syntax* + +The syntax used for parsing date and time text uses letters to indicate the +kind of time value (month, minute, etc), and a repetition of letters to +indicate the form of that value (2-digit month, full month name, etc). + +Here's what you can use to parse dates and times: + +[horizontal] +y:: year + yyyy::: full year number. Example: `2015`. + yy::: two-digit year. Example: `15` for the year 2015. + +M:: month of the year + M::: minimal-digit month. Example: `1` for January and `12` for December. + MM::: two-digit month. zero-padded if needed. Example: `01` for January and `12` for December + MMM::: abbreviated month text. Example: `Jan` for January. Note: The language used depends on your locale. See the `locale` setting for how to change the language. + MMMM::: full month text, Example: `January`. Note: The language used depends on your locale. + +d:: day of the month + d::: minimal-digit day. Example: `1` for the 1st of the month. + dd::: two-digit day, zero-padded if needed. Example: `01` for the 1st of the month. + +H:: hour of the day (24-hour clock) + H::: minimal-digit hour. Example: `0` for midnight. + HH::: two-digit hour, zero-padded if needed. Example: `00` for midnight. + +m:: minutes of the hour (60 minutes per hour) + m::: minimal-digit minutes. Example: `0`. + mm::: two-digit minutes, zero-padded if needed. Example: `00`. + +s:: seconds of the minute (60 seconds per minute) + s::: minimal-digit seconds. Example: `0`. + ss::: two-digit seconds, zero-padded if needed. Example: `00`. 
+
+S:: fraction of a second
+  *Maximum precision is milliseconds (`SSS`). Beyond that, zeroes are appended.*
+  S::: tenths of a second. Example: `0` for a subsecond value `012`
+  SS::: hundredths of a second. Example: `01` for a subsecond value `01`
+  SSS::: thousandths of a second. Example: `012` for a subsecond value `012`
+
+Z:: time zone offset or identity
+  Z::: Timezone offset structured as HHmm (hour and minutes offset from Zulu/UTC). Example: `-0700`.
+  ZZ::: Timezone offset structured as HH:mm (colon in between hour and minute offsets). Example: `-07:00`.
+  ZZZ::: Timezone identity. Example: `America/Los_Angeles`. Note: Valid IDs are listed on the http://joda-time.sourceforge.net/timezones.html[Joda.org available time zones page].
+
+z:: time zone names. *Time zone names ('z') cannot be parsed.*
+
+w:: week of the year
+  w::: minimal-digit week. Example: `1`.
+  ww::: two-digit week, zero-padded if needed. Example: `01`.
+
+D:: day of the year
+
+e:: day of the week (number)
+
+E:: day of the week (text)
+  E, EE, EEE::: Abbreviated day of the week. Example: `Mon`, `Tue`, `Wed`, `Thu`, `Fri`, `Sat`, `Sun`. Note: The actual language of this will depend on your locale.
+  EEEE::: The full text day of the week. Example: `Monday`, `Tuesday`, ... Note: The actual language of this will depend on your locale.
+
+For non-formatting syntax, you'll need to put single-quote characters around the value. For example, if you were parsing the ISO8601 time "2015-01-01T01:12:23", the little "T" isn't a valid time format, and you want to say "literally, a T"; your format would be this: "yyyy-MM-dd'T'HH:mm:ss"
+
+Other less common date units, such as era (G), century (C), am/pm (a), and more, can be learned about on the
+http://www.joda.org/joda-time/key_format.html[joda-time documentation].
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
+===== `tag_on_failure`
+
+ * Value type is <>
+ * Default value is `["_dateparsefailure"]`
+
+Append values to the `tags` field when there has been no
+successful match.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * Default value is `"@timestamp"`
+
+Store the matching timestamp into the given target field. If not provided,
+this defaults to updating the `@timestamp` field of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-timezone"]
+===== `timezone`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Specify a time zone canonical ID to be used for date parsing.
+The valid IDs are listed on the http://joda-time.sourceforge.net/timezones.html[Joda.org available time zones page].
+This is useful in case the time zone cannot be extracted from the value,
+and is not the platform default.
+If this is not specified, the platform default will be used.
+A canonical ID is preferable because it takes care of daylight saving time for you.
+For example, `America/Los_Angeles` or `Europe/Paris` are valid IDs.
+This field can be dynamic and include parts of the event using the `%{field}` syntax.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/date-v3.1.8.asciidoc b/docs/versioned-plugins/filters/date-v3.1.8.asciidoc
new file mode 100644
index 000000000..78186ffd3
--- /dev/null
+++ b/docs/versioned-plugins/filters/date-v3.1.8.asciidoc
@@ -0,0 +1,215 @@
+:plugin: date
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+/////////////////////////////////////////// +:version: v3.1.8 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-date/blob/v3.1.8/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Date filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The date filter is used for parsing dates from fields, and then using that +date or timestamp as the logstash timestamp for the event. + +For example, syslog events usually have timestamps like this: +[source,ruby] + "Apr 17 09:32:01" + +You would use the date format `MMM dd HH:mm:ss` to parse this. + +The date filter is especially important for sorting events and for +backfilling old data. If you don't get the date correct in your +event, then searching for them later will likely sort out of order. + +In the absence of this filter, logstash will choose a timestamp based on the +first time it sees the event (at input time), if the timestamp is not already +set in the event. For example, with file input, the timestamp is set to the +time of each read. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Date Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-locale>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-match>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timezone>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-locale"] +===== `locale` + + * Value type is <> + * There is no default value for this setting. + +Specify a locale to be used for date parsing using either IETF-BCP47 or POSIX language tag. +Simple examples are `en`,`en-US` for BCP47 or `en_US` for POSIX. + +The locale is mostly necessary to be set for parsing month names (pattern with `MMM`) and +weekday names (pattern with `EEE`). + +If not specified, the platform default will be used but for non-english platform default +an english parser will also be used as a fallback mechanism. + +[id="{version}-plugins-{type}s-{plugin}-match"] +===== `match` + + * Value type is <> + * Default value is `[]` + +An array with field name first, and format patterns following, `[ field, +formats... ]` + +If your time field has multiple possible formats, you can do this: +[source,ruby] + match => [ "logdate", "MMM dd yyyy HH:mm:ss", + "MMM d yyyy HH:mm:ss", "ISO8601" ] + +The above will match a syslog (rfc3164) or `iso8601` timestamp. + +There are a few special exceptions. The following format literals exist +to help you save time and ensure correctness of date parsing. 
+ +* `ISO8601` - should parse any valid ISO8601 timestamp, such as + `2011-04-19T03:44:01.103Z` +* `UNIX` - will parse *float or int* value expressing unix time in seconds since epoch like 1326149001.132 as well as 1326149001 +* `UNIX_MS` - will parse **int** value expressing unix time in milliseconds since epoch like 1366125117000 +* `TAI64N` - will parse tai64n time values + +For example, if you have a field `logdate`, with a value that looks like +`Aug 13 2010 00:03:44`, you would use this configuration: +[source,ruby] + filter { + date { + match => [ "logdate", "MMM dd yyyy HH:mm:ss" ] + } + } + +If your field is nested in your structure, you can use the nested +syntax `[foo][bar]` to match its value. For more information, please refer to +<> + +*More details on the syntax* + +The syntax used for parsing date and time text uses letters to indicate the +kind of time value (month, minute, etc), and a repetition of letters to +indicate the form of that value (2-digit month, full month name, etc). + +Here's what you can use to parse dates and times: + +[horizontal] +y:: year + yyyy::: full year number. Example: `2015`. + yy::: two-digit year. Example: `15` for the year 2015. + +M:: month of the year + M::: minimal-digit month. Example: `1` for January and `12` for December. + MM::: two-digit month. zero-padded if needed. Example: `01` for January and `12` for December + MMM::: abbreviated month text. Example: `Jan` for January. Note: The language used depends on your locale. See the `locale` setting for how to change the language. + MMMM::: full month text, Example: `January`. Note: The language used depends on your locale. + +d:: day of the month + d::: minimal-digit day. Example: `1` for the 1st of the month. + dd::: two-digit day, zero-padded if needed. Example: `01` for the 1st of the month. + +H:: hour of the day (24-hour clock) + H::: minimal-digit hour. Example: `0` for midnight. + HH::: two-digit hour, zero-padded if needed. Example: `00` for midnight. + +m:: minutes of the hour (60 minutes per hour) + m::: minimal-digit minutes. Example: `0`. + mm::: two-digit minutes, zero-padded if needed. Example: `00`. + +s:: seconds of the minute (60 seconds per minute) + s::: minimal-digit seconds. Example: `0`. + ss::: two-digit seconds, zero-padded if needed. Example: `00`. + +S:: fraction of a second + *Maximum precision is milliseconds (`SSS`). Beyond that, zeroes are appended.* + S::: tenths of a second. Example: `0` for a subsecond value `012` + SS::: hundredths of a second. Example: `01` for a subsecond value `01` + SSS::: thousandths of a second. Example: `012` for a subsecond value `012` + +Z:: time zone offset or identity + Z::: Timezone offset structured as HHmm (hour and minutes offset from Zulu/UTC). Example: `-0700`. + ZZ::: Timezone offset structured as HH:mm (colon in between hour and minute offsets). Example: `-07:00`. + ZZZ::: Timezone identity. Example: `America/Los_Angeles`. Note: Valid IDs are listed on the http://joda-time.sourceforge.net/timezones.html[Joda.org available time zones page]. + +z:: time zone names. *Time zone names ('z') cannot be parsed.* + +w:: week of the year + w::: minimal-digit week. Example: `1`. + ww::: two-digit week, zero-padded if needed. Example: `01`. + +D:: day of the year + +e:: day of the week (number) + +E:: day of the week (text) + E, EE, EEE::: Abbreviated day of the week. Example: `Mon`, `Tue`, `Wed`, `Thu`, `Fri`, `Sat`, `Sun`. Note: The actual language of this will depend on your locale. + EEEE::: The full text day of the week. 
Example: `Monday`, `Tuesday`, ... Note: The actual language of this will depend on your locale. + +For non-formatting syntax, you'll need to put single-quote characters around the value. For example, if you were parsing ISO8601 time, "2015-01-01T01:12:23" that little "T" isn't a valid time format, and you want to say "literally, a T", your format would be this: "yyyy-MM-dd'T'HH:mm:ss" + +Other less common date units, such as era (G), century \(C), am/pm (a), and # more, can be learned about on the +http://www.joda.org/joda-time/key_format.html[joda-time documentation]. + +[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] +===== `tag_on_failure` + + * Value type is <> + * Default value is `["_dateparsefailure"]` + +Append values to the `tags` field when there has been no +successful match + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * Default value is `"@timestamp"` + +Store the matching timestamp into the given target field. If not provided, +default to updating the `@timestamp` field of the event. + +[id="{version}-plugins-{type}s-{plugin}-timezone"] +===== `timezone` + + * Value type is <> + * There is no default value for this setting. + +Specify a time zone canonical ID to be used for date parsing. +The valid IDs are listed on the http://joda-time.sourceforge.net/timezones.html[Joda.org available time zones page]. +This is useful in case the time zone cannot be extracted from the value, +and is not the platform default. +If this is not specified the platform default will be used. +Canonical ID is good as it takes care of daylight saving time for you +For example, `America/Los_Angeles` or `Europe/Paris` are valid IDs. +This field can be dynamic and include parts of the event using the `%{field}` syntax + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/date-v3.1.9.asciidoc b/docs/versioned-plugins/filters/date-v3.1.9.asciidoc new file mode 100644 index 000000000..18e82c276 --- /dev/null +++ b/docs/versioned-plugins/filters/date-v3.1.9.asciidoc @@ -0,0 +1,215 @@ +:plugin: date +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.9 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-date/blob/v3.1.9/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Date filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The date filter is used for parsing dates from fields, and then using that +date or timestamp as the logstash timestamp for the event. + +For example, syslog events usually have timestamps like this: +[source,ruby] + "Apr 17 09:32:01" + +You would use the date format `MMM dd HH:mm:ss` to parse this. + +The date filter is especially important for sorting events and for +backfilling old data. If you don't get the date correct in your +event, then searching for them later will likely sort out of order. + +In the absence of this filter, logstash will choose a timestamp based on the +first time it sees the event (at input time), if the timestamp is not already +set in the event. 
For example, with file input, the timestamp is set to the +time of each read. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Date Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-locale>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-match>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timezone>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-locale"] +===== `locale` + + * Value type is <> + * There is no default value for this setting. + +Specify a locale to be used for date parsing using either IETF-BCP47 or POSIX language tag. +Simple examples are `en`,`en-US` for BCP47 or `en_US` for POSIX. + +The locale is mostly necessary to be set for parsing month names (pattern with `MMM`) and +weekday names (pattern with `EEE`). + +If not specified, the platform default will be used but for non-english platform default +an english parser will also be used as a fallback mechanism. + +[id="{version}-plugins-{type}s-{plugin}-match"] +===== `match` + + * Value type is <> + * Default value is `[]` + +An array with field name first, and format patterns following, `[ field, +formats... ]` + +If your time field has multiple possible formats, you can do this: +[source,ruby] + match => [ "logdate", "MMM dd yyyy HH:mm:ss", + "MMM d yyyy HH:mm:ss", "ISO8601" ] + +The above will match a syslog (rfc3164) or `iso8601` timestamp. + +There are a few special exceptions. The following format literals exist +to help you save time and ensure correctness of date parsing. + +* `ISO8601` - should parse any valid ISO8601 timestamp, such as + `2011-04-19T03:44:01.103Z` +* `UNIX` - will parse *float or int* value expressing unix time in seconds since epoch like 1326149001.132 as well as 1326149001 +* `UNIX_MS` - will parse **int** value expressing unix time in milliseconds since epoch like 1366125117000 +* `TAI64N` - will parse tai64n time values + +For example, if you have a field `logdate`, with a value that looks like +`Aug 13 2010 00:03:44`, you would use this configuration: +[source,ruby] + filter { + date { + match => [ "logdate", "MMM dd yyyy HH:mm:ss" ] + } + } + +If your field is nested in your structure, you can use the nested +syntax `[foo][bar]` to match its value. For more information, please refer to +<> + +*More details on the syntax* + +The syntax used for parsing date and time text uses letters to indicate the +kind of time value (month, minute, etc), and a repetition of letters to +indicate the form of that value (2-digit month, full month name, etc). + +Here's what you can use to parse dates and times: + +[horizontal] +y:: year + yyyy::: full year number. Example: `2015`. + yy::: two-digit year. Example: `15` for the year 2015. + +M:: month of the year + M::: minimal-digit month. Example: `1` for January and `12` for December. + MM::: two-digit month. zero-padded if needed. Example: `01` for January and `12` for December + MMM::: abbreviated month text. 
Example: `Jan` for January. Note: The language used depends on your locale. See the `locale` setting for how to change the language. + MMMM::: full month text, Example: `January`. Note: The language used depends on your locale. + +d:: day of the month + d::: minimal-digit day. Example: `1` for the 1st of the month. + dd::: two-digit day, zero-padded if needed. Example: `01` for the 1st of the month. + +H:: hour of the day (24-hour clock) + H::: minimal-digit hour. Example: `0` for midnight. + HH::: two-digit hour, zero-padded if needed. Example: `00` for midnight. + +m:: minutes of the hour (60 minutes per hour) + m::: minimal-digit minutes. Example: `0`. + mm::: two-digit minutes, zero-padded if needed. Example: `00`. + +s:: seconds of the minute (60 seconds per minute) + s::: minimal-digit seconds. Example: `0`. + ss::: two-digit seconds, zero-padded if needed. Example: `00`. + +S:: fraction of a second + *Maximum precision is milliseconds (`SSS`). Beyond that, zeroes are appended.* + S::: tenths of a second. Example: `0` for a subsecond value `012` + SS::: hundredths of a second. Example: `01` for a subsecond value `01` + SSS::: thousandths of a second. Example: `012` for a subsecond value `012` + +Z:: time zone offset or identity + Z::: Timezone offset structured as HHmm (hour and minutes offset from Zulu/UTC). Example: `-0700`. + ZZ::: Timezone offset structured as HH:mm (colon in between hour and minute offsets). Example: `-07:00`. + ZZZ::: Timezone identity. Example: `America/Los_Angeles`. Note: Valid IDs are listed on the http://joda-time.sourceforge.net/timezones.html[Joda.org available time zones page]. + +z:: time zone names. *Time zone names ('z') cannot be parsed.* + +w:: week of the year + w::: minimal-digit week. Example: `1`. + ww::: two-digit week, zero-padded if needed. Example: `01`. + +D:: day of the year + +e:: day of the week (number) + +E:: day of the week (text) + E, EE, EEE::: Abbreviated day of the week. Example: `Mon`, `Tue`, `Wed`, `Thu`, `Fri`, `Sat`, `Sun`. Note: The actual language of this will depend on your locale. + EEEE::: The full text day of the week. Example: `Monday`, `Tuesday`, ... Note: The actual language of this will depend on your locale. + +For non-formatting syntax, you'll need to put single-quote characters around the value. For example, if you were parsing ISO8601 time, "2015-01-01T01:12:23" that little "T" isn't a valid time format, and you want to say "literally, a T", your format would be this: "yyyy-MM-dd'T'HH:mm:ss" + +Other less common date units, such as era (G), century \(C), am/pm (a), and # more, can be learned about on the +http://www.joda.org/joda-time/key_format.html[joda-time documentation]. + +[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] +===== `tag_on_failure` + + * Value type is <> + * Default value is `["_dateparsefailure"]` + +Append values to the `tags` field when there has been no +successful match + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * Default value is `"@timestamp"` + +Store the matching timestamp into the given target field. If not provided, +default to updating the `@timestamp` field of the event. + +[id="{version}-plugins-{type}s-{plugin}-timezone"] +===== `timezone` + + * Value type is <> + * There is no default value for this setting. + +Specify a time zone canonical ID to be used for date parsing. +The valid IDs are listed on the http://joda-time.sourceforge.net/timezones.html[Joda.org available time zones page]. 
+This is useful in case the time zone cannot be extracted from the value,
+and is not the platform default.
+If this is not specified, the platform default will be used.
+A canonical ID is preferable because it takes care of daylight saving time for you.
+For example, `America/Los_Angeles` or `Europe/Paris` are valid IDs.
+This field can be dynamic and include parts of the event using the `%{field}` syntax.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/de_dot-index.asciidoc b/docs/versioned-plugins/filters/de_dot-index.asciidoc
new file mode 100644
index 000000000..11b764fcf
--- /dev/null
+++ b/docs/versioned-plugins/filters/de_dot-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: de_dot
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::de_dot-v1.0.3.asciidoc[]
+include::de_dot-v1.0.2.asciidoc[]
+include::de_dot-v1.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/de_dot-v1.0.1.asciidoc b/docs/versioned-plugins/filters/de_dot-v1.0.1.asciidoc
new file mode 100644
index 000000000..967d56ccb
--- /dev/null
+++ b/docs/versioned-plugins/filters/de_dot-v1.0.1.asciidoc
@@ -0,0 +1,82 @@
+:plugin: de_dot
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v1.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-de_dot/blob/v1.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== De_dot filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This filter _appears_ to rename fields by replacing `.` characters with a different
+separator. In reality, it's a somewhat expensive filter that has to copy the
+source field contents to a new destination field (whose name no longer contains
+dots), and then remove the corresponding source field.
+
+It should only be used if no other options are available.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== De_dot Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-nested>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-separator>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-fields"]
+===== `fields`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The `fields` array should contain a list of known fields to act on.
+If undefined, all top-level fields will be checked. Sub-fields must be
+manually specified in the array.
For example: `["field.suffix","[foo][bar.suffix]"]` +will result in "field_suffix" and nested or sub field ["foo"]["bar_suffix"] + +WARNING: This is an expensive operation. + + +[id="{version}-plugins-{type}s-{plugin}-nested"] +===== `nested` + + * Value type is <> + * Default value is `false` + +If `nested` is _true_, then create sub-fields instead of replacing dots with +a different separator. + +[id="{version}-plugins-{type}s-{plugin}-separator"] +===== `separator` + + * Value type is <> + * Default value is `"_"` + +Replace dots with this value. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/de_dot-v1.0.2.asciidoc b/docs/versioned-plugins/filters/de_dot-v1.0.2.asciidoc new file mode 100644 index 000000000..5588ffa4f --- /dev/null +++ b/docs/versioned-plugins/filters/de_dot-v1.0.2.asciidoc @@ -0,0 +1,82 @@ +:plugin: de_dot +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v1.0.2 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-de_dot/blob/v1.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== De_dot filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This filter _appears_ to rename fields by replacing `.` characters with a different +separator. In reality, it's a somewhat expensive filter that has to copy the +source field contents to a new destination field (whose name no longer contains +dots), and then remove the corresponding source field. + +It should only be used if no other options are available. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== De_dot Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-nested>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-separator>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-fields"] +===== `fields` + + * Value type is <> + * There is no default value for this setting. + +The `fields` array should contain a list of known fields to act on. +If undefined, all top-level fields will be checked. Sub-fields must be +manually specified in the array. For example: `["field.suffix","[foo][bar.suffix]"]` +will result in "field_suffix" and nested or sub field ["foo"]["bar_suffix"] + +WARNING: This is an expensive operation. + + +[id="{version}-plugins-{type}s-{plugin}-nested"] +===== `nested` + + * Value type is <> + * Default value is `false` + +If `nested` is _true_, then create sub-fields instead of replacing dots with +a different separator. 
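+
+For example, assuming an event carries a hypothetical dotted field `app.name`, the
+following sketch turns it into the nested field `[app][name]` rather than `app_name`:
+[source,ruby]
+    filter {
+      de_dot {
+        nested => true            # build sub-fields instead of substituting the separator
+        fields => ["app.name"]    # hypothetical field name
+      }
+    }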
+ +[id="{version}-plugins-{type}s-{plugin}-separator"] +===== `separator` + + * Value type is <> + * Default value is `"_"` + +Replace dots with this value. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/de_dot-v1.0.3.asciidoc b/docs/versioned-plugins/filters/de_dot-v1.0.3.asciidoc new file mode 100644 index 000000000..542fcf82a --- /dev/null +++ b/docs/versioned-plugins/filters/de_dot-v1.0.3.asciidoc @@ -0,0 +1,82 @@ +:plugin: de_dot +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v1.0.3 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-de_dot/blob/v1.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== De_dot filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This filter _appears_ to rename fields by replacing `.` characters with a different +separator. In reality, it's a somewhat expensive filter that has to copy the +source field contents to a new destination field (whose name no longer contains +dots), and then remove the corresponding source field. + +It should only be used if no other options are available. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== De_dot Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-nested>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-separator>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-fields"] +===== `fields` + + * Value type is <> + * There is no default value for this setting. + +The `fields` array should contain a list of known fields to act on. +If undefined, all top-level fields will be checked. Sub-fields must be +manually specified in the array. For example: `["field.suffix","[foo][bar.suffix]"]` +will result in "field_suffix" and nested or sub field ["foo"]["bar_suffix"] + +WARNING: This is an expensive operation. + + +[id="{version}-plugins-{type}s-{plugin}-nested"] +===== `nested` + + * Value type is <> + * Default value is `false` + +If `nested` is _true_, then create sub-fields instead of replacing dots with +a different separator. + +[id="{version}-plugins-{type}s-{plugin}-separator"] +===== `separator` + + * Value type is <> + * Default value is `"_"` + +Replace dots with this value. 
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/debug-index.asciidoc b/docs/versioned-plugins/filters/debug-index.asciidoc
new file mode 100644
index 000000000..1d5a1b98e
--- /dev/null
+++ b/docs/versioned-plugins/filters/debug-index.asciidoc
@@ -0,0 +1,10 @@
+:plugin: debug
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+|=======================================================================
+
+
diff --git a/docs/versioned-plugins/filters/dissect-index.asciidoc b/docs/versioned-plugins/filters/dissect-index.asciidoc
new file mode 100644
index 000000000..d4215f146
--- /dev/null
+++ b/docs/versioned-plugins/filters/dissect-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: dissect
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-11-02
+| <> | 2017-06-23
+|=======================================================================
+
+include::dissect-v1.1.2.asciidoc[]
+include::dissect-v1.1.1.asciidoc[]
+include::dissect-v1.0.9.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/dissect-v1.0.9.asciidoc b/docs/versioned-plugins/filters/dissect-v1.0.9.asciidoc
new file mode 100644
index 000000000..41803fe1a
--- /dev/null
+++ b/docs/versioned-plugins/filters/dissect-v1.0.9.asciidoc
@@ -0,0 +1,213 @@
+:plugin: dissect
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v1.0.9
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-dissect/blob/v1.0.9/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="plugins-{type}-{plugin}"]
+
+=== Dissect filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The Dissect filter is a kind of split operation. Unlike a regular split operation where one delimiter is applied to the whole string, this operation applies a set of delimiters to a string value.
+
+Dissect does not use regular expressions and is very fast.
+
+However, if the structure of your text varies from line to line then Grok is more suitable.
+
+There is a hybrid case where Dissect can be used to de-structure the section of the line that is reliably repeated and then Grok can be used on the remaining field values with more regex predictability and less overall work to do.
+
+
+A set of fields and delimiters is called a *dissection*.
+
+The dissection is described using a set of `%{}` sections:
+....
+%{a} - %{b} - %{c}
+....
+
+A *field* is the text from `%` to `}` inclusive.
+
+A *delimiter* is the text between `}` and `%` characters.
+
+[NOTE]
+Delimiters can't contain the `}{%` characters.
+
+The config might look like this:
+....
+    filter {
+      dissect {
+        mapping => {
+          "message" => "%{ts} %{+ts} %{+ts} %{src} %{} %{prog}[%{pid}]: %{msg}"
+        }
+      }
+    }
+....
+When dissecting a string from left to right, text is captured up to the first delimiter - this captured text is stored in the first field.
This is repeated for each field/delimiter pair thereafter until the last delimiter is reached, then *the remaining text is stored in the last field*.
+
+
+*The Key:*
+
+The key is the text between the `%{` and `}`, exclusive of the ?, +, & prefixes and the ordinal suffix.
+
+`%{?aaa}` - key is `aaa`
+
+`%{+bbb/3}` - key is `bbb`
+
+`%{&ccc}` - key is `ccc`
+
+
+*Normal field notation:*
+
+The found value is added to the Event using the key.
+
+`%{some_field}` - a normal field has no prefix or suffix
+
+*Skip field notation:*
+
+The found value is stored internally but not added to the Event.
+
+The key, if supplied, is prefixed with a `?`.
+
+`%{}` is an empty skip field.
+
+`%{?foo}` is a named skip field.
+
+*Append field notation:*
+
+The value is appended to another value or stored if it's the first field seen.
+
+The key is prefixed with a `+`.
+
+The final value is stored in the Event using the key.
+
+
+[NOTE]
+====
+The delimiter found before the field is used to join it to the value it is appended to.
+
+If no delimiter is found before the field, a single space character is used.
+====
+
+`%{+some_field}` is an append field.
+
+`%{+some_field/2}` is an append field with an order modifier.
+
+An order modifier, `/digits`, allows one to reorder the append sequence.
+
+e.g. for a text of `1 2 3 go`, this `%{+a/2} %{+a/1} %{+a/4} %{+a/3}` will build a key/value of `a => 2 1 go 3`
+
+Append fields without an order modifier will append in declared order.
+
+e.g. for a text of `1 2 3 go`, this `%{a} %{b} %{+a}` will build two key/values of `a => 1 3 go, b => 2`
+
+
+*Indirect field notation:*
+
+The found value is added to the Event using the found value of another field as the key.
+
+The key is prefixed with a `&`.
+
+`%{&some_field}` - an indirect field where the key is indirectly sourced from the value of `some_field`.
+
+e.g. for a text of `error: some_error, some_description`, this `error: %{?err}, %{&err}` will build a key/value of `some_error => some_description`.
+
+[NOTE]
+For append and indirect fields, the key can refer to a field that already exists in the event before dissection.
+
+[NOTE]
+Use a skip field if you do not want the indirection key/value stored.
+
+e.g. for a text of `google: 77.98`, this `%{?a}: %{&a}` will build a key/value of `google => 77.98`.
+
+[NOTE]
+===============================
+append and indirect cannot be combined and will fail validation.
+
+`%{+&something}` - will add a value to the `&something` key, probably not the intended outcome.
+
+`%{&+something}` will add a value to the `+something` key, again probably unintended.
+
+===============================
+
+*Delimiter repetition:*
+
+If a field in the source text has variable width and is padded with delimiters, the padding will be ignored.
+
+e.g. for texts of:
+....
+00000043 ViewReceiver I
+000000b3 Peer         I
+....
+with a dissection of `%{a} %{b} %{c}`; the padding is ignored, `event.get([c]) -> "I"`
+
+[NOTE]
+====
+You probably want to use this filter inside an `if` block.
+
+This ensures that the event contains a field value with a suitable structure for the dissection.
+====
+
+For example...
+....
+filter {
+  if [type] == "syslog" or "syslog" in [tags] {
+    dissect {
+      mapping => {
+        "message" => "%{ts} %{+ts} %{+ts} %{src} %{} %{prog}[%{pid}]: %{msg}"
+      }
+    }
+  }
+}
+....
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Dissect Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-convert_datatype>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-mapping>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-convert_datatype"] +===== `convert_datatype` + + * Value type is <> + * Default value is `{}` + +With this setting `int` and `float` datatype conversions can be specified. + +These will be done after all `mapping` dissections have taken place. + +Feel free to use this setting on its own without a `mapping` section. + + +For example +[source, ruby] +filter { + dissect { + convert_datatype => { + cpu => "float" + code => "int" + } + } +} + +[id="{version}-plugins-{type}s-{plugin}-mapping"] +===== `mapping` + + * Value type is <> + * Default value is `{}` + +A hash of dissections of `field => value` + +A later dissection can be done on values from a previous dissection or they can be independent. + +For example +[source, ruby] +filter { + dissect { + mapping => { + "message" => "%{field1} %{field2} %{description}" + "description" => "%{field3} %{field4} %{field5}" + } + } +} + +This is useful if you want to keep the field `description` but also +dissect it some more. + +[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] +===== `tag_on_failure` + + * Value type is <> + * Default value is `["_dissectfailure"]` + +Append values to the `tags` field when dissection fails + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/dissect-v1.1.1.asciidoc b/docs/versioned-plugins/filters/dissect-v1.1.1.asciidoc new file mode 100644 index 000000000..4078fad74 --- /dev/null +++ b/docs/versioned-plugins/filters/dissect-v1.1.1.asciidoc @@ -0,0 +1,283 @@ +:plugin: dissect +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v1.1.1 +:release_date: 2017-11-02 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-dissect/blob/v1.1.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Dissect filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The Dissect filter is a kind of split operation. Unlike a regular split operation where one delimiter is applied to +the whole string, this operation applies a set of delimiters to a string value. + +Dissect does not use regular expressions and is very fast. + +However, if the structure of your text varies from line to line then Grok is more suitable. + +There is a hybrid case where Dissect can be used to de-structure the section of the line that is reliably repeated and +then Grok can be used on the remaining field values with more regex predictability and less overall work to do. + + +A set of fields and delimiters is called a *dissection*. + +The dissection is described using a set of `%{}` sections: +.... 
+%{a} - %{b} - %{c}
+....
+
+A *field* is the text from `%` to `}` inclusive.
+
+A *delimiter* is the text between a `}` and the next `%{`.
+
+[NOTE]
+Any set of characters that does not fit the `%{`, `'not }'`, `}` pattern is a delimiter.
+
+The config might look like this:
+....
+    filter {
+      dissect {
+        mapping => {
+          "message" => "%{ts} %{+ts} %{+ts} %{src} %{} %{prog}[%{pid}]: %{msg}"
+        }
+      }
+    }
+....
+When dissecting a string from left to right, text is captured up to the first delimiter - this captured text is stored in the first field.
+This is repeated for each field/delimiter pair thereafter until the last delimiter is reached, then *the remaining text is stored in the last field*.
+
+
+*The Key:*
+
+The key is the text between the `%{` and `}`, exclusive of the ?, +, & prefixes and the ordinal suffix.
+
+`%{?aaa}` - key is `aaa`
+
+`%{+bbb/3}` - key is `bbb`
+
+`%{&ccc}` - key is `ccc`
+
+
+===== Normal field notation
+The found value is added to the Event using the key.
+
+`%{some_field}` - a normal field has no prefix or suffix
+
+===== Skip field notation
+The found value is stored internally but not added to the Event.
+
+The key, if supplied, is prefixed with a `?`.
+
+`%{}` is an empty skip field.
+
+`%{?foo}` is a named skip field.
+
+===== Append field notation
+The value is appended to another value or stored if it's the first field seen.
+
+The key is prefixed with a `+`.
+
+The final value is stored in the Event using the key.
+
+
+[NOTE]
+====
+The delimiter found before the field is used to join it to the value it is appended to.
+
+If no delimiter is found before the field, a single space character is used.
+====
+
+`%{+some_field}` is an append field.
+
+`%{+some_field/2}` is an append field with an order modifier.
+
+An order modifier, `/digits`, allows one to reorder the append sequence.
+
+e.g. for a text of `1 2 3 go`, this `%{+a/2} %{+a/1} %{+a/4} %{+a/3}` will build a key/value of `a => 2 1 go 3`
+
+Append fields without an order modifier will append in declared order.
+
+e.g. for a text of `1 2 3 go`, this `%{a} %{b} %{+a}` will build two key/values of `a => 1 3 go, b => 2`
+
+
+===== Indirect field notation
+The found value is added to the Event using the found value of another field as the key.
+
+The key is prefixed with a `&`.
+
+`%{&some_field}` - an indirect field where the key is indirectly sourced from the value of `some_field`.
+
+e.g. for a text of `error: some_error, some_description`, this `error: %{?err}, %{&err}` will build a key/value of `some_error => some_description`.
+
+[NOTE]
+For append and indirect fields, the key can refer to a field that already exists in the event before dissection.
+
+[NOTE]
+Use a skip field if you do not want the indirection key/value stored.
+
+e.g. for a text of `google: 77.98`, this `%{?a}: %{&a}` will build a key/value of `google => 77.98`.
+
+[NOTE]
+===============================
+append and indirect cannot be combined and will fail validation.
+
+`%{+&something}` - will add a value to the `&something` key, probably not the intended outcome.
+
+`%{&+something}` will add a value to the `+something` key, again probably unintended.
+
+===============================
+
+==== Multiple Consecutive Delimiter Handling
+
+[IMPORTANT]
+===============================
+Starting from version 1.1.1 of this plugin, multiple found delimiter handling has changed.
+Now multiple consecutive delimiters will be seen as missing fields by default and not padding.
+If you are already using Dissect and your source text has fields padded with extra delimiters,
+you will need to change your config. Please read the section below.
+===============================
+
+===== Empty data between delimiters
+Given this text as the sample used to create a dissection:
+....
+John Smith,Big Oaks,Wood Lane,Hambledown,Canterbury,CB34RY
+....
+The created dissection, with 6 fields, is:
+....
+%{name},%{addr1},%{addr2},%{addr3},%{city},%{zip}
+....
+When a line like this is processed:
+....
+Jane Doe,4321 Fifth Avenue,,,New York,87432
+....
+Dissect will create an event with empty fields for `addr2` and `addr3`, like so:
+....
+{
+  "name": "Jane Doe",
+  "addr1": "4321 Fifth Avenue",
+  "addr2": "",
+  "addr3": "",
+  "city": "New York",
+  "zip": "87432"
+}
+....
+
+===== Delimiters used as padding to visually align fields
+*Padding to the right hand side*
+
+Given these texts as the samples used to create a dissection:
+....
+00000043 ViewReceive machine-321
+f3000a3b Calc        machine-123
+....
+The dissection, with 3 fields, is:
+....
+%{id} %{function->} %{server}
+....
+Note, above, the second field has a `->` suffix which tells Dissect to ignore padding to its right.
+
+Dissect will create these events:
+....
+{
+  "id": "00000043",
+  "function": "ViewReceive",
+  "server": "machine-321"
+}
+{
+  "id": "f3000a3b",
+  "function": "Calc",
+  "server": "machine-123"
+}
+....
+[IMPORTANT]
+Always add the `->` suffix to the field on the left of the padding.
+
+*Padding to the left hand side (to the human eye)*
+
+Given these texts as the samples used to create a dissection:
+....
+00000043 ViewReceive machine-321
+f3000a3b        Calc machine-123
+....
+The dissection, with 3 fields, is now:
+....
+%{id->} %{function} %{server}
+....
+Here the `->` suffix moves to the `id` field because Dissect sees the padding as being to the right of the `id` field.
+
+
+==== Conditional processing
+
+You probably want to use this filter inside an `if` block.
+
+This ensures that the event contains a field value with a suitable structure for the dissection.
+
+For example...
+....
+filter {
+  if [type] == "syslog" or "syslog" in [tags] {
+    dissect {
+      mapping => {
+        "message" => "%{ts} %{+ts} %{+ts} %{src} %{} %{prog}[%{pid}]: %{msg}"
+      }
+    }
+  }
+}
+....
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Dissect Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-convert_datatype>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-mapping>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-convert_datatype"]
+===== `convert_datatype`
+
+ * Value type is <>
+ * Default value is `{}`
+
+With this setting `int` and `float` datatype conversions can be specified.
+
+These will be done after all `mapping` dissections have taken place.
+
+Feel free to use this setting on its own without a `mapping` section.
+
+
+For example
+[source, ruby]
+filter {
+  dissect {
+    convert_datatype => {
+      cpu => "float"
+      code => "int"
+    }
+  }
+}
+
+[id="{version}-plugins-{type}s-{plugin}-mapping"]
+===== `mapping`
+
+ * Value type is <>
+ * Default value is `{}`
+
+A hash of dissections of `field => value`.
+
+A later dissection can be done on values from a previous dissection or they can be independent.
+
+For example
+[source, ruby]
+filter {
+  dissect {
+    mapping => {
+      "message" => "%{field1} %{field2} %{description}"
+      "description" => "%{field3} %{field4} %{field5}"
+    }
+  }
+}
+
+This is useful if you want to keep the field `description` but also
+dissect it some more.
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
+===== `tag_on_failure`
+
+ * Value type is <>
+ * Default value is `["_dissectfailure"]`
+
+Append values to the `tags` field when dissection fails.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/dissect-v1.1.2.asciidoc b/docs/versioned-plugins/filters/dissect-v1.1.2.asciidoc
new file mode 100644
index 000000000..8fadf53c5
--- /dev/null
+++ b/docs/versioned-plugins/filters/dissect-v1.1.2.asciidoc
@@ -0,0 +1,283 @@
+:plugin: dissect
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v1.1.2
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-dissect/blob/v1.1.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Dissect filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The Dissect filter is a kind of split operation. Unlike a regular split operation where one delimiter is applied to
+the whole string, this operation applies a set of delimiters to a string value.
+
+Dissect does not use regular expressions and is very fast.
+
+However, if the structure of your text varies from line to line then Grok is more suitable.
+
+There is a hybrid case where Dissect can be used to de-structure the section of the line that is reliably repeated and
+then Grok can be used on the remaining field values with more regex predictability and less overall work to do.
+
+
+A set of fields and delimiters is called a *dissection*.
+
+The dissection is described using a set of `%{}` sections:
+....
+%{a} - %{b} - %{c}
+....
+
+A *field* is the text from `%` to `}` inclusive.
+
+A *delimiter* is the text between a `}` and the next `%{`.
+
+[NOTE]
+Any set of characters that does not fit the `%{`, `'not }'`, `}` pattern is a delimiter.
+
+The config might look like this:
+....
+    filter {
+      dissect {
+        mapping => {
+          "message" => "%{ts} %{+ts} %{+ts} %{src} %{} %{prog}[%{pid}]: %{msg}"
+        }
+      }
+    }
+....
+When dissecting a string from left to right, text is captured up to the first delimiter - this captured text is stored in the first field.
+This is repeated for each field/delimiter pair thereafter until the last delimiter is reached, then *the remaining text is stored in the last field*.
+
+
+*The Key:*
+
+The key is the text between the `%{` and `}`, exclusive of the ?, +, & prefixes and the ordinal suffix.
+
+`%{?aaa}` - key is `aaa`
+
+`%{+bbb/3}` - key is `bbb`
+
+`%{&ccc}` - key is `ccc`
+
+
+===== Normal field notation
+The found value is added to the Event using the key.
+
+`%{some_field}` - a normal field has no prefix or suffix
+
+===== Skip field notation
+The found value is stored internally but not added to the Event.
+
+The key, if supplied, is prefixed with a `?`.
+
+`%{}` is an empty skip field.
+
+`%{?foo}` is a named skip field.
+
+===== Append field notation
+The value is appended to another value or stored if it's the first field seen.
+
+The key is prefixed with a `+`.
+
+The final value is stored in the Event using the key.
+
+
+[NOTE]
+====
+The delimiter found before the field is used to join it to the value it is appended to.
+
+If no delimiter is found before the field, a single space character is used.
+====
+
+`%{+some_field}` is an append field.
+
+`%{+some_field/2}` is an append field with an order modifier.
+
+An order modifier, `/digits`, allows one to reorder the append sequence.
+
+e.g. for a text of `1 2 3 go`, this `%{+a/2} %{+a/1} %{+a/4} %{+a/3}` will build a key/value of `a => 2 1 go 3`
+
+Append fields without an order modifier will append in declared order.
+
+e.g. for a text of `1 2 3 go`, this `%{a} %{b} %{+a}` will build two key/values of `a => 1 3 go, b => 2`
+
+
+===== Indirect field notation
+The found value is added to the Event using the found value of another field as the key.
+
+The key is prefixed with a `&`.
+
+`%{&some_field}` - an indirect field where the key is indirectly sourced from the value of `some_field`.
+
+e.g. for a text of `error: some_error, some_description`, this `error: %{?err}, %{&err}` will build a key/value of `some_error => some_description`.
+
+[NOTE]
+For append and indirect fields, the key can refer to a field that already exists in the event before dissection.
+
+[NOTE]
+Use a skip field if you do not want the indirection key/value stored.
+
+e.g. for a text of `google: 77.98`, this `%{?a}: %{&a}` will build a key/value of `google => 77.98`.
+
+[NOTE]
+===============================
+append and indirect cannot be combined and will fail validation.
+
+`%{+&something}` - will add a value to the `&something` key, probably not the intended outcome.
+
+`%{&+something}` will add a value to the `+something` key, again probably unintended.
+
+===============================
+
+==== Multiple Consecutive Delimiter Handling
+
+[IMPORTANT]
+===============================
+Starting from version 1.1.1 of this plugin, multiple found delimiter handling has changed.
+Now multiple consecutive delimiters will be seen as missing fields by default and not padding.
+If you are already using Dissect and your source text has fields padded with extra delimiters,
+you will need to change your config. Please read the section below.
+===============================
+
+===== Empty data between delimiters
+Given this text as the sample used to create a dissection:
+....
+John Smith,Big Oaks,Wood Lane,Hambledown,Canterbury,CB34RY
+....
+The created dissection, with 6 fields, is:
+....
+%{name},%{addr1},%{addr2},%{addr3},%{city},%{zip}
+....
+When a line like this is processed:
+....
+Jane Doe,4321 Fifth Avenue,,,New York,87432
+....
+Dissect will create an event with empty fields for `addr2` and `addr3`, like so:
+....
+{
+  "name": "Jane Doe",
+  "addr1": "4321 Fifth Avenue",
+  "addr2": "",
+  "addr3": "",
+  "city": "New York",
+  "zip": "87432"
+}
+....
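+
+If downstream stages need to treat such records specially, one possible
+follow-up is to test for the empty fields after dissection. This sketch uses
+the `mutate` filter, which is not part of this plugin, and an illustrative
+tag name:
+....
+filter {
+  dissect {
+    mapping => {
+      "message" => "%{name},%{addr1},%{addr2},%{addr3},%{city},%{zip}"
+    }
+  }
+  # Consecutive delimiters produce empty strings, so catch them here.
+  if [addr2] == "" or [addr3] == "" {
+    mutate { add_tag => ["incomplete_address"] }
+  }
+}
+....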
+
+===== Delimiters used as padding to visually align fields
+*Padding to the right hand side*
+
+Given these texts as the samples used to create a dissection:
+....
+00000043 ViewReceive machine-321
+f3000a3b Calc        machine-123
+....
+The dissection, with 3 fields, is:
+....
+%{id} %{function->} %{server}
+....
+Note, above, the second field has a `->` suffix which tells Dissect to ignore padding to its right.
+
+Dissect will create these events:
+....
+{
+  "id": "00000043",
+  "function": "ViewReceive",
+  "server": "machine-321"
+}
+{
+  "id": "f3000a3b",
+  "function": "Calc",
+  "server": "machine-123"
+}
+....
+[IMPORTANT]
+Always add the `->` suffix to the field on the left of the padding.
+
+*Padding to the left hand side (to the human eye)*
+
+Given these texts as the samples used to create a dissection:
+....
+00000043 ViewReceive machine-321
+f3000a3b        Calc machine-123
+....
+The dissection, with 3 fields, is now:
+....
+%{id->} %{function} %{server}
+....
+Here the `->` suffix moves to the `id` field because Dissect sees the padding as being to the right of the `id` field.
+
+
+==== Conditional processing
+
+You probably want to use this filter inside an `if` block.
+
+This ensures that the event contains a field value with a suitable structure for the dissection.
+
+For example...
+....
+filter {
+  if [type] == "syslog" or "syslog" in [tags] {
+    dissect {
+      mapping => {
+        "message" => "%{ts} %{+ts} %{+ts} %{src} %{} %{prog}[%{pid}]: %{msg}"
+      }
+    }
+  }
+}
+....
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Dissect Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-convert_datatype>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-mapping>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-convert_datatype"]
+===== `convert_datatype`
+
+ * Value type is <>
+ * Default value is `{}`
+
+With this setting `int` and `float` datatype conversions can be specified.
+
+These will be done after all `mapping` dissections have taken place.
+
+Feel free to use this setting on its own without a `mapping` section.
+
+
+For example
+[source, ruby]
+filter {
+  dissect {
+    convert_datatype => {
+      cpu => "float"
+      code => "int"
+    }
+  }
+}
+
+[id="{version}-plugins-{type}s-{plugin}-mapping"]
+===== `mapping`
+
+ * Value type is <>
+ * Default value is `{}`
+
+A hash of dissections of `field => value`.
+
+A later dissection can be done on values from a previous dissection or they can be independent.
+
+For example
+[source, ruby]
+filter {
+  dissect {
+    mapping => {
+      "message" => "%{field1} %{field2} %{description}"
+      "description" => "%{field3} %{field4} %{field5}"
+    }
+  }
+}
+
+This is useful if you want to keep the field `description` but also
+dissect it some more.
+ +[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] +===== `tag_on_failure` + + * Value type is <> + * Default value is `["_dissectfailure"]` + +Append values to the `tags` field when dissection fails + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/dns-index.asciidoc b/docs/versioned-plugins/filters/dns-index.asciidoc new file mode 100644 index 000000000..a30b89960 --- /dev/null +++ b/docs/versioned-plugins/filters/dns-index.asciidoc @@ -0,0 +1,18 @@ +:plugin: dns +:type: filter + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-15 +| <> | 2017-08-09 +| <> | 2017-06-23 +|======================================================================= + +include::dns-v3.0.7.asciidoc[] +include::dns-v3.0.6.asciidoc[] +include::dns-v3.0.5.asciidoc[] +include::dns-v3.0.4.asciidoc[] + diff --git a/docs/versioned-plugins/filters/dns-v3.0.4.asciidoc b/docs/versioned-plugins/filters/dns-v3.0.4.asciidoc new file mode 100644 index 000000000..49e8094b7 --- /dev/null +++ b/docs/versioned-plugins/filters/dns-v3.0.4.asciidoc @@ -0,0 +1,161 @@ +:plugin: dns +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-dns/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Dns filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The DNS filter performs a lookup (either an A record/CNAME record lookup +or a reverse lookup at the PTR record) on records specified under the +`reverse` arrays or respectively under the `resolve` arrays. + +The config should look like this: +[source,ruby] + filter { + dns { + reverse => [ "source_host", "field_with_address" ] + resolve => [ "field_with_fqdn" ] + action => "replace" + } + } + +This filter, like all filters, only processes 1 event at a time, so the use +of this plugin can significantly slow down your pipeline's throughput if you +have a high latency network. By way of example, if each DNS lookup takes 2 +milliseconds, the maximum throughput you can achieve with a single filter +worker is 500 events per second (1000 milliseconds / 2 milliseconds). + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Dns Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-action>> |<>, one of `["append", "replace"]`|No +| <<{version}-plugins-{type}s-{plugin}-failed_cache_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-failed_cache_ttl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hit_cache_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hit_cache_ttl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hostsfile>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-nameserver>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-resolve>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reverse>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-action"] +===== `action` + + * Value can be any of: `append`, `replace` + * Default value is `"append"` + +Determine what action to do: append or replace the values in the fields +specified under `reverse` and `resolve`. + +[id="{version}-plugins-{type}s-{plugin}-failed_cache_size"] +===== `failed_cache_size` + + * Value type is <> + * Default value is `0` + +cache size for failed requests + +[id="{version}-plugins-{type}s-{plugin}-failed_cache_ttl"] +===== `failed_cache_ttl` + + * Value type is <> + * Default value is `5` + +how long to cache failed requests (in seconds) + +[id="{version}-plugins-{type}s-{plugin}-hit_cache_size"] +===== `hit_cache_size` + + * Value type is <> + * Default value is `0` + +set the size of cache for successful requests + +[id="{version}-plugins-{type}s-{plugin}-hit_cache_ttl"] +===== `hit_cache_ttl` + + * Value type is <> + * Default value is `60` + +how long to cache successful requests (in seconds) + +[id="{version}-plugins-{type}s-{plugin}-hostsfile"] +===== `hostsfile` + + * Value type is <> + * There is no default value for this setting. + +Use custom hosts file(s). For example: `["/var/db/my_custom_hosts"]` + +[id="{version}-plugins-{type}s-{plugin}-max_retries"] +===== `max_retries` + + * Value type is <> + * Default value is `2` + +number of times to retry a failed resolve/reverse + +[id="{version}-plugins-{type}s-{plugin}-nameserver"] +===== `nameserver` + + * Value type is <> + * There is no default value for this setting. + +Use custom nameserver(s). For example: `["8.8.8.8", "8.8.4.4"]` + +[id="{version}-plugins-{type}s-{plugin}-resolve"] +===== `resolve` + + * Value type is <> + * There is no default value for this setting. + +Forward resolve one or more fields. + +[id="{version}-plugins-{type}s-{plugin}-reverse"] +===== `reverse` + + * Value type is <> + * There is no default value for this setting. + +Reverse resolve one or more fields. 
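+
+Since each lookup is synchronous, the cache settings above can noticeably
+reduce repeated lookups on busy pipelines. A minimal sketch that combines
+them (the cache sizes are illustrative only):
+[source,ruby]
+    filter {
+      dns {
+        resolve => [ "field_with_fqdn" ]
+        action => "replace"
+        # Cache up to 4096 successful and 1024 failed lookups.
+        hit_cache_size => 4096
+        failed_cache_size => 1024
+      }
+    }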
+ +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `0.5` + +`resolv` calls will be wrapped in a timeout instance + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/dns-v3.0.5.asciidoc b/docs/versioned-plugins/filters/dns-v3.0.5.asciidoc new file mode 100644 index 000000000..4902ba804 --- /dev/null +++ b/docs/versioned-plugins/filters/dns-v3.0.5.asciidoc @@ -0,0 +1,161 @@ +:plugin: dns +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-08-09 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-dns/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Dns filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The DNS filter performs a lookup (either an A record/CNAME record lookup +or a reverse lookup at the PTR record) on records specified under the +`reverse` arrays or respectively under the `resolve` arrays. + +The config should look like this: +[source,ruby] + filter { + dns { + reverse => [ "source_host", "field_with_address" ] + resolve => [ "field_with_fqdn" ] + action => "replace" + } + } + +This filter, like all filters, only processes 1 event at a time, so the use +of this plugin can significantly slow down your pipeline's throughput if you +have a high latency network. By way of example, if each DNS lookup takes 2 +milliseconds, the maximum throughput you can achieve with a single filter +worker is 500 events per second (1000 milliseconds / 2 milliseconds). + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Dns Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-action>> |<>, one of `["append", "replace"]`|No +| <<{version}-plugins-{type}s-{plugin}-failed_cache_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-failed_cache_ttl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hit_cache_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hit_cache_ttl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hostsfile>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-nameserver>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-resolve>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reverse>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-action"] +===== `action` + + * Value can be any of: `append`, `replace` + * Default value is `"append"` + +Determine what action to do: append or replace the values in the fields +specified under `reverse` and `resolve`. 
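+
+For example, a minimal sketch of the `replace` action (the field name is
+illustrative):
+[source,ruby]
+    filter {
+      dns {
+        # "replace" overwrites field_with_fqdn with the resolved address;
+        # "append" would add the resolved address alongside the original value.
+        resolve => [ "field_with_fqdn" ]
+        action => "replace"
+      }
+    }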
+ +[id="{version}-plugins-{type}s-{plugin}-failed_cache_size"] +===== `failed_cache_size` + + * Value type is <> + * Default value is `0` + +cache size for failed requests + +[id="{version}-plugins-{type}s-{plugin}-failed_cache_ttl"] +===== `failed_cache_ttl` + + * Value type is <> + * Default value is `5` + +how long to cache failed requests (in seconds) + +[id="{version}-plugins-{type}s-{plugin}-hit_cache_size"] +===== `hit_cache_size` + + * Value type is <> + * Default value is `0` + +set the size of cache for successful requests + +[id="{version}-plugins-{type}s-{plugin}-hit_cache_ttl"] +===== `hit_cache_ttl` + + * Value type is <> + * Default value is `60` + +how long to cache successful requests (in seconds) + +[id="{version}-plugins-{type}s-{plugin}-hostsfile"] +===== `hostsfile` + + * Value type is <> + * There is no default value for this setting. + +Use custom hosts file(s). For example: `["/var/db/my_custom_hosts"]` + +[id="{version}-plugins-{type}s-{plugin}-max_retries"] +===== `max_retries` + + * Value type is <> + * Default value is `2` + +number of times to retry a failed resolve/reverse + +[id="{version}-plugins-{type}s-{plugin}-nameserver"] +===== `nameserver` + + * Value type is <> + * There is no default value for this setting. + +Use custom nameserver(s). For example: `["8.8.8.8", "8.8.4.4"]` + +[id="{version}-plugins-{type}s-{plugin}-resolve"] +===== `resolve` + + * Value type is <> + * There is no default value for this setting. + +Forward resolve one or more fields. + +[id="{version}-plugins-{type}s-{plugin}-reverse"] +===== `reverse` + + * Value type is <> + * There is no default value for this setting. + +Reverse resolve one or more fields. + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `0.5` + +`resolv` calls will be wrapped in a timeout instance + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/dns-v3.0.6.asciidoc b/docs/versioned-plugins/filters/dns-v3.0.6.asciidoc new file mode 100644 index 000000000..07f93a6b5 --- /dev/null +++ b/docs/versioned-plugins/filters/dns-v3.0.6.asciidoc @@ -0,0 +1,161 @@ +:plugin: dns +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.6 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-dns/blob/v3.0.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Dns filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The DNS filter performs a lookup (either an A record/CNAME record lookup +or a reverse lookup at the PTR record) on records specified under the +`reverse` arrays or respectively under the `resolve` arrays. + +The config should look like this: +[source,ruby] + filter { + dns { + reverse => [ "source_host", "field_with_address" ] + resolve => [ "field_with_fqdn" ] + action => "replace" + } + } + +This filter, like all filters, only processes 1 event at a time, so the use +of this plugin can significantly slow down your pipeline's throughput if you +have a high latency network. 
By way of example, if each DNS lookup takes 2 +milliseconds, the maximum throughput you can achieve with a single filter +worker is 500 events per second (1000 milliseconds / 2 milliseconds). + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Dns Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-action>> |<>, one of `["append", "replace"]`|No +| <<{version}-plugins-{type}s-{plugin}-failed_cache_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-failed_cache_ttl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hit_cache_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hit_cache_ttl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hostsfile>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-nameserver>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-resolve>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reverse>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-action"] +===== `action` + + * Value can be any of: `append`, `replace` + * Default value is `"append"` + +Determine what action to do: append or replace the values in the fields +specified under `reverse` and `resolve`. + +[id="{version}-plugins-{type}s-{plugin}-failed_cache_size"] +===== `failed_cache_size` + + * Value type is <> + * Default value is `0` + +cache size for failed requests + +[id="{version}-plugins-{type}s-{plugin}-failed_cache_ttl"] +===== `failed_cache_ttl` + + * Value type is <> + * Default value is `5` + +how long to cache failed requests (in seconds) + +[id="{version}-plugins-{type}s-{plugin}-hit_cache_size"] +===== `hit_cache_size` + + * Value type is <> + * Default value is `0` + +set the size of cache for successful requests + +[id="{version}-plugins-{type}s-{plugin}-hit_cache_ttl"] +===== `hit_cache_ttl` + + * Value type is <> + * Default value is `60` + +how long to cache successful requests (in seconds) + +[id="{version}-plugins-{type}s-{plugin}-hostsfile"] +===== `hostsfile` + + * Value type is <> + * There is no default value for this setting. + +Use custom hosts file(s). For example: `["/var/db/my_custom_hosts"]` + +[id="{version}-plugins-{type}s-{plugin}-max_retries"] +===== `max_retries` + + * Value type is <> + * Default value is `2` + +number of times to retry a failed resolve/reverse + +[id="{version}-plugins-{type}s-{plugin}-nameserver"] +===== `nameserver` + + * Value type is <> + * There is no default value for this setting. + +Use custom nameserver(s). For example: `["8.8.8.8", "8.8.4.4"]` + +[id="{version}-plugins-{type}s-{plugin}-resolve"] +===== `resolve` + + * Value type is <> + * There is no default value for this setting. + +Forward resolve one or more fields. + +[id="{version}-plugins-{type}s-{plugin}-reverse"] +===== `reverse` + + * Value type is <> + * There is no default value for this setting. + +Reverse resolve one or more fields. 
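+
+Since each lookup is synchronous, the cache settings above can noticeably
+reduce repeated lookups on busy pipelines. A minimal sketch that combines
+them (the cache sizes are illustrative only):
+[source,ruby]
+    filter {
+      dns {
+        resolve => [ "field_with_fqdn" ]
+        action => "replace"
+        # Cache up to 4096 successful and 1024 failed lookups.
+        hit_cache_size => 4096
+        failed_cache_size => 1024
+      }
+    }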
+ +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `0.5` + +`resolv` calls will be wrapped in a timeout instance + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/dns-v3.0.7.asciidoc b/docs/versioned-plugins/filters/dns-v3.0.7.asciidoc new file mode 100644 index 000000000..49f3db696 --- /dev/null +++ b/docs/versioned-plugins/filters/dns-v3.0.7.asciidoc @@ -0,0 +1,161 @@ +:plugin: dns +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.7 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-dns/blob/v3.0.7/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Dns filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The DNS filter performs a lookup (either an A record/CNAME record lookup +or a reverse lookup at the PTR record) on records specified under the +`reverse` arrays or respectively under the `resolve` arrays. + +The config should look like this: +[source,ruby] + filter { + dns { + reverse => [ "source_host", "field_with_address" ] + resolve => [ "field_with_fqdn" ] + action => "replace" + } + } + +This filter, like all filters, only processes 1 event at a time, so the use +of this plugin can significantly slow down your pipeline's throughput if you +have a high latency network. By way of example, if each DNS lookup takes 2 +milliseconds, the maximum throughput you can achieve with a single filter +worker is 500 events per second (1000 milliseconds / 2 milliseconds). + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Dns Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-action>> |<>, one of `["append", "replace"]`|No +| <<{version}-plugins-{type}s-{plugin}-failed_cache_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-failed_cache_ttl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hit_cache_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hit_cache_ttl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hostsfile>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-nameserver>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-resolve>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reverse>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-action"] +===== `action` + + * Value can be any of: `append`, `replace` + * Default value is `"append"` + +Determine what action to do: append or replace the values in the fields +specified under `reverse` and `resolve`. 
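+
+For example, a minimal sketch of the `replace` action (the field name is
+illustrative):
+[source,ruby]
+    filter {
+      dns {
+        # "replace" overwrites field_with_fqdn with the resolved address;
+        # "append" would add the resolved address alongside the original value.
+        resolve => [ "field_with_fqdn" ]
+        action => "replace"
+      }
+    }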
+ +[id="{version}-plugins-{type}s-{plugin}-failed_cache_size"] +===== `failed_cache_size` + + * Value type is <> + * Default value is `0` + +cache size for failed requests + +[id="{version}-plugins-{type}s-{plugin}-failed_cache_ttl"] +===== `failed_cache_ttl` + + * Value type is <> + * Default value is `5` + +how long to cache failed requests (in seconds) + +[id="{version}-plugins-{type}s-{plugin}-hit_cache_size"] +===== `hit_cache_size` + + * Value type is <> + * Default value is `0` + +set the size of cache for successful requests + +[id="{version}-plugins-{type}s-{plugin}-hit_cache_ttl"] +===== `hit_cache_ttl` + + * Value type is <> + * Default value is `60` + +how long to cache successful requests (in seconds) + +[id="{version}-plugins-{type}s-{plugin}-hostsfile"] +===== `hostsfile` + + * Value type is <> + * There is no default value for this setting. + +Use custom hosts file(s). For example: `["/var/db/my_custom_hosts"]` + +[id="{version}-plugins-{type}s-{plugin}-max_retries"] +===== `max_retries` + + * Value type is <> + * Default value is `2` + +number of times to retry a failed resolve/reverse + +[id="{version}-plugins-{type}s-{plugin}-nameserver"] +===== `nameserver` + + * Value type is <> + * There is no default value for this setting. + +Use custom nameserver(s). For example: `["8.8.8.8", "8.8.4.4"]` + +[id="{version}-plugins-{type}s-{plugin}-resolve"] +===== `resolve` + + * Value type is <> + * There is no default value for this setting. + +Forward resolve one or more fields. + +[id="{version}-plugins-{type}s-{plugin}-reverse"] +===== `reverse` + + * Value type is <> + * There is no default value for this setting. + +Reverse resolve one or more fields. + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `0.5` + +`resolv` calls will be wrapped in a timeout instance + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/drop-index.asciidoc b/docs/versioned-plugins/filters/drop-index.asciidoc new file mode 100644 index 000000000..eb062420f --- /dev/null +++ b/docs/versioned-plugins/filters/drop-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: drop +:type: filter + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::drop-v3.0.5.asciidoc[] +include::drop-v3.0.4.asciidoc[] +include::drop-v3.0.3.asciidoc[] + diff --git a/docs/versioned-plugins/filters/drop-v3.0.3.asciidoc b/docs/versioned-plugins/filters/drop-v3.0.3.asciidoc new file mode 100644 index 000000000..0f98ffc4c --- /dev/null +++ b/docs/versioned-plugins/filters/drop-v3.0.3.asciidoc @@ -0,0 +1,77 @@ +:plugin: drop +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-drop/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+
+[id="plugins-{type}-{plugin}"]
+
+=== Drop filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Drop filter.
+
+Drops everything that gets to this filter.
+
+This is best used in combination with conditionals, for example:
+[source,ruby]
+    filter {
+      if [loglevel] == "debug" {
+        drop { }
+      }
+    }
+
+The above will only pass events to the drop filter if the `loglevel` field is
+`debug`. This will cause all matching events to be dropped.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Drop Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-percentage>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-percentage"]
+===== `percentage`
+
+ * Value type is <>
+ * Default value is `100`
+
+Drop only a pre-configured percentage of the events.
+
+This is useful if you want to drop only a sample of the events rather than all of them.
+
+For example, to drop around 40% of the events whose `loglevel` field has the value `debug`:
+
+    filter {
+      if [loglevel] == "debug" {
+        drop {
+          percentage => 40
+        }
+      }
+    }
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/drop-v3.0.4.asciidoc b/docs/versioned-plugins/filters/drop-v3.0.4.asciidoc
new file mode 100644
index 000000000..50747961d
--- /dev/null
+++ b/docs/versioned-plugins/filters/drop-v3.0.4.asciidoc
@@ -0,0 +1,77 @@
+:plugin: drop
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-drop/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Drop filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Drop filter.
+
+Drops everything that gets to this filter.
+
+This is best used in combination with conditionals, for example:
+[source,ruby]
+    filter {
+      if [loglevel] == "debug" {
+        drop { }
+      }
+    }
+
+The above will only pass events to the drop filter if the `loglevel` field is
+`debug`. This will cause all matching events to be dropped.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Drop Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-percentage>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-percentage"]
+===== `percentage`
+
+ * Value type is <>
+ * Default value is `100`
+
+Drop only a pre-configured percentage of the events.
+
+This is useful if you want to drop only a sample of the events rather than all of them.
+
+For example, to drop around 40% of the events whose `loglevel` field has the value `debug`:
+
+    filter {
+      if [loglevel] == "debug" {
+        drop {
+          percentage => 40
+        }
+      }
+    }
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/drop-v3.0.5.asciidoc b/docs/versioned-plugins/filters/drop-v3.0.5.asciidoc
new file mode 100644
index 000000000..d07312a0e
--- /dev/null
+++ b/docs/versioned-plugins/filters/drop-v3.0.5.asciidoc
@@ -0,0 +1,77 @@
+:plugin: drop
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-drop/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Drop filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Drop filter.
+
+Drops everything that gets to this filter.
+
+This is best used in combination with conditionals, for example:
+[source,ruby]
+    filter {
+      if [loglevel] == "debug" {
+        drop { }
+      }
+    }
+
+The above will only pass events to the drop filter if the `loglevel` field is
+`debug`. This will cause all matching events to be dropped.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Drop Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-percentage>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-percentage"]
+===== `percentage`
+
+ * Value type is <>
+ * Default value is `100`
+
+Drop only a pre-configured percentage of the events.
+
+This is useful if you want to drop only a sample of the events rather than all of them.
+
+For example, to drop around 40% of the events whose `loglevel` field has the value `debug`:
+ + filter { + if [loglevel] == "debug" { + drop { + percentage => 40 + } + } + } + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/elapsed-index.asciidoc b/docs/versioned-plugins/filters/elapsed-index.asciidoc new file mode 100644 index 000000000..7c99bd2cc --- /dev/null +++ b/docs/versioned-plugins/filters/elapsed-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: elapsed +:type: filter + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-13 +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::elapsed-v4.0.4.asciidoc[] +include::elapsed-v4.0.3.asciidoc[] +include::elapsed-v4.0.2.asciidoc[] + diff --git a/docs/versioned-plugins/filters/elapsed-v4.0.2.asciidoc b/docs/versioned-plugins/filters/elapsed-v4.0.2.asciidoc new file mode 100644 index 000000000..4af9a417b --- /dev/null +++ b/docs/versioned-plugins/filters/elapsed-v4.0.2.asciidoc @@ -0,0 +1,168 @@ +:plugin: elapsed +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.0.2 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-elapsed/blob/v4.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Elapsed filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The elapsed filter tracks a pair of start/end events and uses their +timestamps to calculate the elapsed time between them. + +The filter has been developed to track the execution time of processes and +other long tasks. + +The configuration looks like this: +[source,ruby] + filter { + elapsed { + start_tag => "start event tag" + end_tag => "end event tag" + unique_id_field => "id field name" + timeout => seconds + new_event_on_match => true/false + } + } + +The events managed by this filter must have some particular properties. +The event describing the start of the task (the "start event") must contain +a tag equal to `start_tag`. On the other side, the event describing the end +of the task (the "end event") must contain a tag equal to `end_tag`. Both +these two kinds of event need to own an ID field which identify uniquely that +particular task. The name of this field is stored in `unique_id_field`. + +You can use a Grok filter to prepare the events for the elapsed filter. +An example of configuration can be: +[source,ruby] + filter { + grok { + match => { "message" => "%{TIMESTAMP_ISO8601} START id: (?.*)" } + add_tag => [ "taskStarted" ] + } + + grok { + match => { "message" => "%{TIMESTAMP_ISO8601} END id: (?.*)" } + add_tag => [ "taskTerminated" ] + } + + elapsed { + start_tag => "taskStarted" + end_tag => "taskTerminated" + unique_id_field => "task_id" + } + } + +The elapsed filter collects all the "start events". If two, or more, "start +events" have the same ID, only the first one is recorded, the others are +discarded. + +When an "end event" matching a previously collected "start event" is +received, there is a match. 
The configuration property `new_event_on_match` +tells where to insert the elapsed information: they can be added to the +"end event" or a new "match event" can be created. Both events store the +following information: + +* the tags `elapsed` and `elapsed_match` +* the field `elapsed_time` with the difference, in seconds, between + the two events timestamps +* an ID filed with the task ID +* the field `elapsed_timestamp_start` with the timestamp of the start event + +If the "end event" does not arrive before "timeout" seconds, the +"start event" is discarded and an "expired event" is generated. This event +contains: + +* the tags `elapsed` and `elapsed_expired_error` +* a field called `elapsed_time` with the age, in seconds, of the + "start event" +* an ID filed with the task ID +* the field `elapsed_timestamp_start` with the timestamp of the "start event" + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Elapsed Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-end_tag>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-new_event_on_match>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-start_tag>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-unique_id_field>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-end_tag"] +===== `end_tag` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The name of the tag identifying the "end event" + +[id="{version}-plugins-{type}s-{plugin}-new_event_on_match"] +===== `new_event_on_match` + + * Value type is <> + * Default value is `false` + +This property manage what to do when an "end event" matches a "start event". +If it's set to `false` (default value), the elapsed information are added +to the "end event"; if it's set to `true` a new "match event" is created. + +[id="{version}-plugins-{type}s-{plugin}-start_tag"] +===== `start_tag` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The name of the tag identifying the "start event" + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `1800` + +The amount of seconds after an "end event" can be considered lost. +The corresponding "start event" is discarded and an "expired event" +is generated. The default value is 30 minutes (1800 seconds). + +[id="{version}-plugins-{type}s-{plugin}-unique_id_field"] +===== `unique_id_field` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The name of the field containing the task ID. +This value must uniquely identify the task in the system, otherwise +it's impossible to match the couple of events. 
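+
+As a usage sketch (the tag and field names are carried over from the grok
+example above; the `timeout` value is only an illustration), the filter can
+be told to emit a separate "match event" and to expire unmatched tasks after
+five minutes:
+
+[source,ruby]
+    filter {
+      elapsed {
+        start_tag => "taskStarted"
+        end_tag => "taskTerminated"
+        unique_id_field => "task_id"
+        timeout => 300              # discard unmatched "start events" after 5 minutes
+        new_event_on_match => true  # emit a new "match event" instead of enriching the "end event"
+      }
+    }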
+ + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/elapsed-v4.0.3.asciidoc b/docs/versioned-plugins/filters/elapsed-v4.0.3.asciidoc new file mode 100644 index 000000000..f60b65943 --- /dev/null +++ b/docs/versioned-plugins/filters/elapsed-v4.0.3.asciidoc @@ -0,0 +1,168 @@ +:plugin: elapsed +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.0.3 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-elapsed/blob/v4.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Elapsed filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The elapsed filter tracks a pair of start/end events and uses their +timestamps to calculate the elapsed time between them. + +The filter has been developed to track the execution time of processes and +other long tasks. + +The configuration looks like this: +[source,ruby] + filter { + elapsed { + start_tag => "start event tag" + end_tag => "end event tag" + unique_id_field => "id field name" + timeout => seconds + new_event_on_match => true/false + } + } + +The events managed by this filter must have some particular properties. +The event describing the start of the task (the "start event") must contain +a tag equal to `start_tag`. On the other side, the event describing the end +of the task (the "end event") must contain a tag equal to `end_tag`. Both +these two kinds of event need to own an ID field which identify uniquely that +particular task. The name of this field is stored in `unique_id_field`. + +You can use a Grok filter to prepare the events for the elapsed filter. +An example of configuration can be: +[source,ruby] + filter { + grok { + match => { "message" => "%{TIMESTAMP_ISO8601} START id: (?.*)" } + add_tag => [ "taskStarted" ] + } + + grok { + match => { "message" => "%{TIMESTAMP_ISO8601} END id: (?.*)" } + add_tag => [ "taskTerminated" ] + } + + elapsed { + start_tag => "taskStarted" + end_tag => "taskTerminated" + unique_id_field => "task_id" + } + } + +The elapsed filter collects all the "start events". If two, or more, "start +events" have the same ID, only the first one is recorded, the others are +discarded. + +When an "end event" matching a previously collected "start event" is +received, there is a match. The configuration property `new_event_on_match` +tells where to insert the elapsed information: they can be added to the +"end event" or a new "match event" can be created. Both events store the +following information: + +* the tags `elapsed` and `elapsed_match` +* the field `elapsed_time` with the difference, in seconds, between + the two events timestamps +* an ID filed with the task ID +* the field `elapsed_timestamp_start` with the timestamp of the start event + +If the "end event" does not arrive before "timeout" seconds, the +"start event" is discarded and an "expired event" is generated. 
This event +contains: + +* the tags `elapsed` and `elapsed_expired_error` +* a field called `elapsed_time` with the age, in seconds, of the + "start event" +* an ID filed with the task ID +* the field `elapsed_timestamp_start` with the timestamp of the "start event" + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Elapsed Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-end_tag>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-new_event_on_match>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-start_tag>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-unique_id_field>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-end_tag"] +===== `end_tag` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The name of the tag identifying the "end event" + +[id="{version}-plugins-{type}s-{plugin}-new_event_on_match"] +===== `new_event_on_match` + + * Value type is <> + * Default value is `false` + +This property manage what to do when an "end event" matches a "start event". +If it's set to `false` (default value), the elapsed information are added +to the "end event"; if it's set to `true` a new "match event" is created. + +[id="{version}-plugins-{type}s-{plugin}-start_tag"] +===== `start_tag` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The name of the tag identifying the "start event" + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `1800` + +The amount of seconds after an "end event" can be considered lost. +The corresponding "start event" is discarded and an "expired event" +is generated. The default value is 30 minutes (1800 seconds). + +[id="{version}-plugins-{type}s-{plugin}-unique_id_field"] +===== `unique_id_field` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The name of the field containing the task ID. +This value must uniquely identify the task in the system, otherwise +it's impossible to match the couple of events. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/elapsed-v4.0.4.asciidoc b/docs/versioned-plugins/filters/elapsed-v4.0.4.asciidoc new file mode 100644 index 000000000..aa3f1765d --- /dev/null +++ b/docs/versioned-plugins/filters/elapsed-v4.0.4.asciidoc @@ -0,0 +1,168 @@ +:plugin: elapsed +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.0.4 +:release_date: 2017-11-13 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-elapsed/blob/v4.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elapsed filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The elapsed filter tracks a pair of start/end events and uses their
+timestamps to calculate the elapsed time between them.
+
+The filter has been developed to track the execution time of processes and
+other long tasks.
+
+The configuration looks like this:
+[source,ruby]
+    filter {
+      elapsed {
+        start_tag => "start event tag"
+        end_tag => "end event tag"
+        unique_id_field => "id field name"
+        timeout => seconds
+        new_event_on_match => true/false
+      }
+    }
+
+The events managed by this filter must have some particular properties.
+The event describing the start of the task (the "start event") must contain
+a tag equal to `start_tag`. Similarly, the event describing the end of the
+task (the "end event") must contain a tag equal to `end_tag`. Both kinds of
+event also need an ID field that uniquely identifies that particular task.
+The name of this field is stored in `unique_id_field`.
+
+You can use a Grok filter to prepare the events for the elapsed filter.
+An example of configuration can be:
+[source,ruby]
+    filter {
+      grok {
+        match => { "message" => "%{TIMESTAMP_ISO8601} START id: (?<task_id>.*)" }
+        add_tag => [ "taskStarted" ]
+      }
+
+      grok {
+        match => { "message" => "%{TIMESTAMP_ISO8601} END id: (?<task_id>.*)" }
+        add_tag => [ "taskTerminated" ]
+      }
+
+      elapsed {
+        start_tag => "taskStarted"
+        end_tag => "taskTerminated"
+        unique_id_field => "task_id"
+      }
+    }
+
+The elapsed filter collects all the "start events". If two or more "start
+events" have the same ID, only the first one is recorded; the others are
+discarded.
+
+When an "end event" matching a previously collected "start event" is
+received, there is a match. The configuration property `new_event_on_match`
+tells where to insert the elapsed information: it can be added to the
+"end event", or a new "match event" can be created. Both events store the
+following information:
+
+* the tags `elapsed` and `elapsed_match`
+* the field `elapsed_time` with the difference, in seconds, between
+  the two events' timestamps
+* an ID field with the task ID
+* the field `elapsed_timestamp_start` with the timestamp of the start event
+
+If the "end event" does not arrive within `timeout` seconds, the
+"start event" is discarded and an "expired event" is generated. This event
+contains:
+
+* the tags `elapsed` and `elapsed_expired_error`
+* a field called `elapsed_time` with the age, in seconds, of the
+  "start event"
+* an ID field with the task ID
+* the field `elapsed_timestamp_start` with the timestamp of the "start event"
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elapsed Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-end_tag>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-new_event_on_match>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-start_tag>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-unique_id_field>> |<<string,string>>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-end_tag"]
+===== `end_tag`
+
+  * This is a required setting.
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+The name of the tag identifying the "end event".
+
+[id="{version}-plugins-{type}s-{plugin}-new_event_on_match"]
+===== `new_event_on_match`
+
+  * Value type is <<boolean,boolean>>
+  * Default value is `false`
+
+This property manages what to do when an "end event" matches a "start event".
+If it's set to `false` (the default), the elapsed information is added
+to the "end event"; if it's set to `true`, a new "match event" is created.
+
+[id="{version}-plugins-{type}s-{plugin}-start_tag"]
+===== `start_tag`
+
+  * This is a required setting.
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+The name of the tag identifying the "start event".
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+  * Value type is <<number,number>>
+  * Default value is `1800`
+
+The number of seconds after which an "end event" is considered lost.
+The corresponding "start event" is discarded and an "expired event"
+is generated. The default value is 30 minutes (1800 seconds).
+
+[id="{version}-plugins-{type}s-{plugin}-unique_id_field"]
+===== `unique_id_field`
+
+  * This is a required setting.
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+The name of the field containing the task ID.
+This value must uniquely identify the task in the system; otherwise
+it's impossible to match the pair of events.
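+
+As a downstream usage sketch (the `task_status` field is hypothetical, not
+part of the plugin), the documented tags can be used in a later conditional
+to separate matches from expirations:
+
+[source,ruby]
+    filter {
+      if "elapsed_expired_error" in [tags] {
+        # the "start event" timed out; [elapsed_time] holds its age in seconds
+        mutate { add_field => { "task_status" => "expired" } }
+      } else if "elapsed_match" in [tags] {
+        mutate { add_field => { "task_status" => "completed" } }
+      }
+    }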
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/elasticsearch-index.asciidoc b/docs/versioned-plugins/filters/elasticsearch-index.asciidoc
new file mode 100644
index 000000000..659140872
--- /dev/null
+++ b/docs/versioned-plugins/filters/elasticsearch-index.asciidoc
@@ -0,0 +1,22 @@
+:plugin: elasticsearch
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <<v3.2.1-plugins-filters-elasticsearch,v3.2.1>> | 2017-11-07
+| <<v3.2.0-plugins-filters-elasticsearch,v3.2.0>> | 2017-09-29
+| <<v3.1.6-plugins-filters-elasticsearch,v3.1.6>> | 2017-08-15
+| <<v3.1.5-plugins-filters-elasticsearch,v3.1.5>> | 2017-07-28
+| <<v3.1.4-plugins-filters-elasticsearch,v3.1.4>> | 2017-06-23
+| <<v3.1.3-plugins-filters-elasticsearch,v3.1.3>> | 2017-05-03
+|=======================================================================
+
+include::elasticsearch-v3.2.1.asciidoc[]
+include::elasticsearch-v3.2.0.asciidoc[]
+include::elasticsearch-v3.1.6.asciidoc[]
+include::elasticsearch-v3.1.5.asciidoc[]
+include::elasticsearch-v3.1.4.asciidoc[]
+include::elasticsearch-v3.1.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/elasticsearch-v3.1.3.asciidoc b/docs/versioned-plugins/filters/elasticsearch-v3.1.3.asciidoc
new file mode 100644
index 000000000..918be3965
--- /dev/null
+++ b/docs/versioned-plugins/filters/elasticsearch-v3.1.3.asciidoc
@@ -0,0 +1,236 @@
+:plugin: elasticsearch
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.3
+:release_date: 2017-05-03
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-elasticsearch/blob/v3.1.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch filter
+plugin to version 3.1.1 or higher.
+
+================================================================================
+
+Search Elasticsearch for a previous log event and copy some fields from it
+into the current event. Below are two complete examples of how this filter might
+be used.
+
+The first example uses the legacy 'query' parameter, where the user is limited to an Elasticsearch query_string.
+Whenever logstash receives an "end" event, it uses this elasticsearch
+filter to find the matching "start" event based on some operation identifier.
+Then it copies the `@timestamp` field from the "start" event into a new field on
+the "end" event. Finally, using a combination of the "date" filter and the
+"ruby" filter, we calculate the time duration in hours between the two events.
+[source,ruby]
+--------------------------------------------------
+    if [type] == "end" {
+       elasticsearch {
+          hosts => ["es-server"]
+          query => "type:start AND operation:%{[opid]}"
+          fields => { "@timestamp" => "started" }
+       }
+
+       date {
+          match => ["[started]", "ISO8601"]
+          target => "[started]"
+       }
+
+       ruby {
+          code => "event['duration_hrs'] = (event['@timestamp'] - event['started']) / 3600 rescue nil"
+       }
+    }
+--------------------------------------------------
+
+The example below reproduces the above example but utilises the query_template. This query_template represents a full
+Elasticsearch query DSL and supports the standard Logstash field substitution syntax. The example below issues
+the same query as the first example but uses the template shown.
+
+[source,ruby]
+--------------------------------------------------
+    if [type] == "end" {
+       elasticsearch {
+          hosts => ["es-server"]
+          query_template => "template.json"
+       }
+
+       date {
+          match => ["[started]", "ISO8601"]
+          target => "[started]"
+       }
+
+       ruby {
+          code => "event['duration_hrs'] = (event['@timestamp'] - event['started']) / 3600 rescue nil"
+       }
+    }
+--------------------------------------------------
+
+template.json:
+
+[source,json]
+--------------------------------------------------
+    {
+       "query": {
+          "query_string": {
+             "query": "type:start AND operation:%{[opid]}"
+          }
+       },
+       "_source": ["@timestamp", "started"]
+    }
+--------------------------------------------------
+
+As illustrated above, through the use of 'opid', fields from the Logstash events can be referenced within the template.
+The template will be populated per event prior to being used to query Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-enable_sort>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-fields>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-query>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-query_template>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-result_size>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-sort>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-ca_file"]
+===== `ca_file`
+
+  * Value type is <<path,path>>
+  * There is no default value for this setting.
+
+SSL Certificate Authority file.
+
+[id="{version}-plugins-{type}s-{plugin}-enable_sort"]
+===== `enable_sort`
+
+  * Value type is <<boolean,boolean>>
+  * Default value is `true`
+
+Whether results should be sorted or not.
+
+[id="{version}-plugins-{type}s-{plugin}-fields"]
+===== `fields`
+
+  * Value type is <<array,array>>
+  * Default value is `{}`
+
+Array of fields to copy from the old event (found via elasticsearch) into the new event.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+  * Value type is <<array,array>>
+  * Default value is `["localhost:9200"]`
+
+List of elasticsearch hosts to use for querying.
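+
+For instance, a minimal sketch (the hostnames are placeholders) pointing the
+filter at two nodes with explicit ports:
+
+[source,ruby]
+    filter {
+      elasticsearch {
+        hosts => ["es1.example.com:9200", "es2.example.com:9200"]
+      }
+    }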
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+  * Value type is <<string,string>>
+  * Default value is `""`
+
+Comma-delimited list of index names to search; use `_all` or an empty string to perform the operation on all indices.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+  * Value type is <<password,password>>
+  * There is no default value for this setting.
+
+Basic Auth - password
+
+[id="{version}-plugins-{type}s-{plugin}-query"]
+===== `query`
+
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+Elasticsearch query string. Read the Elasticsearch query string documentation
+for more info at: https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-query-string-query.html#query-string-syntax
+
+[id="{version}-plugins-{type}s-{plugin}-query_template"]
+===== `query_template`
+
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+File path to an Elasticsearch query in DSL format. Read the Elasticsearch query documentation
+for more info at: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html
+
+[id="{version}-plugins-{type}s-{plugin}-result_size"]
+===== `result_size`
+
+  * Value type is <<number,number>>
+  * Default value is `1`
+
+How many results to return.
+
+[id="{version}-plugins-{type}s-{plugin}-sort"]
+===== `sort`
+
+  * Value type is <<string,string>>
+  * Default value is `"@timestamp:desc"`
+
+Comma-delimited list of `<field>:<direction>` pairs that define the sort order.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+  * Value type is <<boolean,boolean>>
+  * Default value is `false`
+
+Whether to enable SSL.
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
+===== `tag_on_failure`
+
+  * Value type is <<array,array>>
+  * Default value is `["_elasticsearch_lookup_failure"]`
+
+Tags to apply to the event when the lookup fails. This can be used in later analysis.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+Basic Auth - username
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/elasticsearch-v3.1.4.asciidoc b/docs/versioned-plugins/filters/elasticsearch-v3.1.4.asciidoc
new file mode 100644
index 000000000..a352c7eaa
--- /dev/null
+++ b/docs/versioned-plugins/filters/elasticsearch-v3.1.4.asciidoc
@@ -0,0 +1,237 @@
+:plugin: elasticsearch
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.4
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-elasticsearch/blob/v3.1.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch filter
+plugin to version 3.1.1 or higher.
+ +================================================================================ + +Search Elasticsearch for a previous log event and copy some fields from it +into the current event. Below are two complete examples of how this filter might +be used. + +The first example uses the legacy 'query' parameter where the user is limited to an Elasticsearch query_string. +Whenever logstash receives an "end" event, it uses this elasticsearch +filter to find the matching "start" event based on some operation identifier. +Then it copies the `@timestamp` field from the "start" event into a new field on +the "end" event. Finally, using a combination of the "date" filter and the +"ruby" filter, we calculate the time duration in hours between the two events. +[source,ruby] +-------------------------------------------------- + if [type] == "end" { + elasticsearch { + hosts => ["es-server"] + query => "type:start AND operation:%{[opid]}" + fields => { "@timestamp" => "started" } + } + + date { + match => ["[started]", "ISO8601"] + target => "[started]" + } + + ruby { + code => "event['duration_hrs'] = (event['@timestamp'] - event['started']) / 3600 rescue nil" + } + } + + The example below reproduces the above example but utilises the query_template. This query_template represents a full + Elasticsearch query DSL and supports the standard Logstash field substitution syntax. The example below issues + the same query as the first example but uses the template shown. + + if [type] == "end" { + elasticsearch { + hosts => ["es-server"] + query_template => "template.json" + } + + date { + match => ["[started]", "ISO8601"] + target => "[started]" + } + + ruby { + code => "event['duration_hrs'] = (event['@timestamp'] - event['started']) / 3600 rescue nil" + } + } + + + + template.json: + + { + "query": { + "query_string": { + "query": "type:start AND operation:%{[opid]}" + } + }, + "_source": ["@timestamp", "started"] + } + +As illustrated above, through the use of 'opid', fields from the Logstash events can be referenced within the template. +The template will be populated per event prior to being used to query Elasticsearch. + +-------------------------------------------------- + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Elasticsearch Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-enable_sort>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-query>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-query_template>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-result_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sort>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. 
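+
+Before the per-option reference below, here is a hedged sketch (the host and
+field names come from the example above) that combines several of the options
+from the table in one lookup:
+
+[source,ruby]
+    filter {
+      elasticsearch {
+        hosts => ["es-server"]
+        query => "type:start AND operation:%{[opid]}"
+        fields => { "@timestamp" => "started" }
+        result_size => 1           # keep only the best match
+        sort => "@timestamp:desc"  # newest event first
+      }
+    }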
+ +  + +[id="{version}-plugins-{type}s-{plugin}-ca_file"] +===== `ca_file` + + * Value type is <> + * There is no default value for this setting. + +SSL Certificate Authority file + +[id="{version}-plugins-{type}s-{plugin}-enable_sort"] +===== `enable_sort` + + * Value type is <> + * Default value is `true` + +Whether results should be sorted or not + +[id="{version}-plugins-{type}s-{plugin}-fields"] +===== `fields` + + * Value type is <> + * Default value is `{}` + +Array of fields to copy from old event (found via elasticsearch) into new event + +[id="{version}-plugins-{type}s-{plugin}-hosts"] +===== `hosts` + + * Value type is <> + * Default value is `["localhost:9200"]` + +List of elasticsearch hosts to use for querying. + +[id="{version}-plugins-{type}s-{plugin}-index"] +===== `index` + + * Value type is <> + * Default value is `""` + +Comma-delimited list of index names to search; use `_all` or empty string to perform the operation on all indices + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Basic Auth - password + +[id="{version}-plugins-{type}s-{plugin}-query"] +===== `query` + + * Value type is <> + * There is no default value for this setting. + +Elasticsearch query string. Read the Elasticsearch query string documentation. +for more info at: https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-query-string-query.html#query-string-syntax + +[id="{version}-plugins-{type}s-{plugin}-query_template"] +===== `query_template` + + * Value type is <> + * There is no default value for this setting. + +File path to elasticsearch query in DSL format. Read the Elasticsearch query documentation +for more info at: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html + +[id="{version}-plugins-{type}s-{plugin}-result_size"] +===== `result_size` + + * Value type is <> + * Default value is `1` + +How many results to return + +[id="{version}-plugins-{type}s-{plugin}-sort"] +===== `sort` + + * Value type is <> + * Default value is `"@timestamp:desc"` + +Comma-delimited list of `:` pairs that define the sort order + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * Default value is `false` + +SSL + +[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] +===== `tag_on_failure` + + * Value type is <> + * Default value is `["_elasticsearch_lookup_failure"]` + +Tags the event on failure to look up geo information. This can be used in later analysis. + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * There is no default value for this setting. + +Basic Auth - username + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/elasticsearch-v3.1.5.asciidoc b/docs/versioned-plugins/filters/elasticsearch-v3.1.5.asciidoc new file mode 100644 index 000000000..b6d8e1bf2 --- /dev/null +++ b/docs/versioned-plugins/filters/elasticsearch-v3.1.5.asciidoc @@ -0,0 +1,237 @@ +:plugin: elasticsearch +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// +:version: v3.1.5 +:release_date: 2017-07-28 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-elasticsearch/blob/v3.1.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Elasticsearch filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +.Compatibility Note +[NOTE] +================================================================================ +Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting] +called `http.content_type.required`. If this option is set to `true`, and you +are using Logstash 2.4 through 5.2, you need to update the Elasticsearch filter +plugin to version 3.1.1 or higher. + +================================================================================ + +Search Elasticsearch for a previous log event and copy some fields from it +into the current event. Below are two complete examples of how this filter might +be used. + +The first example uses the legacy 'query' parameter where the user is limited to an Elasticsearch query_string. +Whenever logstash receives an "end" event, it uses this elasticsearch +filter to find the matching "start" event based on some operation identifier. +Then it copies the `@timestamp` field from the "start" event into a new field on +the "end" event. Finally, using a combination of the "date" filter and the +"ruby" filter, we calculate the time duration in hours between the two events. +[source,ruby] +-------------------------------------------------- + if [type] == "end" { + elasticsearch { + hosts => ["es-server"] + query => "type:start AND operation:%{[opid]}" + fields => { "@timestamp" => "started" } + } + + date { + match => ["[started]", "ISO8601"] + target => "[started]" + } + + ruby { + code => "event['duration_hrs'] = (event['@timestamp'] - event['started']) / 3600 rescue nil" + } + } + + The example below reproduces the above example but utilises the query_template. This query_template represents a full + Elasticsearch query DSL and supports the standard Logstash field substitution syntax. The example below issues + the same query as the first example but uses the template shown. + + if [type] == "end" { + elasticsearch { + hosts => ["es-server"] + query_template => "template.json" + } + + date { + match => ["[started]", "ISO8601"] + target => "[started]" + } + + ruby { + code => "event['duration_hrs'] = (event['@timestamp'] - event['started']) / 3600 rescue nil" + } + } + + + + template.json: + + { + "query": { + "query_string": { + "query": "type:start AND operation:%{[opid]}" + } + }, + "_source": ["@timestamp", "started"] + } + +As illustrated above, through the use of 'opid', fields from the Logstash events can be referenced within the template. +The template will be populated per event prior to being used to query Elasticsearch. + +-------------------------------------------------- + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Elasticsearch Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-enable_sort>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-query>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-query_template>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-result_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sort>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-ca_file"] +===== `ca_file` + + * Value type is <> + * There is no default value for this setting. + +SSL Certificate Authority file + +[id="{version}-plugins-{type}s-{plugin}-enable_sort"] +===== `enable_sort` + + * Value type is <> + * Default value is `true` + +Whether results should be sorted or not + +[id="{version}-plugins-{type}s-{plugin}-fields"] +===== `fields` + + * Value type is <> + * Default value is `{}` + +Array of fields to copy from old event (found via elasticsearch) into new event + +[id="{version}-plugins-{type}s-{plugin}-hosts"] +===== `hosts` + + * Value type is <> + * Default value is `["localhost:9200"]` + +List of elasticsearch hosts to use for querying. + +[id="{version}-plugins-{type}s-{plugin}-index"] +===== `index` + + * Value type is <> + * Default value is `""` + +Comma-delimited list of index names to search; use `_all` or empty string to perform the operation on all indices + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Basic Auth - password + +[id="{version}-plugins-{type}s-{plugin}-query"] +===== `query` + + * Value type is <> + * There is no default value for this setting. + +Elasticsearch query string. Read the Elasticsearch query string documentation. +for more info at: https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-query-string-query.html#query-string-syntax + +[id="{version}-plugins-{type}s-{plugin}-query_template"] +===== `query_template` + + * Value type is <> + * There is no default value for this setting. + +File path to elasticsearch query in DSL format. 
Read the Elasticsearch query documentation +for more info at: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html + +[id="{version}-plugins-{type}s-{plugin}-result_size"] +===== `result_size` + + * Value type is <> + * Default value is `1` + +How many results to return + +[id="{version}-plugins-{type}s-{plugin}-sort"] +===== `sort` + + * Value type is <> + * Default value is `"@timestamp:desc"` + +Comma-delimited list of `:` pairs that define the sort order + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * Default value is `false` + +SSL + +[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] +===== `tag_on_failure` + + * Value type is <> + * Default value is `["_elasticsearch_lookup_failure"]` + +Tags the event on failure to look up geo information. This can be used in later analysis. + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * There is no default value for this setting. + +Basic Auth - username + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/elasticsearch-v3.1.6.asciidoc b/docs/versioned-plugins/filters/elasticsearch-v3.1.6.asciidoc new file mode 100644 index 000000000..af3ffb9b8 --- /dev/null +++ b/docs/versioned-plugins/filters/elasticsearch-v3.1.6.asciidoc @@ -0,0 +1,237 @@ +:plugin: elasticsearch +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.6 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-elasticsearch/blob/v3.1.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Elasticsearch filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +.Compatibility Note +[NOTE] +================================================================================ +Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting] +called `http.content_type.required`. If this option is set to `true`, and you +are using Logstash 2.4 through 5.2, you need to update the Elasticsearch filter +plugin to version 3.1.1 or higher. + +================================================================================ + +Search Elasticsearch for a previous log event and copy some fields from it +into the current event. Below are two complete examples of how this filter might +be used. + +The first example uses the legacy 'query' parameter where the user is limited to an Elasticsearch query_string. +Whenever logstash receives an "end" event, it uses this elasticsearch +filter to find the matching "start" event based on some operation identifier. +Then it copies the `@timestamp` field from the "start" event into a new field on +the "end" event. Finally, using a combination of the "date" filter and the +"ruby" filter, we calculate the time duration in hours between the two events. 
+[source,ruby] +-------------------------------------------------- + if [type] == "end" { + elasticsearch { + hosts => ["es-server"] + query => "type:start AND operation:%{[opid]}" + fields => { "@timestamp" => "started" } + } + + date { + match => ["[started]", "ISO8601"] + target => "[started]" + } + + ruby { + code => "event['duration_hrs'] = (event['@timestamp'] - event['started']) / 3600 rescue nil" + } + } + + The example below reproduces the above example but utilises the query_template. This query_template represents a full + Elasticsearch query DSL and supports the standard Logstash field substitution syntax. The example below issues + the same query as the first example but uses the template shown. + + if [type] == "end" { + elasticsearch { + hosts => ["es-server"] + query_template => "template.json" + } + + date { + match => ["[started]", "ISO8601"] + target => "[started]" + } + + ruby { + code => "event['duration_hrs'] = (event['@timestamp'] - event['started']) / 3600 rescue nil" + } + } + + + + template.json: + + { + "query": { + "query_string": { + "query": "type:start AND operation:%{[opid]}" + } + }, + "_source": ["@timestamp", "started"] + } + +As illustrated above, through the use of 'opid', fields from the Logstash events can be referenced within the template. +The template will be populated per event prior to being used to query Elasticsearch. + +-------------------------------------------------- + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Elasticsearch Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-enable_sort>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-query>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-query_template>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-result_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sort>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-ca_file"] +===== `ca_file` + + * Value type is <> + * There is no default value for this setting. + +SSL Certificate Authority file + +[id="{version}-plugins-{type}s-{plugin}-enable_sort"] +===== `enable_sort` + + * Value type is <> + * Default value is `true` + +Whether results should be sorted or not + +[id="{version}-plugins-{type}s-{plugin}-fields"] +===== `fields` + + * Value type is <> + * Default value is `{}` + +Array of fields to copy from old event (found via elasticsearch) into new event + +[id="{version}-plugins-{type}s-{plugin}-hosts"] +===== `hosts` + + * Value type is <> + * Default value is `["localhost:9200"]` + +List of elasticsearch hosts to use for querying. 
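+
+A hedged sketch (certificate path and credentials are placeholders) of
+querying a TLS-protected cluster with the security options documented below:
+
+[source,ruby]
+    filter {
+      elasticsearch {
+        hosts => ["es-server"]
+        ssl => true
+        ca_file => "/path/to/ca.crt"  # CA that signed the cluster certificate
+        user => "logstash_user"
+        password => "changeme"
+      }
+    }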
+ +[id="{version}-plugins-{type}s-{plugin}-index"] +===== `index` + + * Value type is <> + * Default value is `""` + +Comma-delimited list of index names to search; use `_all` or empty string to perform the operation on all indices + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Basic Auth - password + +[id="{version}-plugins-{type}s-{plugin}-query"] +===== `query` + + * Value type is <> + * There is no default value for this setting. + +Elasticsearch query string. Read the Elasticsearch query string documentation. +for more info at: https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-query-string-query.html#query-string-syntax + +[id="{version}-plugins-{type}s-{plugin}-query_template"] +===== `query_template` + + * Value type is <> + * There is no default value for this setting. + +File path to elasticsearch query in DSL format. Read the Elasticsearch query documentation +for more info at: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html + +[id="{version}-plugins-{type}s-{plugin}-result_size"] +===== `result_size` + + * Value type is <> + * Default value is `1` + +How many results to return + +[id="{version}-plugins-{type}s-{plugin}-sort"] +===== `sort` + + * Value type is <> + * Default value is `"@timestamp:desc"` + +Comma-delimited list of `:` pairs that define the sort order + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * Default value is `false` + +SSL + +[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] +===== `tag_on_failure` + + * Value type is <> + * Default value is `["_elasticsearch_lookup_failure"]` + +Tags the event on failure to look up geo information. This can be used in later analysis. + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * There is no default value for this setting. + +Basic Auth - username + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/elasticsearch-v3.2.0.asciidoc b/docs/versioned-plugins/filters/elasticsearch-v3.2.0.asciidoc new file mode 100644 index 000000000..ed802d177 --- /dev/null +++ b/docs/versioned-plugins/filters/elasticsearch-v3.2.0.asciidoc @@ -0,0 +1,238 @@ +:plugin: elasticsearch +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.2.0 +:release_date: 2017-09-29 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-elasticsearch/blob/v3.2.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Elasticsearch filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +.Compatibility Note +[NOTE] +================================================================================ +Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting] +called `http.content_type.required`. If this option is set to `true`, and you +are using Logstash 2.4 through 5.2, you need to update the Elasticsearch filter +plugin to version 3.1.1 or higher. 
+ +================================================================================ + +Search Elasticsearch for a previous log event and copy some fields from it +into the current event. Below are two complete examples of how this filter might +be used. + +The first example uses the legacy 'query' parameter where the user is limited to an Elasticsearch query_string. +Whenever logstash receives an "end" event, it uses this elasticsearch +filter to find the matching "start" event based on some operation identifier. +Then it copies the `@timestamp` field from the "start" event into a new field on +the "end" event. Finally, using a combination of the "date" filter and the +"ruby" filter, we calculate the time duration in hours between the two events. +[source,ruby] +-------------------------------------------------- + if [type] == "end" { + elasticsearch { + hosts => ["es-server"] + query => "type:start AND operation:%{[opid]}" + fields => { "@timestamp" => "started" } + } + + date { + match => ["[started]", "ISO8601"] + target => "[started]" + } + + ruby { + code => "event['duration_hrs'] = (event['@timestamp'] - event['started']) / 3600 rescue nil" + } + } + + The example below reproduces the above example but utilises the query_template. This query_template represents a full + Elasticsearch query DSL and supports the standard Logstash field substitution syntax. The example below issues + the same query as the first example but uses the template shown. + + if [type] == "end" { + elasticsearch { + hosts => ["es-server"] + query_template => "template.json" + } + + date { + match => ["[started]", "ISO8601"] + target => "[started]" + } + + ruby { + code => "event['duration_hrs'] = (event['@timestamp'] - event['started']) / 3600 rescue nil" + } + } + + + + template.json: + + { + "query": { + "query_string": { + "query": "type:start AND operation:%{[opid]}" + } + }, + "_source": ["@timestamp", "started"] + } + +As illustrated above, through the use of 'opid', fields from the Logstash events can be referenced within the template. +The template will be populated per event prior to being used to query Elasticsearch. + +-------------------------------------------------- + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Elasticsearch Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-enable_sort>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-query>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-query_template>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-result_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sort>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. 
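+
+This version's `index` option documents per-event field substitution (see its
+description below); a hedged sketch, where `date_field` is a hypothetical
+event field:
+
+[source,ruby]
+    filter {
+      elasticsearch {
+        hosts => ["es-server"]
+        index => "index-name-%{date_field}"
+        query => "type:start AND operation:%{[opid]}"
+      }
+    }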
+ +  + +[id="{version}-plugins-{type}s-{plugin}-ca_file"] +===== `ca_file` + + * Value type is <> + * There is no default value for this setting. + +SSL Certificate Authority file + +[id="{version}-plugins-{type}s-{plugin}-enable_sort"] +===== `enable_sort` + + * Value type is <> + * Default value is `true` + +Whether results should be sorted or not + +[id="{version}-plugins-{type}s-{plugin}-fields"] +===== `fields` + + * Value type is <> + * Default value is `{}` + +Array of fields to copy from old event (found via elasticsearch) into new event + +[id="{version}-plugins-{type}s-{plugin}-hosts"] +===== `hosts` + + * Value type is <> + * Default value is `["localhost:9200"]` + +List of elasticsearch hosts to use for querying. + +[id="{version}-plugins-{type}s-{plugin}-index"] +===== `index` + + * Value type is <> + * Default value is `""` + +Comma-delimited list of index names to search; use `_all` or empty string to perform the operation on all indices. +Field substitution (e.g. `index-name-%{date_field}`) is available + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Basic Auth - password + +[id="{version}-plugins-{type}s-{plugin}-query"] +===== `query` + + * Value type is <> + * There is no default value for this setting. + +Elasticsearch query string. Read the Elasticsearch query string documentation. +for more info at: https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-query-string-query.html#query-string-syntax + +[id="{version}-plugins-{type}s-{plugin}-query_template"] +===== `query_template` + + * Value type is <> + * There is no default value for this setting. + +File path to elasticsearch query in DSL format. Read the Elasticsearch query documentation +for more info at: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html + +[id="{version}-plugins-{type}s-{plugin}-result_size"] +===== `result_size` + + * Value type is <> + * Default value is `1` + +How many results to return + +[id="{version}-plugins-{type}s-{plugin}-sort"] +===== `sort` + + * Value type is <> + * Default value is `"@timestamp:desc"` + +Comma-delimited list of `:` pairs that define the sort order + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * Default value is `false` + +SSL + +[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] +===== `tag_on_failure` + + * Value type is <> + * Default value is `["_elasticsearch_lookup_failure"]` + +Tags the event on failure to look up geo information. This can be used in later analysis. + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * There is no default value for this setting. + +Basic Auth - username + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/elasticsearch-v3.2.1.asciidoc b/docs/versioned-plugins/filters/elasticsearch-v3.2.1.asciidoc new file mode 100644 index 000000000..64ed0aa39 --- /dev/null +++ b/docs/versioned-plugins/filters/elasticsearch-v3.2.1.asciidoc @@ -0,0 +1,238 @@ +:plugin: elasticsearch +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+:version: v3.2.1
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-elasticsearch/blob/v3.2.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch filter
+plugin to version 3.1.1 or higher.
+
+================================================================================
+
+Search Elasticsearch for a previous log event and copy some fields from it
+into the current event. Below are two complete examples of how this filter might
+be used.
+
+The first example uses the legacy 'query' parameter, where the user is limited to
+an Elasticsearch query_string. Whenever logstash receives an "end" event, it uses
+this elasticsearch filter to find the matching "start" event based on some
+operation identifier. Then it copies the `@timestamp` field from the "start"
+event into a new field on the "end" event. Finally, using a combination of the
+"date" filter and the "ruby" filter, we calculate the time duration in hours
+between the two events.
+
+[source,ruby]
+--------------------------------------------------
+    if [type] == "end" {
+       elasticsearch {
+          hosts => ["es-server"]
+          query => "type:start AND operation:%{[opid]}"
+          fields => { "@timestamp" => "started" }
+       }
+
+       date {
+          match => ["[started]", "ISO8601"]
+          target => "[started]"
+       }
+
+       ruby {
+          code => "event['duration_hrs'] = (event['@timestamp'] - event['started']) / 3600 rescue nil"
+       }
+    }
+--------------------------------------------------
+
+The example below reproduces the above example but utilises the query_template.
+This query_template represents a full Elasticsearch query DSL and supports the
+standard Logstash field substitution syntax. The example below issues the same
+query as the first example but uses the template shown.
+
+[source,ruby]
+--------------------------------------------------
+    if [type] == "end" {
+       elasticsearch {
+          hosts => ["es-server"]
+          query_template => "template.json"
+       }
+
+       date {
+          match => ["[started]", "ISO8601"]
+          target => "[started]"
+       }
+
+       ruby {
+          code => "event['duration_hrs'] = (event['@timestamp'] - event['started']) / 3600 rescue nil"
+       }
+    }
+--------------------------------------------------
+
+template.json:
+
+[source,json]
+--------------------------------------------------
+    {
+      "query": {
+        "query_string": {
+          "query": "type:start AND operation:%{[opid]}"
+        }
+      },
+      "_source": ["@timestamp", "started"]
+    }
+--------------------------------------------------
+
+As illustrated above, through the use of 'opid', fields from the Logstash events
+can be referenced within the template. The template will be populated per event
+prior to being used to query Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-enable_sort>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-fields>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-query>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-query_template>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-result_size>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-sort>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-ca_file"]
+===== `ca_file`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+SSL Certificate Authority file
+
+[id="{version}-plugins-{type}s-{plugin}-enable_sort"]
+===== `enable_sort`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Whether results should be sorted or not
+
+[id="{version}-plugins-{type}s-{plugin}-fields"]
+===== `fields`
+
+ * Value type is <<array,array>>
+ * Default value is `{}`
+
+Array of fields to copy from the old event (found via Elasticsearch) into the new event
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <<array,array>>
+ * Default value is `["localhost:9200"]`
+
+List of Elasticsearch hosts to use for querying.
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is <<string,string>>
+ * Default value is `""`
+
+Comma-delimited list of index names to search; use `_all` or an empty string to perform the operation on all indices.
+Field substitution (e.g. `index-name-%{date_field}`) is available.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+Basic Auth - password
+
+[id="{version}-plugins-{type}s-{plugin}-query"]
+===== `query`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Elasticsearch query string. Read the Elasticsearch query string documentation
+for more information: https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-query-string-query.html#query-string-syntax
+
+[id="{version}-plugins-{type}s-{plugin}-query_template"]
+===== `query_template`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+File path to an Elasticsearch query in DSL format. Read the Elasticsearch query documentation
+for more information: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html
+
+[id="{version}-plugins-{type}s-{plugin}-result_size"]
+===== `result_size`
+
+ * Value type is <<number,number>>
+ * Default value is `1`
+
+How many results to return
+
+[id="{version}-plugins-{type}s-{plugin}-sort"]
+===== `sort`
+
+ * Value type is <<string,string>>
+ * Default value is `"@timestamp:desc"`
+
+Comma-delimited list of `<field>:<direction>` pairs that define the sort order
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Enable SSL when connecting to Elasticsearch
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
+===== `tag_on_failure`
+
+ * Value type is <<array,array>>
+ * Default value is `["_elasticsearch_lookup_failure"]`
+
+Tags the event on failure to look up previous log event information. This can be used in later analysis.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Basic Auth - username
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/emoji-index.asciidoc b/docs/versioned-plugins/filters/emoji-index.asciidoc
new file mode 100644
index 000000000..53f09a9ea
--- /dev/null
+++ b/docs/versioned-plugins/filters/emoji-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: emoji
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <<v1.0.2-plugins-filters-emoji,v1.0.2>> | 2017-08-15
+| <<v1.0.1-plugins-filters-emoji,v1.0.1>> | 2017-06-23
+|=======================================================================
+
+include::emoji-v1.0.2.asciidoc[]
+include::emoji-v1.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/emoji-v1.0.1.asciidoc b/docs/versioned-plugins/filters/emoji-v1.0.1.asciidoc
new file mode 100644
index 000000000..e88c97498
--- /dev/null
+++ b/docs/versioned-plugins/filters/emoji-v1.0.1.asciidoc
@@ -0,0 +1,176 @@
+:plugin: emoji
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v1.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-emoji/blob/v1.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Emoji filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This plugin maps the severity names or numeric codes as defined in
+https://tools.ietf.org/html/rfc3164#section-4.1.1[RFC 3164] and
+https://tools.ietf.org/html/rfc5424#section-6.2.1[RFC 5424] to the emoji
+as defined in the configuration.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Emoji Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-fallback>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-field>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-override>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-sev_alert>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-sev_critical>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-sev_debug>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-sev_emergency>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-sev_error>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-sev_info>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-sev_notice>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-sev_warning>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-fallback"]
+===== `fallback`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+If no match is found in the event, this default emoji is written to `target`,
+so the target field is always populated.
+
+For example, if we have configured `fallback => "❓"`, using this
+dictionary:
+[source,ruby]
+    foo: 👤
+
+Then, if logstash received an event with the field `foo` set to 👤, the
+target field would be set to 👤. However, if logstash received an event with
+`foo` set to `nope`, then the target field would still be populated, but
+with the value of ❓.
+This configuration can be dynamic and include parts of the event using the
+`%{field}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-field"]
+===== `field`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The name of the logstash event field containing the value to be compared for
+a match by the emoji filter (e.g. `severity`).
+
+If this field is an array, only the first value will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-override"]
+===== `override`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+If the target field already exists, this configuration item specifies
+whether the filter should leave it unchanged (the default) or overwrite
+it with the emoji value.
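+As an illustration, the following minimal sketch combines the options above
+(the field names and emoji are examples only, not defaults):
+
+[source,ruby]
+--------------------------------------------------
+    filter {
+      emoji {
+        field => "severity"    # the event field to match on
+        target => "emoji"      # where the emoji is written
+        fallback => "❓"       # used when no severity matches
+        override => true       # overwrite an existing value in `target`
+      }
+    }
+--------------------------------------------------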
+
+[id="{version}-plugins-{type}s-{plugin}-sev_alert"]
+===== `sev_alert`
+
+ * Value type is <<string,string>>
+ * Default value is `"🚨"`
+
+`sev_alert` selects the emoji/unicode character for Alert severity
+
+[id="{version}-plugins-{type}s-{plugin}-sev_critical"]
+===== `sev_critical`
+
+ * Value type is <<string,string>>
+ * Default value is `"🔥"`
+
+`sev_critical` selects the emoji/unicode character for Critical severity
+
+[id="{version}-plugins-{type}s-{plugin}-sev_debug"]
+===== `sev_debug`
+
+ * Value type is <<string,string>>
+ * Default value is `"🐛"`
+
+`sev_debug` selects the emoji/unicode character for Debug severity
+
+[id="{version}-plugins-{type}s-{plugin}-sev_emergency"]
+===== `sev_emergency`
+
+ * Value type is <<string,string>>
+ * Default value is `"💥"`
+
+`sev_emergency` selects the emoji/unicode character for Emergency severity
+
+[id="{version}-plugins-{type}s-{plugin}-sev_error"]
+===== `sev_error`
+
+ * Value type is <<string,string>>
+ * Default value is `"❌"`
+
+`sev_error` selects the emoji/unicode character for Error severity
+
+[id="{version}-plugins-{type}s-{plugin}-sev_info"]
+===== `sev_info`
+
+ * Value type is <<string,string>>
+ * Default value is `"ℹ️"`
+
+`sev_info` selects the emoji/unicode character for Informational severity
+
+[id="{version}-plugins-{type}s-{plugin}-sev_notice"]
+===== `sev_notice`
+
+ * Value type is <<string,string>>
+ * Default value is `"👀"`
+
+`sev_notice` selects the emoji/unicode character for Notice severity
+
+[id="{version}-plugins-{type}s-{plugin}-sev_warning"]
+===== `sev_warning`
+
+ * Value type is <<string,string>>
+ * Default value is `"⚠️"`
+
+`sev_warning` selects the emoji/unicode character for Warning severity
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <<string,string>>
+ * Default value is `"emoji"`
+
+The target field you wish to populate with the emoji. The default
+is a field named `emoji`. Set this to the same value as the source (`field`)
+if you want to do a substitution; in this case the filter will always succeed
+and overwrite the old value of the source field!
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/emoji-v1.0.2.asciidoc b/docs/versioned-plugins/filters/emoji-v1.0.2.asciidoc
new file mode 100644
index 000000000..3b8e33d62
--- /dev/null
+++ b/docs/versioned-plugins/filters/emoji-v1.0.2.asciidoc
@@ -0,0 +1,176 @@
+:plugin: emoji
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v1.0.2
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-emoji/blob/v1.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Emoji filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This plugin maps the severity names or numeric codes as defined in
+https://tools.ietf.org/html/rfc3164#section-4.1.1[RFC 3164] and
+https://tools.ietf.org/html/rfc5424#section-6.2.1[RFC 5424] to the emoji
+as defined in the configuration.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Emoji Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-fallback>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-field>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-override>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-sev_alert>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-sev_critical>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-sev_debug>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-sev_emergency>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-sev_error>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-sev_info>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-sev_notice>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-sev_warning>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-fallback"]
+===== `fallback`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+If no match is found in the event, this default emoji is written to `target`,
+so the target field is always populated.
+
+For example, if we have configured `fallback => "❓"`, using this
+dictionary:
+[source,ruby]
+    foo: 👤
+
+Then, if logstash received an event with the field `foo` set to 👤, the
+target field would be set to 👤. However, if logstash received an event with
+`foo` set to `nope`, then the target field would still be populated, but
+with the value of ❓.
+This configuration can be dynamic and include parts of the event using the
+`%{field}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-field"]
+===== `field`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The name of the logstash event field containing the value to be compared for
+a match by the emoji filter (e.g. `severity`).
+
+If this field is an array, only the first value will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-override"]
+===== `override`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+If the target field already exists, this configuration item specifies
+whether the filter should leave it unchanged (the default) or overwrite
+it with the emoji value.
+
+[id="{version}-plugins-{type}s-{plugin}-sev_alert"]
+===== `sev_alert`
+
+ * Value type is <<string,string>>
+ * Default value is `"🚨"`
+
+`sev_alert` selects the emoji/unicode character for Alert severity
+
+[id="{version}-plugins-{type}s-{plugin}-sev_critical"]
+===== `sev_critical`
+
+ * Value type is <<string,string>>
+ * Default value is `"🔥"`
+
+`sev_critical` selects the emoji/unicode character for Critical severity
+
+[id="{version}-plugins-{type}s-{plugin}-sev_debug"]
+===== `sev_debug`
+
+ * Value type is <<string,string>>
+ * Default value is `"🐛"`
+
+`sev_debug` selects the emoji/unicode character for Debug severity
+
+[id="{version}-plugins-{type}s-{plugin}-sev_emergency"]
+===== `sev_emergency`
+
+ * Value type is <<string,string>>
+ * Default value is `"💥"`
+
+`sev_emergency` selects the emoji/unicode character for Emergency severity
+
+[id="{version}-plugins-{type}s-{plugin}-sev_error"]
+===== `sev_error`
+
+ * Value type is <<string,string>>
+ * Default value is `"❌"`
+
+`sev_error` selects the emoji/unicode character for Error severity
+
+[id="{version}-plugins-{type}s-{plugin}-sev_info"]
+===== `sev_info`
+
+ * Value type is <<string,string>>
+ * Default value is `"ℹ️"`
+
+`sev_info` selects the emoji/unicode character for Informational severity
+
+[id="{version}-plugins-{type}s-{plugin}-sev_notice"]
+===== `sev_notice`
+
+ * Value type is <<string,string>>
+ * Default value is `"👀"`
+
+`sev_notice` selects the emoji/unicode character for Notice severity
+
+[id="{version}-plugins-{type}s-{plugin}-sev_warning"]
+===== `sev_warning`
+
+ * Value type is <<string,string>>
+ * Default value is `"⚠️"`
+
+`sev_warning` selects the emoji/unicode character for Warning severity
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <<string,string>>
+ * Default value is `"emoji"`
+
+The target field you wish to populate with the emoji. The default
+is a field named `emoji`. Set this to the same value as the source (`field`)
+if you want to do a substitution; in this case the filter will always succeed
+and overwrite the old value of the source field!
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/environment-index.asciidoc b/docs/versioned-plugins/filters/environment-index.asciidoc
new file mode 100644
index 000000000..3c52cb7a2
--- /dev/null
+++ b/docs/versioned-plugins/filters/environment-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: environment
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <<v3.0.3-plugins-filters-environment,v3.0.3>> | 2017-11-07
+| <<v3.0.2-plugins-filters-environment,v3.0.2>> | 2017-08-15
+| <<v3.0.1-plugins-filters-environment,v3.0.1>> | 2017-06-23
+|=======================================================================
+
+include::environment-v3.0.3.asciidoc[]
+include::environment-v3.0.2.asciidoc[]
+include::environment-v3.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/environment-v3.0.1.asciidoc b/docs/versioned-plugins/filters/environment-v3.0.1.asciidoc
new file mode 100644
index 000000000..5738f5e98
--- /dev/null
+++ b/docs/versioned-plugins/filters/environment-v3.0.1.asciidoc
@@ -0,0 +1,83 @@
+:plugin: environment
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-environment/blob/v3.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Environment filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This filter stores environment variables as subfields in the `@metadata` field.
+You can then use these values in other parts of the pipeline.
+
+Adding environment variables is as easy as:
+
+    filter {
+      environment {
+        add_metadata_from_env => { "field_name" => "ENV_VAR_NAME" }
+      }
+    }
+
+Accessing stored environment variables is now done through the `@metadata` field:
+
+    ["@metadata"]["field_name"]
+
+This would reference the `field_name` field, which in the above example references
+the `ENV_VAR_NAME` environment variable.
+
+IMPORTANT: Previous versions of this plugin put the environment variables as
+fields at the root level of the event. Current versions make use of the
+`@metadata` field, as outlined above. If you are upgrading, you must change
+`add_field_from_env` in older configurations to `add_metadata_from_env`.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Environment Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-add_metadata_from_env>> |<<hash,hash>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-add_metadata_from_env"]
+===== `add_metadata_from_env`
+
+ * Value type is <<hash,hash>>
+ * Default value is `{}`
+
+Specify a hash of field names mapped to the environment variables whose
+values you want imported into Logstash. For example:
+
+    add_metadata_from_env => { "field_name" => "ENV_VAR_NAME" }
+
+or
+
+    add_metadata_from_env => {
+      "field1" => "ENV1"
+      "field2" => "ENV2"
+      # "field_n" => "ENV_n"
+    }
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/environment-v3.0.2.asciidoc b/docs/versioned-plugins/filters/environment-v3.0.2.asciidoc
new file mode 100644
index 000000000..c9af23b9a
--- /dev/null
+++ b/docs/versioned-plugins/filters/environment-v3.0.2.asciidoc
@@ -0,0 +1,83 @@
+:plugin: environment
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.2
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-environment/blob/v3.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Environment filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This filter stores environment variables as subfields in the `@metadata` field.
+You can then use these values in other parts of the pipeline.
+
+Adding environment variables is as easy as:
+
+    filter {
+      environment {
+        add_metadata_from_env => { "field_name" => "ENV_VAR_NAME" }
+      }
+    }
+
+Accessing stored environment variables is now done through the `@metadata` field:
+
+    ["@metadata"]["field_name"]
+
+This would reference the `field_name` field, which in the above example references
+the `ENV_VAR_NAME` environment variable.
+
+IMPORTANT: Previous versions of this plugin put the environment variables as
+fields at the root level of the event. Current versions make use of the
+`@metadata` field, as outlined above. If you are upgrading, you must change
+`add_field_from_env` in older configurations to `add_metadata_from_env`.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Environment Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-add_metadata_from_env>> |<<hash,hash>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-add_metadata_from_env"]
+===== `add_metadata_from_env`
+
+ * Value type is <<hash,hash>>
+ * Default value is `{}`
+
+Specify a hash of field names mapped to the environment variables whose
+values you want imported into Logstash. For example:
+
+    add_metadata_from_env => { "field_name" => "ENV_VAR_NAME" }
+
+or
+
+    add_metadata_from_env => {
+      "field1" => "ENV1"
+      "field2" => "ENV2"
+      # "field_n" => "ENV_n"
+    }
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/environment-v3.0.3.asciidoc b/docs/versioned-plugins/filters/environment-v3.0.3.asciidoc
new file mode 100644
index 000000000..64b47414d
--- /dev/null
+++ b/docs/versioned-plugins/filters/environment-v3.0.3.asciidoc
@@ -0,0 +1,83 @@
+:plugin: environment
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-environment/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Environment filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This filter stores environment variables as subfields in the `@metadata` field.
+You can then use these values in other parts of the pipeline.
+
+Adding environment variables is as easy as:
+
+    filter {
+      environment {
+        add_metadata_from_env => { "field_name" => "ENV_VAR_NAME" }
+      }
+    }
+
+Accessing stored environment variables is now done through the `@metadata` field:
+
+    ["@metadata"]["field_name"]
+
+This would reference the `field_name` field, which in the above example references
+the `ENV_VAR_NAME` environment variable.
+
+IMPORTANT: Previous versions of this plugin put the environment variables as
+fields at the root level of the event. Current versions make use of the
+`@metadata` field, as outlined above. If you are upgrading, you must change
+`add_field_from_env` in older configurations to `add_metadata_from_env`.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Environment Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-add_metadata_from_env>> |<<hash,hash>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-add_metadata_from_env"]
+===== `add_metadata_from_env`
+
+ * Value type is <<hash,hash>>
+ * Default value is `{}`
+
+Specify a hash of field names mapped to the environment variables whose
+values you want imported into Logstash. For example:
+
+    add_metadata_from_env => { "field_name" => "ENV_VAR_NAME" }
+
+or
+
+    add_metadata_from_env => {
+      "field1" => "ENV1"
+      "field2" => "ENV2"
+      # "field_n" => "ENV_n"
+    }
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/example-index.asciidoc b/docs/versioned-plugins/filters/example-index.asciidoc
new file mode 100644
index 000000000..491dc50e6
--- /dev/null
+++ b/docs/versioned-plugins/filters/example-index.asciidoc
@@ -0,0 +1,10 @@
+:plugin: example
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+|=======================================================================
+
+
diff --git a/docs/versioned-plugins/filters/extractnumbers-index.asciidoc b/docs/versioned-plugins/filters/extractnumbers-index.asciidoc
new file mode 100644
index 000000000..99cb8fb71
--- /dev/null
+++ b/docs/versioned-plugins/filters/extractnumbers-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: extractnumbers
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <<v3.0.3-plugins-filters-extractnumbers,v3.0.3>> | 2017-11-07
+| <<v3.0.2-plugins-filters-extractnumbers,v3.0.2>> | 2017-08-15
+| <<v3.0.1-plugins-filters-extractnumbers,v3.0.1>> | 2017-06-23
+|=======================================================================
+
+include::extractnumbers-v3.0.3.asciidoc[]
+include::extractnumbers-v3.0.2.asciidoc[]
+include::extractnumbers-v3.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/extractnumbers-v3.0.1.asciidoc b/docs/versioned-plugins/filters/extractnumbers-v3.0.1.asciidoc
new file mode 100644
index 000000000..e341c17ee
--- /dev/null
+++ b/docs/versioned-plugins/filters/extractnumbers-v3.0.1.asciidoc
@@ -0,0 +1,61 @@
+:plugin: extractnumbers
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-extractnumbers/blob/v3.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Extractnumbers filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This filter automatically extracts all numbers found inside a string.
+
+This is useful when you have lines that don't match a grok pattern
+or use JSON but you still need to extract numbers.
+
+Each number is returned in a `@fields.intX` or `@fields.floatX` field,
+where X indicates the position in the string.
+
+The fields produced by this filter are especially useful in combination
+with the Kibana number plotting features.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Extractnumbers Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-source>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * Value type is <<string,string>>
+ * Default value is `"message"`
+
+The source field for the data. By default, this is the `message` field.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/extractnumbers-v3.0.2.asciidoc b/docs/versioned-plugins/filters/extractnumbers-v3.0.2.asciidoc
new file mode 100644
index 000000000..cc8661d69
--- /dev/null
+++ b/docs/versioned-plugins/filters/extractnumbers-v3.0.2.asciidoc
@@ -0,0 +1,61 @@
+:plugin: extractnumbers
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.2
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-extractnumbers/blob/v3.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Extractnumbers filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This filter automatically extracts all numbers found inside a string.
+
+This is useful when you have lines that don't match a grok pattern
+or use JSON but you still need to extract numbers.
+
+Each number is returned in a `@fields.intX` or `@fields.floatX` field,
+where X indicates the position in the string.
+
+The fields produced by this filter are especially useful in combination
+with the Kibana number plotting features.
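+As a minimal sketch of the behavior described above (the sample message and the
+exact field positions are illustrative, not guaranteed):
+
+[source,ruby]
+--------------------------------------------------
+    filter {
+      extractnumbers {
+        source => "message"   # the default, shown here for clarity
+      }
+    }
+    # Given message => "took 250 ms (0.5% errors)", the filter emits
+    # numeric fields following the intX/floatX pattern described above,
+    # e.g. int0 => 250 and float0 => 0.5.
+--------------------------------------------------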
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Extractnumbers Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-source>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * Value type is <<string,string>>
+ * Default value is `"message"`
+
+The source field for the data. By default, this is the `message` field.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/extractnumbers-v3.0.3.asciidoc b/docs/versioned-plugins/filters/extractnumbers-v3.0.3.asciidoc
new file mode 100644
index 000000000..41b3cdac2
--- /dev/null
+++ b/docs/versioned-plugins/filters/extractnumbers-v3.0.3.asciidoc
@@ -0,0 +1,61 @@
+:plugin: extractnumbers
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-extractnumbers/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Extractnumbers filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This filter automatically extracts all numbers found inside a string.
+
+This is useful when you have lines that don't match a grok pattern
+or use JSON but you still need to extract numbers.
+
+Each number is returned in a `@fields.intX` or `@fields.floatX` field,
+where X indicates the position in the string.
+
+The fields produced by this filter are especially useful in combination
+with the Kibana number plotting features.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Extractnumbers Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-source>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * Value type is <<string,string>>
+ * Default value is `"message"`
+
+The source field for the data. By default, this is the `message` field.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/fingerprint-index.asciidoc b/docs/versioned-plugins/filters/fingerprint-index.asciidoc
new file mode 100644
index 000000000..c3e7f2484
--- /dev/null
+++ b/docs/versioned-plugins/filters/fingerprint-index.asciidoc
@@ -0,0 +1,18 @@
+:plugin: fingerprint
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <<v3.1.2-plugins-filters-fingerprint,v3.1.2>> | 2017-11-07
+| <<v3.1.1-plugins-filters-fingerprint,v3.1.1>> | 2017-08-15
+| <<v3.1.0-plugins-filters-fingerprint,v3.1.0>> | 2017-08-14
+| <<v3.0.4-plugins-filters-fingerprint,v3.0.4>> | 2017-06-23
+|=======================================================================
+
+include::fingerprint-v3.1.2.asciidoc[]
+include::fingerprint-v3.1.1.asciidoc[]
+include::fingerprint-v3.1.0.asciidoc[]
+include::fingerprint-v3.0.4.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/fingerprint-v3.0.4.asciidoc b/docs/versioned-plugins/filters/fingerprint-v3.0.4.asciidoc
new file mode 100644
index 000000000..acd49ef1d
--- /dev/null
+++ b/docs/versioned-plugins/filters/fingerprint-v3.0.4.asciidoc
@@ -0,0 +1,139 @@
+:plugin: fingerprint
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-fingerprint/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Fingerprint filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Create consistent hashes (fingerprints) of one or more fields and store
+the result in a new field.
+
+This can, for example, be used to create consistent document IDs when inserting
+events into Elasticsearch, allowing events in Logstash to update existing
+documents rather than create new ones.
+
+NOTE: When using any method other than `UUID`, `PUNCTUATION` or `MURMUR3`,
+you must set the `key` option; otherwise, the plugin will raise an exception.
+
+NOTE: When the `method` option is set to `UUID`, the result won't be
+a consistent hash but a random
+https://en.wikipedia.org/wiki/Universally_unique_identifier[UUID].
+To generate UUIDs, prefer the <<plugins-filters-uuid,uuid filter>>.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Fingerprint Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-base64encode>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-concatenate_sources>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-key>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-method>> |<<string,string>>, one of `["SHA1", "SHA256", "SHA384", "SHA512", "MD5", "MURMUR3", "IPV4_NETWORK", "UUID", "PUNCTUATION"]`|Yes
+| <<{version}-plugins-{type}s-{plugin}-source>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
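+To make the document-ID use case described above concrete, here is a minimal
+sketch; the key, host, and metadata field name are placeholders, not defaults:
+
+[source,ruby]
+--------------------------------------------------
+    filter {
+      fingerprint {
+        source => ["message"]
+        method => "SHA256"
+        key => "0123"                         # HMAC key, required for the SHA*/MD5 methods
+        target => "[@metadata][fingerprint]"  # kept out of the stored document
+      }
+    }
+
+    output {
+      elasticsearch {
+        hosts => ["localhost:9200"]
+        document_id => "%{[@metadata][fingerprint]}"
+      }
+    }
+--------------------------------------------------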
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-base64encode"]
+===== `base64encode`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+When set to `true`, the `SHA1`, `SHA256`, `SHA384`, `SHA512` and `MD5` fingerprint methods will produce
+base64 encoded rather than hex encoded strings.
+
+[id="{version}-plugins-{type}s-{plugin}-concatenate_sources"]
+===== `concatenate_sources`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+When set to `true` and `method` isn't `UUID` or `PUNCTUATION`, the
+plugin concatenates the names and values of all fields given in the
+`source` option into one string (like the old checksum filter) before
+doing the fingerprint computation. If `false` and multiple source
+fields are given, the target field will be an array with fingerprints
+of the source fields given.
+
+[id="{version}-plugins-{type}s-{plugin}-key"]
+===== `key`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+When used with the `IPV4_NETWORK` method, fill in the subnet prefix length.
+With other methods, fill in the HMAC key. The key is required for all
+methods except `MURMUR3`, `PUNCTUATION`, and `UUID`.
+
+[id="{version}-plugins-{type}s-{plugin}-method"]
+===== `method`
+
+ * This is a required setting.
+ * Value can be any of: `SHA1`, `SHA256`, `SHA384`, `SHA512`, `MD5`, `MURMUR3`, `IPV4_NETWORK`, `UUID`, `PUNCTUATION`
+ * Default value is `"SHA1"`
+
+The fingerprint method to use.
+
+If set to `SHA1`, `SHA256`, `SHA384`, `SHA512`, or `MD5`, the
+cryptographic keyed-hash function with the same name will be used to
+generate the fingerprint. If set to `MURMUR3`, the non-cryptographic
+MurmurHash function will be used.
+
+If set to `IPV4_NETWORK`, the input data needs to be an IPv4 address and
+the hash value will be the masked-out address using the number of bits
+specified in the `key` option. For example, with "1.2.3.4" as the input
+and `key` set to 16, the hash becomes "1.2.0.0".
+
+If set to `PUNCTUATION`, all non-punctuation characters will be removed
+from the input string.
+
+If set to `UUID`, a
+https://en.wikipedia.org/wiki/Universally_unique_identifier[UUID] will
+be generated. The result will be random and thus not a consistent hash.
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * Value type is <<array,array>>
+ * Default value is `"message"`
+
+The name(s) of the source field(s) whose contents will be used
+to create the fingerprint. If an array is given, see the
+`concatenate_sources` option.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <<string,string>>
+ * Default value is `"fingerprint"`
+
+The name of the field where the generated fingerprint will be stored.
+Any current contents of that field will be overwritten.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/fingerprint-v3.1.0.asciidoc b/docs/versioned-plugins/filters/fingerprint-v3.1.0.asciidoc
new file mode 100644
index 000000000..5fff72e2a
--- /dev/null
+++ b/docs/versioned-plugins/filters/fingerprint-v3.1.0.asciidoc
@@ -0,0 +1,153 @@
+:plugin: fingerprint
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.0
+:release_date: 2017-08-14
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-fingerprint/blob/v3.1.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Fingerprint filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Create consistent hashes (fingerprints) of one or more fields and store
+the result in a new field.
+
+This can, for example, be used to create consistent document IDs when inserting
+events into Elasticsearch, allowing events in Logstash to update existing
+documents rather than create new ones.
+
+NOTE: When using any method other than `UUID`, `PUNCTUATION` or `MURMUR3`,
+you must set the `key` option; otherwise, the plugin will raise an exception.
+
+NOTE: When the `method` option is set to `UUID`, the result won't be
+a consistent hash but a random
+https://en.wikipedia.org/wiki/Universally_unique_identifier[UUID].
+To generate UUIDs, prefer the <<plugins-filters-uuid,uuid filter>>.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Fingerprint Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-base64encode>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-concatenate_sources>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-concatenate_all_fields>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-key>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-method>> |<<string,string>>, one of `["SHA1", "SHA256", "SHA384", "SHA512", "MD5", "MURMUR3", "IPV4_NETWORK", "UUID", "PUNCTUATION"]`|Yes
+| <<{version}-plugins-{type}s-{plugin}-source>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-base64encode"]
+===== `base64encode`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+When set to `true`, the `SHA1`, `SHA256`, `SHA384`, `SHA512` and `MD5` fingerprint methods will produce
+base64 encoded rather than hex encoded strings.
+
+[id="{version}-plugins-{type}s-{plugin}-concatenate_sources"]
+===== `concatenate_sources`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+When set to `true` and `method` isn't `UUID` or `PUNCTUATION`, the
+plugin concatenates the names and values of all fields given in the
+`source` option into one string (like the old checksum filter) before
+doing the fingerprint computation. If `false` and multiple source
+fields are given, the target field will be an array with fingerprints
+of the source fields given.
+
+[id="{version}-plugins-{type}s-{plugin}-concatenate_all_fields"]
+===== `concatenate_all_fields`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+When set to `true` and `method` isn't `UUID` or `PUNCTUATION`, the
+plugin concatenates the names and values of all fields of the event
+into one string (like the old checksum filter) before doing the
+fingerprint computation.
+If `false` and at least one source field is
+given, the target field will be an array with fingerprints of the
+source fields given.
+
+[id="{version}-plugins-{type}s-{plugin}-key"]
+===== `key`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+When used with the `IPV4_NETWORK` method, fill in the subnet prefix length.
+With other methods, fill in the HMAC key. The key is required for all
+methods except `MURMUR3`, `PUNCTUATION`, and `UUID`.
+
+[id="{version}-plugins-{type}s-{plugin}-method"]
+===== `method`
+
+ * This is a required setting.
+ * Value can be any of: `SHA1`, `SHA256`, `SHA384`, `SHA512`, `MD5`, `MURMUR3`, `IPV4_NETWORK`, `UUID`, `PUNCTUATION`
+ * Default value is `"SHA1"`
+
+The fingerprint method to use.
+
+If set to `SHA1`, `SHA256`, `SHA384`, `SHA512`, or `MD5`, the
+cryptographic keyed-hash function with the same name will be used to
+generate the fingerprint. If set to `MURMUR3`, the non-cryptographic
+MurmurHash function will be used.
+
+If set to `IPV4_NETWORK`, the input data needs to be an IPv4 address and
+the hash value will be the masked-out address using the number of bits
+specified in the `key` option. For example, with "1.2.3.4" as the input
+and `key` set to 16, the hash becomes "1.2.0.0".
+
+If set to `PUNCTUATION`, all non-punctuation characters will be removed
+from the input string.
+
+If set to `UUID`, a
+https://en.wikipedia.org/wiki/Universally_unique_identifier[UUID] will
+be generated. The result will be random and thus not a consistent hash.
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * Value type is <<array,array>>
+ * Default value is `"message"`
+
+The name(s) of the source field(s) whose contents will be used
+to create the fingerprint. If an array is given, see the
+`concatenate_sources` option.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <<string,string>>
+ * Default value is `"fingerprint"`
+
+The name of the field where the generated fingerprint will be stored.
+Any current contents of that field will be overwritten.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/fingerprint-v3.1.1.asciidoc b/docs/versioned-plugins/filters/fingerprint-v3.1.1.asciidoc
new file mode 100644
index 000000000..45f531d13
--- /dev/null
+++ b/docs/versioned-plugins/filters/fingerprint-v3.1.1.asciidoc
@@ -0,0 +1,153 @@
+:plugin: fingerprint
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.1
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-fingerprint/blob/v3.1.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Fingerprint filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Create consistent hashes (fingerprints) of one or more fields and store
+the result in a new field.
+
+This can, for example, be used to create consistent document IDs when inserting
+events into Elasticsearch, allowing events in Logstash to update existing
+documents rather than create new ones.
+
+NOTE: When using any method other than `UUID`, `PUNCTUATION` or `MURMUR3`,
+you must set the `key` option; otherwise, the plugin will raise an exception.
+
+NOTE: When the `method` option is set to `UUID`, the result won't be
+a consistent hash but a random
+https://en.wikipedia.org/wiki/Universally_unique_identifier[UUID].
+To generate UUIDs, prefer the <<plugins-filters-uuid,uuid filter>>.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Fingerprint Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-base64encode>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-concatenate_sources>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-concatenate_all_fields>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-key>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-method>> |<<string,string>>, one of `["SHA1", "SHA256", "SHA384", "SHA512", "MD5", "MURMUR3", "IPV4_NETWORK", "UUID", "PUNCTUATION"]`|Yes
+| <<{version}-plugins-{type}s-{plugin}-source>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-base64encode"]
+===== `base64encode`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+When set to `true`, the `SHA1`, `SHA256`, `SHA384`, `SHA512` and `MD5` fingerprint methods will produce
+base64 encoded rather than hex encoded strings.
+
+[id="{version}-plugins-{type}s-{plugin}-concatenate_sources"]
+===== `concatenate_sources`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+When set to `true` and `method` isn't `UUID` or `PUNCTUATION`, the
+plugin concatenates the names and values of all fields given in the
+`source` option into one string (like the old checksum filter) before
+doing the fingerprint computation. If `false` and multiple source
+fields are given, the target field will be an array with fingerprints
+of the source fields given.
+
+[id="{version}-plugins-{type}s-{plugin}-concatenate_all_fields"]
+===== `concatenate_all_fields`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+When set to `true` and `method` isn't `UUID` or `PUNCTUATION`, the
+plugin concatenates the names and values of all fields of the event
+into one string (like the old checksum filter) before doing the
+fingerprint computation. If `false` and at least one source field is
+given, the target field will be an array with fingerprints of the
+source fields given.
+
+[id="{version}-plugins-{type}s-{plugin}-key"]
+===== `key`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+When used with the `IPV4_NETWORK` method, fill in the subnet prefix length.
+With other methods, fill in the HMAC key. The key is required for all
+methods except `MURMUR3`, `PUNCTUATION`, and `UUID`.
+
+[id="{version}-plugins-{type}s-{plugin}-method"]
+===== `method`
+
+ * This is a required setting.
+ * Value can be any of: `SHA1`, `SHA256`, `SHA384`, `SHA512`, `MD5`, `MURMUR3`, `IPV4_NETWORK`, `UUID`, `PUNCTUATION`
+ * Default value is `"SHA1"`
+
+The fingerprint method to use.
+
+If set to `SHA1`, `SHA256`, `SHA384`, `SHA512`, or `MD5`, the
+cryptographic keyed-hash function with the same name will be used to
+generate the fingerprint. If set to `MURMUR3`, the non-cryptographic
+MurmurHash function will be used.
+
+If set to `IPV4_NETWORK`, the input data needs to be an IPv4 address and
+the hash value will be the masked-out address using the number of bits
+specified in the `key` option. For example, with "1.2.3.4" as the input
+and `key` set to 16, the hash becomes "1.2.0.0".
+
+If set to `PUNCTUATION`, all non-punctuation characters will be removed
+from the input string.
+
+If set to `UUID`, a
+https://en.wikipedia.org/wiki/Universally_unique_identifier[UUID] will
+be generated. The result will be random and thus not a consistent hash.
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * Value type is <<array,array>>
+ * Default value is `"message"`
+
+The name(s) of the source field(s) whose contents will be used
+to create the fingerprint. If an array is given, see the
+`concatenate_sources` option.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <<string,string>>
+ * Default value is `"fingerprint"`
+
+The name of the field where the generated fingerprint will be stored.
+Any current contents of that field will be overwritten.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/fingerprint-v3.1.2.asciidoc b/docs/versioned-plugins/filters/fingerprint-v3.1.2.asciidoc
new file mode 100644
index 000000000..a7664b280
--- /dev/null
+++ b/docs/versioned-plugins/filters/fingerprint-v3.1.2.asciidoc
@@ -0,0 +1,153 @@
+:plugin: fingerprint
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.2
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-fingerprint/blob/v3.1.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Fingerprint filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Create consistent hashes (fingerprints) of one or more fields and store
+the result in a new field.
+
+This can, for example, be used to create consistent document IDs when inserting
+events into Elasticsearch, allowing events in Logstash to update existing
+documents rather than create new ones.
+
+NOTE: When using any method other than `UUID`, `PUNCTUATION` or `MURMUR3`,
+you must set the `key` option; otherwise, the plugin will raise an exception.
+
+NOTE: When the `method` option is set to `UUID`, the result won't be
+a consistent hash but a random
+https://en.wikipedia.org/wiki/Universally_unique_identifier[UUID].
+To generate UUIDs, prefer the <<plugins-filters-uuid,uuid filter>>.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Fingerprint Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-base64encode>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-concatenate_sources>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-concatenate_all_fields>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-method>> |<>, one of `["SHA1", "SHA256", "SHA384", "SHA512", "MD5", "MURMUR3", "IPV4_NETWORK", "UUID", "PUNCTUATION"]`|Yes +| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-base64encode"] +===== `base64encode` + + * Value type is <> + * Default value is `false` + +When set to `true`, the `SHA1`, `SHA256`, `SHA384`, `SHA512` and `MD5` fingerprint methods will produce +base64 encoded rather than hex encoded strings. + +[id="{version}-plugins-{type}s-{plugin}-concatenate_sources"] +===== `concatenate_sources` + + * Value type is <> + * Default value is `false` + +When set to `true` and `method` isn't `UUID` or `PUNCTUATION`, the +plugin concatenates the names and values of all fields given in the +`source` option into one string (like the old checksum filter) before +doing the fingerprint computation. If `false` and multiple source +fields are given, the target field will be an array with fingerprints +of the source fields given. + +[id="{version}-plugins-{type}s-{plugin}-concatenate_all_fields"] +===== `concatenate_sources` + + * Value type is <> + * Default value is `false` + +When set to `true` and `method` isn't `UUID` or `PUNCTUATION`, the +plugin concatenates the names and values of all fields of the event +into one string (like the old checksum filter) before doing the +fingerprint computation. If `false` and at least one source field is +given, the target field will be an array with fingerprints of the +source fields given. + +[id="{version}-plugins-{type}s-{plugin}-key"] +===== `key` + + * Value type is <> + * There is no default value for this setting. + +When used with the `IPV4_NETWORK` method fill in the subnet prefix length. +Key is required with all methods except `MURMUR3`, `PUNCTUATION` or `UUID`. +With other methods fill in the HMAC key. + +[id="{version}-plugins-{type}s-{plugin}-method"] +===== `method` + + * This is a required setting. + * Value can be any of: `SHA1`, `SHA256`, `SHA384`, `SHA512`, `MD5`, `MURMUR3`, `IPV4_NETWORK`, `UUID`, `PUNCTUATION` + * Default value is `"SHA1"` + +The fingerprint method to use. + +If set to `SHA1`, `SHA256`, `SHA384`, `SHA512`, or `MD5` the +cryptographic keyed-hash function with the same name will be used to +generate the fingerprint. If set to `MURMUR3` the non-cryptographic +MurmurHash function will be used. + +If set to `IPV4_NETWORK` the input data needs to be a IPv4 address and +the hash value will be the masked-out address using the number of bits +specified in the `key` option. For example, with "1.2.3.4" as the input +and `key` set to 16, the hash becomes "1.2.0.0". + +If set to `PUNCTUATION`, all non-punctuation characters will be removed +from the input string. + +If set to `UUID`, a +https://en.wikipedia.org/wiki/Universally_unique_identifier[UUID] will +be generated. 
The result will be random and thus not a consistent hash.
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * Value type is <>
+ * Default value is `"message"`
+
+The name(s) of the source field(s) whose contents will be used
+to create the fingerprint. If an array is given, see the
+`concatenate_sources` option.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * Default value is `"fingerprint"`
+
+The name of the field where the generated fingerprint will be stored.
+Any current contents of that field will be overwritten.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/geoip-index.asciidoc b/docs/versioned-plugins/filters/geoip-index.asciidoc
new file mode 100644
index 000000000..5b47f7cec
--- /dev/null
+++ b/docs/versioned-plugins/filters/geoip-index.asciidoc
@@ -0,0 +1,28 @@
+:plugin: geoip
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-12-21 (latest)
+| <> | 2017-11-13
+| <> | 2017-08-16
+| <> | 2017-08-01
+| <> | 2017-08-18
+| <> | 2017-07-17
+| <> | 2017-06-23
+| <> | 2017-06-22
+| <> | 2017-05-15
+|=======================================================================
+
+include::geoip-v5.0.3.asciidoc[]
+include::geoip-v5.0.2.asciidoc[]
+include::geoip-v5.0.1.asciidoc[]
+include::geoip-v5.0.0.asciidoc[]
+include::geoip-v4.3.1.asciidoc[]
+include::geoip-v4.3.0.asciidoc[]
+include::geoip-v4.2.1.asciidoc[]
+include::geoip-v4.2.0.asciidoc[]
+include::geoip-v4.1.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/geoip-v4.1.1.asciidoc b/docs/versioned-plugins/filters/geoip-v4.1.1.asciidoc
new file mode 100644
index 000000000..55a042775
--- /dev/null
+++ b/docs/versioned-plugins/filters/geoip-v4.1.1.asciidoc
@@ -0,0 +1,180 @@
+:plugin: geoip
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.1.1
+:release_date: 2017-05-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-geoip/blob/v4.1.1/CHANGELOG.md
+:include_path: ../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Geoip filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The GeoIP filter adds information about the geographical location of IP addresses,
+based on data from the MaxMind GeoLite2 database. Commercial databases from MaxMind are
+also supported in this plugin.
+
+A `[geoip][location]` field is created if
+the GeoIP lookup returns a latitude and longitude. The field is stored in
+http://geojson.org/geojson-spec.html[GeoJSON] format. Additionally,
+the default Elasticsearch template provided with the
+<> maps
+the `[geoip][location]` field to an http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/mapping-geo-point-type.html#_mapping_options[Elasticsearch geo_point].
+
+As this field is a `geo_point` _and_ it is still valid GeoJSON, you get
+the awesomeness of Elasticsearch's geospatial query, facet and filter functions
+and the flexibility of having GeoJSON for all other applications (like Kibana's
+map visualization).
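+
+For instance, the following is a minimal configuration sketch. The
+`clientip` field name is only an assumption for illustration; substitute
+whatever field holds the IP address in your events.
+
+[source,ruby]
+----
+filter {
+  geoip {
+    # "clientip" is an example field name; the lookup result is written
+    # to the default target field, "geoip".
+    source => "clientip"
+  }
+}
+----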
+
+[NOTE]
+--
+This product includes GeoLite2 data created by MaxMind, available from
+http://www.maxmind.com. This database is licensed under
+http://creativecommons.org/licenses/by-sa/4.0/[Creative Commons Attribution-ShareAlike 4.0 International License].
+
+Versions 4.0.0 and later of the GeoIP filter use the MaxMind GeoLite2 database
+and support both IPv4 and IPv6 lookups. Versions prior to 4.0.0 use the legacy
+MaxMind GeoLite database and support IPv4 lookups only.
+--
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Geoip Filter Configuration Options
+
+This plugin supports the following configuration options plus the <> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cache_size>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-database>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+|=======================================================================
+
+Also see <> for a list of options supported by all
+filter plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-cache_size"]
+===== `cache_size`
+
+ * Value type is <>
+ * Default value is `1000`
+
+GeoIP lookup is surprisingly expensive. This filter uses a cache to take advantage of the fact that
+IP addresses are often found adjacent to one another in log files and rarely have a random distribution.
+The higher you set this value, the more likely an item is to be in the cache and the faster this filter will run.
+However, if you set it too high you can use more memory than desired.
+Since the upgrade to v2 of the GeoIP API, the cache has no eviction policy: once it is full, no more records can be added.
+Experiment with different values for this option to find the best performance for your dataset.
+
+This MUST be set to a value > 0. There is really no reason not to want this behavior; the overhead is minimal
+and the speed gains are large.
+
+It is important to note that this config value is global to the geoip_type. That is to say, all instances of the geoip filter
+of the same geoip_type share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit
+to having multiple caches for different instances at different points in the pipeline; that would just increase the
+number of cache misses and waste memory.
+
+[id="{version}-plugins-{type}s-{plugin}-database"]
+===== `database`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The path to MaxMind's database file that Logstash should use. The default database is GeoLite2-City.
+GeoLite2-City, GeoLite2-Country, and GeoLite2-ASN are the free databases from MaxMind that are supported.
+GeoIP2-City, GeoIP2-ISP, and GeoIP2-Country are the commercial databases from MaxMind that are supported.
+
+If not specified, this will default to the GeoLite2 City database that ships
+with Logstash.
+
+[id="{version}-plugins-{type}s-{plugin}-fields"]
+===== `fields`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+An array of geoip fields to be included in the event.
+
+Possible fields depend on the database type. By default, all geoip fields
+are included in the event.
+
+For the built-in GeoLite2 City database, the following are available:
+`city_name`, `continent_code`, `country_code2`, `country_code3`, `country_name`,
+`dma_code`, `ip`, `latitude`, `longitude`, `postal_code`, `region_name` and `timezone`.
+
+[id="{version}-plugins-{type}s-{plugin}-lru_cache_size"]
+===== `lru_cache_size` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * Default value is `1000`
+
+GeoIP lookup is surprisingly expensive. This filter uses an LRU cache to take advantage of the fact that
+IP addresses are often found adjacent to one another in log files and rarely have a random distribution.
+The higher you set this value, the more likely an item is to be in the cache and the faster this filter will run.
+However, if you set it too high you can use more memory than desired.
+
+Experiment with different values for this option to find the best performance for your dataset.
+
+This MUST be set to a value > 0. There is really no reason not to want this behavior; the overhead is minimal
+and the speed gains are large.
+
+It is important to note that this config value is global to the geoip_type. That is to say, all instances of the geoip filter
+of the same geoip_type share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit
+to having multiple caches for different instances at different points in the pipeline; that would just increase the
+number of cache misses and waste memory.
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The field containing the IP address or hostname to map via geoip. If
+this field is an array, only the first value will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
+===== `tag_on_failure`
+
+ * Value type is <>
+ * Default value is `["_geoip_lookup_failure"]`
+
+Tags the event on failure to look up geo information. This can be used in later analysis.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * Default value is `"geoip"`
+
+Specify the field into which Logstash should store the geoip data.
+This can be useful, for example, if you have `src_ip` and `dst_ip` fields and
+would like the GeoIP information of both IPs.
+
+If you save the data to a target field other than `geoip` and want to use the
+`geo_point` related functions in Elasticsearch, you need to alter the template
+provided with the Elasticsearch output and configure the output to use the
+new template.
+
+Even if you don't use the `geo_point` mapping, the `[target][location]` field
+is still valid GeoJSON.
+
+
+
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/geoip-v4.2.0.asciidoc b/docs/versioned-plugins/filters/geoip-v4.2.0.asciidoc
new file mode 100644
index 000000000..7661e39ae
--- /dev/null
+++ b/docs/versioned-plugins/filters/geoip-v4.2.0.asciidoc
@@ -0,0 +1,195 @@
+:plugin: geoip
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.2.0
+:release_date: 2017-06-22
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-geoip/blob/v4.2.0/CHANGELOG.md
+:include_path: ../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Geoip filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The GeoIP filter adds information about the geographical location of IP addresses,
+based on data from the MaxMind GeoLite2 databases.
+
+==== Supported Databases
+
+This plugin is bundled with the https://dev.maxmind.com/geoip/geoip2/geolite2[GeoLite2] City database out of the box. From MaxMind's description --
+"GeoLite2 databases are free IP geolocation databases comparable to, but less accurate than, MaxMind’s
+GeoIP2 databases". Please see the GeoLite2 license for more details.
+
+https://www.maxmind.com/en/geoip2-databases[Commercial databases] from MaxMind are also supported in this plugin.
+
+If you need to use databases other than the bundled GeoLite2 City, you can download them directly
+from MaxMind's website and use the `database` option to specify their location. The GeoLite2 databases
+can be downloaded from https://dev.maxmind.com/geoip/geoip2/geolite2[here].
+
+If you would like to get Autonomous System Number (ASN) information, you can use the GeoLite2-ASN database.
+
+==== Details
+
+A `[geoip][location]` field is created if
+the GeoIP lookup returns a latitude and longitude. The field is stored in
+http://geojson.org/geojson-spec.html[GeoJSON] format. Additionally,
+the default Elasticsearch template provided with the
+<> maps
+the `[geoip][location]` field to an http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/mapping-geo-point-type.html#_mapping_options[Elasticsearch geo_point].
+
+As this field is a `geo_point` _and_ it is still valid GeoJSON, you get
+the awesomeness of Elasticsearch's geospatial query, facet and filter functions
+and the flexibility of having GeoJSON for all other applications (like Kibana's
+map visualization).
+
+[NOTE]
+--
+This product includes GeoLite2 data created by MaxMind, available from
+http://www.maxmind.com. This database is licensed under
+http://creativecommons.org/licenses/by-sa/4.0/[Creative Commons Attribution-ShareAlike 4.0 International License].
+
+Versions 4.0.0 and later of the GeoIP filter use the MaxMind GeoLite2 database
+and support both IPv4 and IPv6 lookups. Versions prior to 4.0.0 use the legacy
+MaxMind GeoLite database and support IPv4 lookups only.
+--
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Geoip Filter Configuration Options
+
+This plugin supports the following configuration options plus the <> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cache_size>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-database>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+|=======================================================================
+
+Also see <> for a list of options supported by all
+filter plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-cache_size"]
+===== `cache_size`
+
+ * Value type is <>
+ * Default value is `1000`
+
+GeoIP lookup is surprisingly expensive. This filter uses a cache to take advantage of the fact that
+IP addresses are often found adjacent to one another in log files and rarely have a random distribution.
+The higher you set this value, the more likely an item is to be in the cache and the faster this filter will run.
+However, if you set it too high you can use more memory than desired.
+Since the upgrade to v2 of the GeoIP API, the cache has no eviction policy: once it is full, no more records can be added.
+Experiment with different values for this option to find the best performance for your dataset.
+
+This MUST be set to a value > 0. There is really no reason not to want this behavior; the overhead is minimal
+and the speed gains are large.
+
+It is important to note that this config value is global to the geoip_type. That is to say, all instances of the geoip filter
+of the same geoip_type share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit
+to having multiple caches for different instances at different points in the pipeline; that would just increase the
+number of cache misses and waste memory.
+
+[id="{version}-plugins-{type}s-{plugin}-database"]
+===== `database`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The path to MaxMind's database file that Logstash should use. The default database is GeoLite2-City.
+GeoLite2-City, GeoLite2-Country, and GeoLite2-ASN are the free databases from MaxMind that are supported.
+GeoIP2-City, GeoIP2-ISP, and GeoIP2-Country are the commercial databases from MaxMind that are supported.
+
+If not specified, this will default to the GeoLite2 City database that ships
+with Logstash.
+
+[id="{version}-plugins-{type}s-{plugin}-fields"]
+===== `fields`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+An array of geoip fields to be included in the event.
+
+Possible fields depend on the database type. By default, all geoip fields
+are included in the event.
+
+For the built-in GeoLite2 City database, the following are available:
+`city_name`, `continent_code`, `country_code2`, `country_code3`, `country_name`,
+`dma_code`, `ip`, `latitude`, `longitude`, `postal_code`, `region_name` and `timezone`.
+
+[id="{version}-plugins-{type}s-{plugin}-lru_cache_size"]
+===== `lru_cache_size` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * Default value is `1000`
+
+GeoIP lookup is surprisingly expensive. This filter uses an LRU cache to take advantage of the fact that
+IP addresses are often found adjacent to one another in log files and rarely have a random distribution.
+The higher you set this value, the more likely an item is to be in the cache and the faster this filter will run.
+However, if you set it too high you can use more memory than desired.
+
+Experiment with different values for this option to find the best performance for your dataset.
+
+This MUST be set to a value > 0. There is really no reason not to want this behavior; the overhead is minimal
+and the speed gains are large.
+
+It is important to note that this config value is global to the geoip_type. That is to say, all instances of the geoip filter
+of the same geoip_type share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit
+to having multiple caches for different instances at different points in the pipeline; that would just increase the
+number of cache misses and waste memory.
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The field containing the IP address or hostname to map via geoip. If
+this field is an array, only the first value will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
+===== `tag_on_failure`
+
+ * Value type is <>
+ * Default value is `["_geoip_lookup_failure"]`
+
+Tags the event on failure to look up geo information. This can be used in later analysis.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * Default value is `"geoip"`
+
+Specify the field into which Logstash should store the geoip data.
+This can be useful, for example, if you have `src_ip` and `dst_ip` fields and
+would like the GeoIP information of both IPs.
+
+If you save the data to a target field other than `geoip` and want to use the
+`geo_point` related functions in Elasticsearch, you need to alter the template
+provided with the Elasticsearch output and configure the output to use the
+new template.
+
+Even if you don't use the `geo_point` mapping, the `[target][location]` field
+is still valid GeoJSON.
+
+
+
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/geoip-v4.2.1.asciidoc b/docs/versioned-plugins/filters/geoip-v4.2.1.asciidoc
new file mode 100644
index 000000000..093847bce
--- /dev/null
+++ b/docs/versioned-plugins/filters/geoip-v4.2.1.asciidoc
@@ -0,0 +1,196 @@
+:plugin: geoip
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.2.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-geoip/blob/v4.2.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Geoip filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The GeoIP filter adds information about the geographical location of IP addresses,
+based on data from the MaxMind GeoLite2 databases.
+
+==== Supported Databases
+
+This plugin is bundled with the https://dev.maxmind.com/geoip/geoip2/geolite2[GeoLite2] City database out of the box. From MaxMind's description --
+"GeoLite2 databases are free IP geolocation databases comparable to, but less accurate than, MaxMind’s
+GeoIP2 databases". Please see the GeoLite2 license for more details.
+
+https://www.maxmind.com/en/geoip2-databases[Commercial databases] from MaxMind are also supported in this plugin.
+
+If you need to use databases other than the bundled GeoLite2 City, you can download them directly
+from MaxMind's website and use the `database` option to specify their location. The GeoLite2 databases
+can be downloaded from https://dev.maxmind.com/geoip/geoip2/geolite2[here].
+
+If you would like to get Autonomous System Number (ASN) information, you can use the GeoLite2-ASN database.
+
+==== Details
+
+A `[geoip][location]` field is created if
+the GeoIP lookup returns a latitude and longitude. The field is stored in
+http://geojson.org/geojson-spec.html[GeoJSON] format. Additionally,
+the default Elasticsearch template provided with the
+<> maps
+the `[geoip][location]` field to an http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/mapping-geo-point-type.html#_mapping_options[Elasticsearch geo_point].
+
+As this field is a `geo_point` _and_ it is still valid GeoJSON, you get
+the awesomeness of Elasticsearch's geospatial query, facet and filter functions
+and the flexibility of having GeoJSON for all other applications (like Kibana's
+map visualization).
+
+[NOTE]
+--
+This product includes GeoLite2 data created by MaxMind, available from
+http://www.maxmind.com. This database is licensed under
+http://creativecommons.org/licenses/by-sa/4.0/[Creative Commons Attribution-ShareAlike 4.0 International License].
+
+Versions 4.0.0 and later of the GeoIP filter use the MaxMind GeoLite2 database
+and support both IPv4 and IPv6 lookups. Versions prior to 4.0.0 use the legacy
+MaxMind GeoLite database and support IPv4 lookups only.
+--
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Geoip Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cache_size>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-database>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-cache_size"]
+===== `cache_size`
+
+ * Value type is <>
+ * Default value is `1000`
+
+GeoIP lookup is surprisingly expensive. This filter uses a cache to take advantage of the fact that
+IP addresses are often found adjacent to one another in log files and rarely have a random distribution.
+The higher you set this value, the more likely an item is to be in the cache and the faster this filter will run.
+However, if you set it too high you can use more memory than desired.
+Since the upgrade to v2 of the GeoIP API, the cache has no eviction policy: once it is full, no more records can be added.
+Experiment with different values for this option to find the best performance for your dataset.
+
+This MUST be set to a value > 0. There is really no reason not to want this behavior; the overhead is minimal
+and the speed gains are large.
+
+It is important to note that this config value is global to the geoip_type. That is to say, all instances of the geoip filter
+of the same geoip_type share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit
+to having multiple caches for different instances at different points in the pipeline; that would just increase the
+number of cache misses and waste memory.
+
+[id="{version}-plugins-{type}s-{plugin}-database"]
+===== `database`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The path to MaxMind's database file that Logstash should use. The default database is GeoLite2-City.
+GeoLite2-City, GeoLite2-Country, and GeoLite2-ASN are the free databases from MaxMind that are supported.
+GeoIP2-City, GeoIP2-ISP, and GeoIP2-Country are the commercial databases from MaxMind that are supported.
+
+If not specified, this will default to the GeoLite2 City database that ships
+with Logstash.
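+
+For example, a sketch of pointing the filter at a database you downloaded
+yourself (the path below is hypothetical):
+
+[source,ruby]
+----
+filter {
+  geoip {
+    source   => "clientip"                             # example field name
+    database => "/etc/logstash/GeoLite2-Country.mmdb"  # hypothetical path
+  }
+}
+----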
+ +[id="{version}-plugins-{type}s-{plugin}-fields"] +===== `fields` + + * Value type is <> + * There is no default value for this setting. + +An array of geoip fields to be included in the event. + +Possible fields depend on the database type. By default, all geoip fields +are included in the event. + +For the built-in GeoLite2 City database, the following are available: +`city_name`, `continent_code`, `country_code2`, `country_code3`, `country_name`, +`dma_code`, `ip`, `latitude`, `longitude`, `postal_code`, `region_name` and `timezone`. + +[id="{version}-plugins-{type}s-{plugin}-lru_cache_size"] +===== `lru_cache_size` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `1000` + +GeoIP lookup is surprisingly expensive. This filter uses an LRU cache to take advantage of the fact that +IPs agents are often found adjacent to one another in log files and rarely have a random distribution. +The higher you set this the more likely an item is to be in the cache and the faster this filter will run. +However, if you set this too high you can use more memory than desired. + +Experiment with different values for this option to find the best performance for your dataset. + +This MUST be set to a value > 0. There is really no reason to not want this behavior, the overhead is minimal +and the speed gains are large. + +It is important to note that this config value is global to the geoip_type. That is to say all instances of the geoip filter +of the same geoip_type share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit +to having multiple caches for different instances at different points in the pipeline, that would just increase the +number of cache misses and waste memory. + +[id="{version}-plugins-{type}s-{plugin}-source"] +===== `source` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The field containing the IP address or hostname to map via geoip. If +this field is an array, only the first value will be used. + +[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] +===== `tag_on_failure` + + * Value type is <> + * Default value is `["_geoip_lookup_failure"]` + +Tags the event on failure to look up geo information. This can be used in later analysis. + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * Default value is `"geoip"` + +Specify the field into which Logstash should store the geoip data. +This can be useful, for example, if you have `src_ip` and `dst_ip` fields and +would like the GeoIP information of both IPs. + +If you save the data to a target field other than `geoip` and want to use the +`geo_point` related functions in Elasticsearch, you need to alter the template +provided with the Elasticsearch output and configure the output to use the +new template. + +Even if you don't use the `geo_point` mapping, the `[target][location]` field +is still valid GeoJSON. 
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/geoip-v4.3.0.asciidoc b/docs/versioned-plugins/filters/geoip-v4.3.0.asciidoc
new file mode 100644
index 000000000..b8ab940d7
--- /dev/null
+++ b/docs/versioned-plugins/filters/geoip-v4.3.0.asciidoc
@@ -0,0 +1,206 @@
+:plugin: geoip
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.3.0
+:release_date: 2017-07-17
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-geoip/blob/v4.3.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Geoip filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The GeoIP filter adds information about the geographical location of IP addresses,
+based on data from the MaxMind GeoLite2 databases.
+
+==== Supported Databases
+
+This plugin is bundled with the https://dev.maxmind.com/geoip/geoip2/geolite2[GeoLite2] City database out of the box. From MaxMind's description --
+"GeoLite2 databases are free IP geolocation databases comparable to, but less accurate than, MaxMind’s
+GeoIP2 databases". Please see the GeoLite2 license for more details.
+
+https://www.maxmind.com/en/geoip2-databases[Commercial databases] from MaxMind are also supported in this plugin.
+
+If you need to use databases other than the bundled GeoLite2 City, you can download them directly
+from MaxMind's website and use the `database` option to specify their location. The GeoLite2 databases
+can be downloaded from https://dev.maxmind.com/geoip/geoip2/geolite2[here].
+
+If you would like to get Autonomous System Number (ASN) information, you can use the GeoLite2-ASN database.
+
+==== Details
+
+A `[geoip][location]` field is created if
+the GeoIP lookup returns a latitude and longitude. The field is stored in
+http://geojson.org/geojson-spec.html[GeoJSON] format. Additionally,
+the default Elasticsearch template provided with the
+<> maps
+the `[geoip][location]` field to an http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/mapping-geo-point-type.html#_mapping_options[Elasticsearch geo_point].
+
+As this field is a `geo_point` _and_ it is still valid GeoJSON, you get
+the awesomeness of Elasticsearch's geospatial query, facet and filter functions
+and the flexibility of having GeoJSON for all other applications (like Kibana's
+map visualization).
+
+[NOTE]
+--
+This product includes GeoLite2 data created by MaxMind, available from
+http://www.maxmind.com. This database is licensed under
+http://creativecommons.org/licenses/by-sa/4.0/[Creative Commons Attribution-ShareAlike 4.0 International License].
+
+Versions 4.0.0 and later of the GeoIP filter use the MaxMind GeoLite2 database
+and support both IPv4 and IPv6 lookups. Versions prior to 4.0.0 use the legacy
+MaxMind GeoLite database and support IPv4 lookups only.
+--
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Geoip Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-cache_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-database>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-default_database_type>> |`City` or `ASN`|No +| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-cache_size"] +===== `cache_size` + + * Value type is <> + * Default value is `1000` + +GeoIP lookup is surprisingly expensive. This filter uses an cache to take advantage of the fact that +IPs agents are often found adjacent to one another in log files and rarely have a random distribution. +The higher you set this the more likely an item is to be in the cache and the faster this filter will run. +However, if you set this too high you can use more memory than desired. +Since the Geoip API upgraded to v2, there is not any eviction policy so far, if cache is full, no more record can be added. +Experiment with different values for this option to find the best performance for your dataset. + +This MUST be set to a value > 0. There is really no reason to not want this behavior, the overhead is minimal +and the speed gains are large. + +It is important to note that this config value is global to the geoip_type. That is to say all instances of the geoip filter +of the same geoip_type share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit +to having multiple caches for different instances at different points in the pipeline, that would just increase the +number of cache misses and waste memory. + +[id="{version}-plugins-{type}s-{plugin}-database"] +===== `database` + + * Value type is <> + * There is no default value for this setting. + +The path to Maxmind's database file that Logstash should use. The default database is GeoLite2-City. +GeoLite2-City, GeoLite2-Country, GeoLite2-ASN are the free databases from Maxmind that are supported. +GeoIP2-City, GeoIP2-ISP, GeoIP2-Country are the commercial databases from Maxmind that are supported. + +If not specified, this will default to the GeoLite2 City database that ships +with Logstash. + +[id="{version}-plugins-{type}s-{plugin}-default_database_type"] +===== `default_database_type` + +This plugin now includes both the GeoLite2-City and GeoLite2-ASN databases. If `database` and `default_database_type` are unset, the GeoLite2-City database will be selected. To use the included GeoLite2-ASN database, set `default_database_type` to `ASN`. + + * Value type is <> + * The default value is `City` + * The only acceptable values are `City` and `ASN` + +[id="{version}-plugins-{type}s-{plugin}-fields"] +===== `fields` + + * Value type is <> + * There is no default value for this setting. + +An array of geoip fields to be included in the event. + +Possible fields depend on the database type. By default, all geoip fields +are included in the event. 
+
+For the built-in GeoLite2 City database, the following are available:
+`city_name`, `continent_code`, `country_code2`, `country_code3`, `country_name`,
+`dma_code`, `ip`, `latitude`, `longitude`, `postal_code`, `region_name` and `timezone`.
+
+[id="{version}-plugins-{type}s-{plugin}-lru_cache_size"]
+===== `lru_cache_size` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * Default value is `1000`
+
+GeoIP lookup is surprisingly expensive. This filter uses an LRU cache to take advantage of the fact that
+IP addresses are often found adjacent to one another in log files and rarely have a random distribution.
+The higher you set this value, the more likely an item is to be in the cache and the faster this filter will run.
+However, if you set it too high you can use more memory than desired.
+
+Experiment with different values for this option to find the best performance for your dataset.
+
+This MUST be set to a value > 0. There is really no reason not to want this behavior; the overhead is minimal
+and the speed gains are large.
+
+It is important to note that this config value is global to the geoip_type. That is to say, all instances of the geoip filter
+of the same geoip_type share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit
+to having multiple caches for different instances at different points in the pipeline; that would just increase the
+number of cache misses and waste memory.
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The field containing the IP address or hostname to map via geoip. If
+this field is an array, only the first value will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
+===== `tag_on_failure`
+
+ * Value type is <>
+ * Default value is `["_geoip_lookup_failure"]`
+
+Tags the event on failure to look up geo information. This can be used in later analysis.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * Default value is `"geoip"`
+
+Specify the field into which Logstash should store the geoip data.
+This can be useful, for example, if you have `src_ip` and `dst_ip` fields and
+would like the GeoIP information of both IPs.
+
+If you save the data to a target field other than `geoip` and want to use the
+`geo_point` related functions in Elasticsearch, you need to alter the template
+provided with the Elasticsearch output and configure the output to use the
+new template.
+
+Even if you don't use the `geo_point` mapping, the `[target][location]` field
+is still valid GeoJSON.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/geoip-v4.3.1.asciidoc b/docs/versioned-plugins/filters/geoip-v4.3.1.asciidoc
new file mode 100644
index 000000000..a7bb66dee
--- /dev/null
+++ b/docs/versioned-plugins/filters/geoip-v4.3.1.asciidoc
@@ -0,0 +1,206 @@
+:plugin: geoip
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.3.1
+:release_date: 2017-08-18
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-geoip/blob/v4.3.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Geoip filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The GeoIP filter adds information about the geographical location of IP addresses,
+based on data from the MaxMind GeoLite2 databases.
+
+==== Supported Databases
+
+This plugin is bundled with the https://dev.maxmind.com/geoip/geoip2/geolite2[GeoLite2] City database out of the box. From MaxMind's description --
+"GeoLite2 databases are free IP geolocation databases comparable to, but less accurate than, MaxMind’s
+GeoIP2 databases". Please see the GeoLite2 license for more details.
+
+https://www.maxmind.com/en/geoip2-databases[Commercial databases] from MaxMind are also supported in this plugin.
+
+If you need to use databases other than the bundled GeoLite2 City, you can download them directly
+from MaxMind's website and use the `database` option to specify their location. The GeoLite2 databases
+can be downloaded from https://dev.maxmind.com/geoip/geoip2/geolite2[here].
+
+If you would like to get Autonomous System Number (ASN) information, you can use the GeoLite2-ASN database.
+
+==== Details
+
+A `[geoip][location]` field is created if
+the GeoIP lookup returns a latitude and longitude. The field is stored in
+http://geojson.org/geojson-spec.html[GeoJSON] format. Additionally,
+the default Elasticsearch template provided with the
+<> maps
+the `[geoip][location]` field to an http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/mapping-geo-point-type.html#_mapping_options[Elasticsearch geo_point].
+
+As this field is a `geo_point` _and_ it is still valid GeoJSON, you get
+the awesomeness of Elasticsearch's geospatial query, facet and filter functions
+and the flexibility of having GeoJSON for all other applications (like Kibana's
+map visualization).
+
+[NOTE]
+--
+This product includes GeoLite2 data created by MaxMind, available from
+http://www.maxmind.com. This database is licensed under
+http://creativecommons.org/licenses/by-sa/4.0/[Creative Commons Attribution-ShareAlike 4.0 International License].
+
+Versions 4.0.0 and later of the GeoIP filter use the MaxMind GeoLite2 database
+and support both IPv4 and IPv6 lookups. Versions prior to 4.0.0 use the legacy
+MaxMind GeoLite database and support IPv4 lookups only.
+--
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Geoip Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-cache_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-database>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-default_database_type>> |`City` or `ASN`|No +| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-cache_size"] +===== `cache_size` + + * Value type is <> + * Default value is `1000` + +GeoIP lookup is surprisingly expensive. This filter uses an cache to take advantage of the fact that +IPs agents are often found adjacent to one another in log files and rarely have a random distribution. +The higher you set this the more likely an item is to be in the cache and the faster this filter will run. +However, if you set this too high you can use more memory than desired. +Since the Geoip API upgraded to v2, there is not any eviction policy so far, if cache is full, no more record can be added. +Experiment with different values for this option to find the best performance for your dataset. + +This MUST be set to a value > 0. There is really no reason to not want this behavior, the overhead is minimal +and the speed gains are large. + +It is important to note that this config value is global to the geoip_type. That is to say all instances of the geoip filter +of the same geoip_type share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit +to having multiple caches for different instances at different points in the pipeline, that would just increase the +number of cache misses and waste memory. + +[id="{version}-plugins-{type}s-{plugin}-database"] +===== `database` + + * Value type is <> + * There is no default value for this setting. + +The path to Maxmind's database file that Logstash should use. The default database is GeoLite2-City. +GeoLite2-City, GeoLite2-Country, GeoLite2-ASN are the free databases from Maxmind that are supported. +GeoIP2-City, GeoIP2-ISP, GeoIP2-Country are the commercial databases from Maxmind that are supported. + +If not specified, this will default to the GeoLite2 City database that ships +with Logstash. + +[id="{version}-plugins-{type}s-{plugin}-default_database_type"] +===== `default_database_type` + +This plugin now includes both the GeoLite2-City and GeoLite2-ASN databases. If `database` and `default_database_type` are unset, the GeoLite2-City database will be selected. To use the included GeoLite2-ASN database, set `default_database_type` to `ASN`. + + * Value type is <> + * The default value is `City` + * The only acceptable values are `City` and `ASN` + +[id="{version}-plugins-{type}s-{plugin}-fields"] +===== `fields` + + * Value type is <> + * There is no default value for this setting. + +An array of geoip fields to be included in the event. + +Possible fields depend on the database type. By default, all geoip fields +are included in the event. 
+
+For the built-in GeoLite2 City database, the following are available:
+`city_name`, `continent_code`, `country_code2`, `country_code3`, `country_name`,
+`dma_code`, `ip`, `latitude`, `longitude`, `postal_code`, `region_name` and `timezone`.
+
+[id="{version}-plugins-{type}s-{plugin}-lru_cache_size"]
+===== `lru_cache_size` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * Default value is `1000`
+
+GeoIP lookup is surprisingly expensive. This filter uses an LRU cache to take advantage of the fact that
+IP addresses are often found adjacent to one another in log files and rarely have a random distribution.
+The higher you set this value, the more likely an item is to be in the cache and the faster this filter will run.
+However, if you set it too high you can use more memory than desired.
+
+Experiment with different values for this option to find the best performance for your dataset.
+
+This MUST be set to a value > 0. There is really no reason not to want this behavior; the overhead is minimal
+and the speed gains are large.
+
+It is important to note that this config value is global to the geoip_type. That is to say, all instances of the geoip filter
+of the same geoip_type share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit
+to having multiple caches for different instances at different points in the pipeline; that would just increase the
+number of cache misses and waste memory.
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The field containing the IP address or hostname to map via geoip. If
+this field is an array, only the first value will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
+===== `tag_on_failure`
+
+ * Value type is <>
+ * Default value is `["_geoip_lookup_failure"]`
+
+Tags the event on failure to look up geo information. This can be used in later analysis.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * Default value is `"geoip"`
+
+Specify the field into which Logstash should store the geoip data.
+This can be useful, for example, if you have `src_ip` and `dst_ip` fields and
+would like the GeoIP information of both IPs.
+
+If you save the data to a target field other than `geoip` and want to use the
+`geo_point` related functions in Elasticsearch, you need to alter the template
+provided with the Elasticsearch output and configure the output to use the
+new template.
+
+Even if you don't use the `geo_point` mapping, the `[target][location]` field
+is still valid GeoJSON.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/geoip-v5.0.0.asciidoc b/docs/versioned-plugins/filters/geoip-v5.0.0.asciidoc
new file mode 100644
index 000000000..2628074cb
--- /dev/null
+++ b/docs/versioned-plugins/filters/geoip-v5.0.0.asciidoc
@@ -0,0 +1,184 @@
+:plugin: geoip
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.0
+:release_date: 2017-08-01
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-geoip/blob/v5.0.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Geoip filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The GeoIP filter adds information about the geographical location of IP addresses,
+based on data from the MaxMind GeoLite2 databases.
+
+==== Supported Databases
+
+This plugin is bundled with the https://dev.maxmind.com/geoip/geoip2/geolite2[GeoLite2] City database out of the box. From MaxMind's description --
+"GeoLite2 databases are free IP geolocation databases comparable to, but less accurate than, MaxMind’s
+GeoIP2 databases". Please see the GeoLite2 license for more details.
+
+https://www.maxmind.com/en/geoip2-databases[Commercial databases] from MaxMind are also supported in this plugin.
+
+If you need to use databases other than the bundled GeoLite2 City, you can download them directly
+from MaxMind's website and use the `database` option to specify their location. The GeoLite2 databases
+can be downloaded from https://dev.maxmind.com/geoip/geoip2/geolite2[here].
+
+If you would like to get Autonomous System Number (ASN) information, you can use the GeoLite2-ASN database.
+
+==== Details
+
+A `[geoip][location]` field is created if
+the GeoIP lookup returns a latitude and longitude. The field is stored in
+http://geojson.org/geojson-spec.html[GeoJSON] format. Additionally,
+the default Elasticsearch template provided with the
+<> maps
+the `[geoip][location]` field to an http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/mapping-geo-point-type.html#_mapping_options[Elasticsearch geo_point].
+
+As this field is a `geo_point` _and_ it is still valid GeoJSON, you get
+the awesomeness of Elasticsearch's geospatial query, facet and filter functions
+and the flexibility of having GeoJSON for all other applications (like Kibana's
+map visualization).
+
+[NOTE]
+--
+This product includes GeoLite2 data created by MaxMind, available from
+http://www.maxmind.com. This database is licensed under
+http://creativecommons.org/licenses/by-sa/4.0/[Creative Commons Attribution-ShareAlike 4.0 International License].
+
+Versions 4.0.0 and later of the GeoIP filter use the MaxMind GeoLite2 database
+and support both IPv4 and IPv6 lookups. Versions prior to 4.0.0 use the legacy
+MaxMind GeoLite database and support IPv4 lookups only.
+--
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Geoip Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-cache_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-database>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-default_database_type>> |`City` or `ASN`|No +| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-cache_size"] +===== `cache_size` + + * Value type is <> + * Default value is `1000` + +GeoIP lookup is surprisingly expensive. This filter uses an cache to take advantage of the fact that +IPs agents are often found adjacent to one another in log files and rarely have a random distribution. +The higher you set this the more likely an item is to be in the cache and the faster this filter will run. +However, if you set this too high you can use more memory than desired. +Since the Geoip API upgraded to v2, there is not any eviction policy so far, if cache is full, no more record can be added. +Experiment with different values for this option to find the best performance for your dataset. + +This MUST be set to a value > 0. There is really no reason to not want this behavior, the overhead is minimal +and the speed gains are large. + +It is important to note that this config value is global to the geoip_type. That is to say all instances of the geoip filter +of the same geoip_type share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit +to having multiple caches for different instances at different points in the pipeline, that would just increase the +number of cache misses and waste memory. + +[id="{version}-plugins-{type}s-{plugin}-database"] +===== `database` + + * Value type is <> + * There is no default value for this setting. + +The path to Maxmind's database file that Logstash should use. The default database is GeoLite2-City. +GeoLite2-City, GeoLite2-Country, GeoLite2-ASN are the free databases from Maxmind that are supported. +GeoIP2-City, GeoIP2-ISP, GeoIP2-Country are the commercial databases from Maxmind that are supported. + +If not specified, this will default to the GeoLite2 City database that ships +with Logstash. + +[id="{version}-plugins-{type}s-{plugin}-default_database_type"] +===== `default_database_type` + +This plugin now includes both the GeoLite2-City and GeoLite2-ASN databases. If `database` and `default_database_type` are unset, the GeoLite2-City database will be selected. To use the included GeoLite2-ASN database, set `default_database_type` to `ASN`. + + * Value type is <> + * The default value is `City` + * The only acceptable values are `City` and `ASN` + +[id="{version}-plugins-{type}s-{plugin}-fields"] +===== `fields` + + * Value type is <> + * There is no default value for this setting. + +An array of geoip fields to be included in the event. + +Possible fields depend on the database type. By default, all geoip fields +are included in the event. 
+
+For the built-in GeoLite2 City database, the following are available:
+`city_name`, `continent_code`, `country_code2`, `country_code3`, `country_name`,
+`dma_code`, `ip`, `latitude`, `longitude`, `postal_code`, `region_name`, and `timezone`.
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The field containing the IP address or hostname to map via geoip. If
+this field is an array, only the first value will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
+===== `tag_on_failure`
+
+ * Value type is <<array,array>>
+ * Default value is `["_geoip_lookup_failure"]`
+
+Tags the event on failure to look up geo information. This can be used in later analysis.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <<string,string>>
+ * Default value is `"geoip"`
+
+Specify the field into which Logstash should store the geoip data.
+This can be useful, for example, if you have `src_ip` and `dst_ip` fields and
+would like the GeoIP information of both IPs.
+
+If you save the data to a target field other than `geoip` and want to use the
+`geo_point` related functions in Elasticsearch, you need to alter the template
+provided with the Elasticsearch output and configure the output to use the
+new template.
+
+Even if you don't use the `geo_point` mapping, the `[target][location]` field
+is still valid GeoJSON.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/geoip-v5.0.1.asciidoc b/docs/versioned-plugins/filters/geoip-v5.0.1.asciidoc
new file mode 100644
index 000000000..810f950a5
--- /dev/null
+++ b/docs/versioned-plugins/filters/geoip-v5.0.1.asciidoc
@@ -0,0 +1,184 @@
+:plugin: geoip
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.1
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-geoip/blob/v5.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Geoip filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The GeoIP filter adds information about the geographical location of IP addresses,
+based on data from the MaxMind GeoLite2 databases.
+
+==== Supported Databases
+
+This plugin is bundled with the https://dev.maxmind.com/geoip/geoip2/geolite2[GeoLite2] City database out of the box. From MaxMind's description --
+"GeoLite2 databases are free IP geolocation databases comparable to, but less accurate than, MaxMind’s
+GeoIP2 databases". Please see the GeoLite2 license for more details.
+
+https://www.maxmind.com/en/geoip2-databases[Commercial databases] from MaxMind are also supported in this plugin.
+
+If you need to use databases other than the bundled GeoLite2 City, you can download them directly
+from MaxMind's website and use the `database` option to specify their location. The GeoLite2 databases
+can be downloaded from https://dev.maxmind.com/geoip/geoip2/geolite2[here].
+
+If you would like to get Autonomous System Number (ASN) information, you can use the GeoLite2-ASN database.
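+
+For example, a minimal sketch of pointing the filter at a database downloaded directly from MaxMind (the filesystem path and the `clientip` field name below are illustrative, not defaults of this plugin):
+
+[source,ruby]
+    filter {
+      geoip {
+        source   => "clientip"
+        database => "/etc/logstash/GeoLite2-City.mmdb"
+      }
+    }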
+
+==== Details
+
+A `[geoip][location]` field is created if
+the GeoIP lookup returns a latitude and longitude. The field is stored in
+http://geojson.org/geojson-spec.html[GeoJSON] format. Additionally,
+the default Elasticsearch template provided with the
+<<plugins-outputs-elasticsearch,`elasticsearch` output>> maps
+the `[geoip][location]` field to an http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/mapping-geo-point-type.html#_mapping_options[Elasticsearch geo_point].
+
+As this field is a `geo_point` _and_ it is still valid GeoJSON, you get
+the awesomeness of Elasticsearch's geospatial query, facet, and filter functions
+and the flexibility of having GeoJSON for all other applications (like Kibana's
+map visualization).
+
+[NOTE]
+--
+This product includes GeoLite2 data created by MaxMind, available from
+http://www.maxmind.com. This database is licensed under the
+http://creativecommons.org/licenses/by-sa/4.0/[Creative Commons Attribution-ShareAlike 4.0 International License].
+
+Versions 4.0.0 and later of the GeoIP filter use the MaxMind GeoLite2 database
+and support both IPv4 and IPv6 lookups. Versions prior to 4.0.0 use the legacy
+MaxMind GeoLite database and support IPv4 lookups only.
+--
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Geoip Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cache_size>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-database>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-default_database_type>> |`City` or `ASN`|No
+| <<{version}-plugins-{type}s-{plugin}-fields>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-source>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-cache_size"]
+===== `cache_size`
+
+ * Value type is <<number,number>>
+ * Default value is `1000`
+
+GeoIP lookup is surprisingly expensive. This filter uses a cache to take advantage of the fact that
+IP addresses are often found adjacent to one another in log files and rarely have a random distribution.
+The higher you set this value, the more likely an item is to be in the cache and the faster this filter will run.
+However, if you set this too high, you can use more memory than desired.
+Since the GeoIP API was upgraded to v2, there is no eviction policy; once the cache is full, no more records can be added.
+Experiment with different values for this option to find the best performance for your dataset.
+
+This MUST be set to a value > 0. There is really no reason not to want this behavior; the overhead is minimal
+and the speed gains are large.
+
+It is important to note that this config value is global to the geoip_type. That is to say, all instances of the geoip filter
+of the same geoip_type share the same cache. The last declared cache size 'wins'. The reason for this is that there would be no benefit
+to having multiple caches for different instances at different points in the pipeline; that would just increase the
+number of cache misses and waste memory.
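+
+For example, a sketch of raising the cache size for a pipeline that looks up many recurring addresses (the `clientip` field name and the value are illustrative):
+
+[source,ruby]
+    filter {
+      geoip {
+        source     => "clientip"
+        cache_size => 10000
+      }
+    }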
+ +[id="{version}-plugins-{type}s-{plugin}-database"] +===== `database` + + * Value type is <> + * There is no default value for this setting. + +The path to Maxmind's database file that Logstash should use. The default database is GeoLite2-City. +GeoLite2-City, GeoLite2-Country, GeoLite2-ASN are the free databases from Maxmind that are supported. +GeoIP2-City, GeoIP2-ISP, GeoIP2-Country are the commercial databases from Maxmind that are supported. + +If not specified, this will default to the GeoLite2 City database that ships +with Logstash. + +[id="{version}-plugins-{type}s-{plugin}-default_database_type"] +===== `default_database_type` + +This plugin now includes both the GeoLite2-City and GeoLite2-ASN databases. If `database` and `default_database_type` are unset, the GeoLite2-City database will be selected. To use the included GeoLite2-ASN database, set `default_database_type` to `ASN`. + + * Value type is <> + * The default value is `City` + * The only acceptable values are `City` and `ASN` + +[id="{version}-plugins-{type}s-{plugin}-fields"] +===== `fields` + + * Value type is <> + * There is no default value for this setting. + +An array of geoip fields to be included in the event. + +Possible fields depend on the database type. By default, all geoip fields +are included in the event. + +For the built-in GeoLite2 City database, the following are available: +`city_name`, `continent_code`, `country_code2`, `country_code3`, `country_name`, +`dma_code`, `ip`, `latitude`, `longitude`, `postal_code`, `region_name` and `timezone`. + +[id="{version}-plugins-{type}s-{plugin}-source"] +===== `source` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The field containing the IP address or hostname to map via geoip. If +this field is an array, only the first value will be used. + +[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] +===== `tag_on_failure` + + * Value type is <> + * Default value is `["_geoip_lookup_failure"]` + +Tags the event on failure to look up geo information. This can be used in later analysis. + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * Default value is `"geoip"` + +Specify the field into which Logstash should store the geoip data. +This can be useful, for example, if you have `src_ip` and `dst_ip` fields and +would like the GeoIP information of both IPs. + +If you save the data to a target field other than `geoip` and want to use the +`geo_point` related functions in Elasticsearch, you need to alter the template +provided with the Elasticsearch output and configure the output to use the +new template. + +Even if you don't use the `geo_point` mapping, the `[target][location]` field +is still valid GeoJSON. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/geoip-v5.0.2.asciidoc b/docs/versioned-plugins/filters/geoip-v5.0.2.asciidoc new file mode 100644 index 000000000..e0a3bfd72 --- /dev/null +++ b/docs/versioned-plugins/filters/geoip-v5.0.2.asciidoc @@ -0,0 +1,184 @@ +:plugin: geoip +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+:version: v5.0.2
+:release_date: 2017-11-13
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-geoip/blob/v5.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Geoip filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The GeoIP filter adds information about the geographical location of IP addresses,
+based on data from the MaxMind GeoLite2 databases.
+
+==== Supported Databases
+
+This plugin is bundled with the https://dev.maxmind.com/geoip/geoip2/geolite2[GeoLite2] City database out of the box. From MaxMind's description --
+"GeoLite2 databases are free IP geolocation databases comparable to, but less accurate than, MaxMind’s
+GeoIP2 databases". Please see the GeoLite2 license for more details.
+
+https://www.maxmind.com/en/geoip2-databases[Commercial databases] from MaxMind are also supported in this plugin.
+
+If you need to use databases other than the bundled GeoLite2 City, you can download them directly
+from MaxMind's website and use the `database` option to specify their location. The GeoLite2 databases
+can be downloaded from https://dev.maxmind.com/geoip/geoip2/geolite2[here].
+
+If you would like to get Autonomous System Number (ASN) information, you can use the GeoLite2-ASN database.
+
+==== Details
+
+A `[geoip][location]` field is created if
+the GeoIP lookup returns a latitude and longitude. The field is stored in
+http://geojson.org/geojson-spec.html[GeoJSON] format. Additionally,
+the default Elasticsearch template provided with the
+<<plugins-outputs-elasticsearch,`elasticsearch` output>> maps
+the `[geoip][location]` field to an http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/mapping-geo-point-type.html#_mapping_options[Elasticsearch geo_point].
+
+As this field is a `geo_point` _and_ it is still valid GeoJSON, you get
+the awesomeness of Elasticsearch's geospatial query, facet, and filter functions
+and the flexibility of having GeoJSON for all other applications (like Kibana's
+map visualization).
+
+[NOTE]
+--
+This product includes GeoLite2 data created by MaxMind, available from
+http://www.maxmind.com. This database is licensed under the
+http://creativecommons.org/licenses/by-sa/4.0/[Creative Commons Attribution-ShareAlike 4.0 International License].
+
+Versions 4.0.0 and later of the GeoIP filter use the MaxMind GeoLite2 database
+and support both IPv4 and IPv6 lookups. Versions prior to 4.0.0 use the legacy
+MaxMind GeoLite database and support IPv4 lookups only.
+--
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Geoip Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cache_size>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-database>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-default_database_type>> |`City` or `ASN`|No
+| <<{version}-plugins-{type}s-{plugin}-fields>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-source>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-cache_size"]
+===== `cache_size`
+
+ * Value type is <<number,number>>
+ * Default value is `1000`
+
+GeoIP lookup is surprisingly expensive. This filter uses a cache to take advantage of the fact that
+IP addresses are often found adjacent to one another in log files and rarely have a random distribution.
+The higher you set this value, the more likely an item is to be in the cache and the faster this filter will run.
+However, if you set this too high, you can use more memory than desired.
+Since the GeoIP API was upgraded to v2, there is no eviction policy; once the cache is full, no more records can be added.
+Experiment with different values for this option to find the best performance for your dataset.
+
+This MUST be set to a value > 0. There is really no reason not to want this behavior; the overhead is minimal
+and the speed gains are large.
+
+It is important to note that this config value is global to the geoip_type. That is to say, all instances of the geoip filter
+of the same geoip_type share the same cache. The last declared cache size 'wins'. The reason for this is that there would be no benefit
+to having multiple caches for different instances at different points in the pipeline; that would just increase the
+number of cache misses and waste memory.
+
+[id="{version}-plugins-{type}s-{plugin}-database"]
+===== `database`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The path to MaxMind's database file that Logstash should use. The default database is GeoLite2-City.
+GeoLite2-City, GeoLite2-Country, and GeoLite2-ASN are the free databases from MaxMind that are supported.
+GeoIP2-City, GeoIP2-ISP, and GeoIP2-Country are the commercial databases from MaxMind that are supported.
+
+If not specified, this will default to the GeoLite2 City database that ships
+with Logstash.
+
+[id="{version}-plugins-{type}s-{plugin}-default_database_type"]
+===== `default_database_type`
+
+This plugin now includes both the GeoLite2-City and GeoLite2-ASN databases. If `database` and `default_database_type` are unset, the GeoLite2-City database will be selected. To use the included GeoLite2-ASN database, set `default_database_type` to `ASN`.
+
+ * Value type is <<string,string>>
+ * The default value is `City`
+ * The only acceptable values are `City` and `ASN`
+
+[id="{version}-plugins-{type}s-{plugin}-fields"]
+===== `fields`
+
+ * Value type is <<array,array>>
+ * There is no default value for this setting.
+
+An array of geoip fields to be included in the event.
+
+Possible fields depend on the database type. By default, all geoip fields
+are included in the event.
+
+For the built-in GeoLite2 City database, the following are available:
+`city_name`, `continent_code`, `country_code2`, `country_code3`, `country_name`,
+`dma_code`, `ip`, `latitude`, `longitude`, `postal_code`, `region_name`, and `timezone`.
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The field containing the IP address or hostname to map via geoip. If
+this field is an array, only the first value will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
+===== `tag_on_failure`
+
+ * Value type is <<array,array>>
+ * Default value is `["_geoip_lookup_failure"]`
+
+Tags the event on failure to look up geo information. This can be used in later analysis.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <<string,string>>
+ * Default value is `"geoip"`
+
+Specify the field into which Logstash should store the geoip data.
+This can be useful, for example, if you have `src_ip` and `dst_ip` fields and
+would like the GeoIP information of both IPs.
+
+If you save the data to a target field other than `geoip` and want to use the
+`geo_point` related functions in Elasticsearch, you need to alter the template
+provided with the Elasticsearch output and configure the output to use the
+new template.
+
+Even if you don't use the `geo_point` mapping, the `[target][location]` field
+is still valid GeoJSON.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/geoip-v5.0.3.asciidoc b/docs/versioned-plugins/filters/geoip-v5.0.3.asciidoc
new file mode 100644
index 000000000..ff12e497c
--- /dev/null
+++ b/docs/versioned-plugins/filters/geoip-v5.0.3.asciidoc
@@ -0,0 +1,184 @@
+:plugin: geoip
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.3
+:release_date: 2017-12-21
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-geoip/blob/v5.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Geoip filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The GeoIP filter adds information about the geographical location of IP addresses,
+based on data from the MaxMind GeoLite2 databases.
+
+==== Supported Databases
+
+This plugin is bundled with the https://dev.maxmind.com/geoip/geoip2/geolite2[GeoLite2] City database out of the box. From MaxMind's description --
+"GeoLite2 databases are free IP geolocation databases comparable to, but less accurate than, MaxMind’s
+GeoIP2 databases". Please see the GeoLite2 license for more details.
+
+https://www.maxmind.com/en/geoip2-databases[Commercial databases] from MaxMind are also supported in this plugin.
+
+If you need to use databases other than the bundled GeoLite2 City, you can download them directly
+from MaxMind's website and use the `database` option to specify their location. The GeoLite2 databases
+can be downloaded from https://dev.maxmind.com/geoip/geoip2/geolite2[here].
+
+If you would like to get Autonomous System Number (ASN) information, you can use the GeoLite2-ASN database.
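+
+For example, a sketch of selecting the bundled GeoLite2-ASN database instead of the default City database (the `clientip` field name is illustrative):
+
+[source,ruby]
+    filter {
+      geoip {
+        source                => "clientip"
+        default_database_type => "ASN"
+      }
+    }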
+
+==== Details
+
+A `[geoip][location]` field is created if
+the GeoIP lookup returns a latitude and longitude. The field is stored in
+http://geojson.org/geojson-spec.html[GeoJSON] format. Additionally,
+the default Elasticsearch template provided with the
+<<plugins-outputs-elasticsearch,`elasticsearch` output>> maps
+the `[geoip][location]` field to an http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/mapping-geo-point-type.html#_mapping_options[Elasticsearch geo_point].
+
+As this field is a `geo_point` _and_ it is still valid GeoJSON, you get
+the awesomeness of Elasticsearch's geospatial query, facet, and filter functions
+and the flexibility of having GeoJSON for all other applications (like Kibana's
+map visualization).
+
+[NOTE]
+--
+This product includes GeoLite2 data created by MaxMind, available from
+http://www.maxmind.com. This database is licensed under the
+http://creativecommons.org/licenses/by-sa/4.0/[Creative Commons Attribution-ShareAlike 4.0 International License].
+
+Versions 4.0.0 and later of the GeoIP filter use the MaxMind GeoLite2 database
+and support both IPv4 and IPv6 lookups. Versions prior to 4.0.0 use the legacy
+MaxMind GeoLite database and support IPv4 lookups only.
+--
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Geoip Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cache_size>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-database>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-default_database_type>> |`City` or `ASN`|No
+| <<{version}-plugins-{type}s-{plugin}-fields>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-source>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-cache_size"]
+===== `cache_size`
+
+ * Value type is <<number,number>>
+ * Default value is `1000`
+
+GeoIP lookup is surprisingly expensive. This filter uses a cache to take advantage of the fact that
+IP addresses are often found adjacent to one another in log files and rarely have a random distribution.
+The higher you set this value, the more likely an item is to be in the cache and the faster this filter will run.
+However, if you set this too high, you can use more memory than desired.
+Since the GeoIP API was upgraded to v2, there is no eviction policy; once the cache is full, no more records can be added.
+Experiment with different values for this option to find the best performance for your dataset.
+
+This MUST be set to a value > 0. There is really no reason not to want this behavior; the overhead is minimal
+and the speed gains are large.
+
+It is important to note that this config value is global to the geoip_type. That is to say, all instances of the geoip filter
+of the same geoip_type share the same cache. The last declared cache size 'wins'. The reason for this is that there would be no benefit
+to having multiple caches for different instances at different points in the pipeline; that would just increase the
+number of cache misses and waste memory.
+ +[id="{version}-plugins-{type}s-{plugin}-database"] +===== `database` + + * Value type is <> + * There is no default value for this setting. + +The path to Maxmind's database file that Logstash should use. The default database is GeoLite2-City. +GeoLite2-City, GeoLite2-Country, GeoLite2-ASN are the free databases from Maxmind that are supported. +GeoIP2-City, GeoIP2-ISP, GeoIP2-Country are the commercial databases from Maxmind that are supported. + +If not specified, this will default to the GeoLite2 City database that ships +with Logstash. + +[id="{version}-plugins-{type}s-{plugin}-default_database_type"] +===== `default_database_type` + +This plugin now includes both the GeoLite2-City and GeoLite2-ASN databases. If `database` and `default_database_type` are unset, the GeoLite2-City database will be selected. To use the included GeoLite2-ASN database, set `default_database_type` to `ASN`. + + * Value type is <> + * The default value is `City` + * The only acceptable values are `City` and `ASN` + +[id="{version}-plugins-{type}s-{plugin}-fields"] +===== `fields` + + * Value type is <> + * There is no default value for this setting. + +An array of geoip fields to be included in the event. + +Possible fields depend on the database type. By default, all geoip fields +are included in the event. + +For the built-in GeoLite2 City database, the following are available: +`city_name`, `continent_code`, `country_code2`, `country_code3`, `country_name`, +`dma_code`, `ip`, `latitude`, `longitude`, `postal_code`, `region_name` and `timezone`. + +[id="{version}-plugins-{type}s-{plugin}-source"] +===== `source` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The field containing the IP address or hostname to map via geoip. If +this field is an array, only the first value will be used. + +[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] +===== `tag_on_failure` + + * Value type is <> + * Default value is `["_geoip_lookup_failure"]` + +Tags the event on failure to look up geo information. This can be used in later analysis. + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * Default value is `"geoip"` + +Specify the field into which Logstash should store the geoip data. +This can be useful, for example, if you have `src_ip` and `dst_ip` fields and +would like the GeoIP information of both IPs. + +If you save the data to a target field other than `geoip` and want to use the +`geo_point` related functions in Elasticsearch, you need to alter the template +provided with the Elasticsearch output and configure the output to use the +new template. + +Even if you don't use the `geo_point` mapping, the `[target][location]` field +is still valid GeoJSON. 
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/grok-index.asciidoc b/docs/versioned-plugins/filters/grok-index.asciidoc
new file mode 100644
index 000000000..7dcfee5c2
--- /dev/null
+++ b/docs/versioned-plugins/filters/grok-index.asciidoc
@@ -0,0 +1,22 @@
+:plugin: grok
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <<v4.0.1-plugins-filters-grok,v4.0.1>> | 2017-12-18
+| <<v4.0.0-plugins-filters-grok,v4.0.0>> | 2017-11-27
+| <<v3.4.4-plugins-filters-grok,v3.4.4>> | 2017-11-07
+| <<v3.4.3-plugins-filters-grok,v3.4.3>> | 2017-08-15
+| <<v3.4.2-plugins-filters-grok,v3.4.2>> | 2017-06-23
+| <<v3.4.1-plugins-filters-grok,v3.4.1>> | 2017-05-10
+|=======================================================================
+
+include::grok-v4.0.1.asciidoc[]
+include::grok-v4.0.0.asciidoc[]
+include::grok-v3.4.4.asciidoc[]
+include::grok-v3.4.3.asciidoc[]
+include::grok-v3.4.2.asciidoc[]
+include::grok-v3.4.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/grok-v3.4.1.asciidoc b/docs/versioned-plugins/filters/grok-v3.4.1.asciidoc
new file mode 100644
index 000000000..1df98144a
--- /dev/null
+++ b/docs/versioned-plugins/filters/grok-v3.4.1.asciidoc
@@ -0,0 +1,332 @@
+:plugin: grok
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.4.1
+:release_date: 2017-05-10
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-grok/blob/v3.4.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Grok filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Parse arbitrary text and structure it.
+
+Grok is currently the best way in logstash to parse unstructured log
+data into something structured and queryable.
+
+This tool is perfect for syslog logs, apache and other webserver logs, mysql
+logs, and in general, any log format that is generally written for humans
+and not computer consumption.
+
+Logstash ships with about 120 patterns by default. You can find them here:
+https://github.com/logstash-plugins/logstash-patterns-core/tree/master/patterns. You can add
+your own trivially. (See the `patterns_dir` setting.)
+
+If you need help building patterns to match your logs, you will find the
+http://grokdebug.herokuapp.com and http://grokconstructor.appspot.com/ applications quite useful!
+
+==== Grok Basics
+
+Grok works by combining text patterns into something that matches your
+logs.
+
+The syntax for a grok pattern is `%{SYNTAX:SEMANTIC}`.
+
+The `SYNTAX` is the name of the pattern that will match your text. For
+example, `3.44` will be matched by the `NUMBER` pattern and `55.3.244.1` will
+be matched by the `IP` pattern. The syntax is how you match.
+
+The `SEMANTIC` is the identifier you give to the piece of text being matched.
+For example, `3.44` could be the duration of an event, so you could call it
+simply `duration`. Further, a string `55.3.244.1` might identify the `client`
+making a request.
+
+For the above example, your grok filter would look something like this:
+[source,ruby]
+%{NUMBER:duration} %{IP:client}
+
+Optionally you can add a data type conversion to your grok pattern. By default
+all semantics are saved as strings. If you wish to convert a semantic's data type,
+for example change a string to an integer, suffix it with the target data type.
+For example, `%{NUMBER:num:int}` converts the `num` semantic from a string to an
+integer.
+Currently the only supported conversions are `int` and `float`.
+
+.Examples:
+
+With that idea of a syntax and semantic, we can pull out useful fields from a
+sample log like this fictional http request log:
+[source,ruby]
+    55.3.244.1 GET /index.html 15824 0.043
+
+The pattern for this could be:
+[source,ruby]
+    %{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}
+
+For a more realistic example, let's read these logs from a file:
+[source,ruby]
+    input {
+      file {
+        path => "/var/log/http.log"
+      }
+    }
+    filter {
+      grok {
+        match => { "message" => "%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}" }
+      }
+    }
+
+After the grok filter, the event will have a few extra fields in it:
+
+* `client: 55.3.244.1`
+* `method: GET`
+* `request: /index.html`
+* `bytes: 15824`
+* `duration: 0.043`
+
+==== Regular Expressions
+
+Grok sits on top of regular expressions, so any regular expressions are valid
+in grok as well. The regular expression library is Oniguruma, and you can see
+the full supported regexp syntax https://github.com/kkos/oniguruma/blob/master/doc/RE[on the Oniguruma
+site].
+
+==== Custom Patterns
+
+Sometimes logstash doesn't have a pattern you need. For this, you have
+a few options.
+
+First, you can use the Oniguruma syntax for named capture, which will
+let you match a piece of text and save it as a field:
+[source,ruby]
+    (?<field_name>the pattern here)
+
+For example, postfix logs have a `queue id` that is a 10- or 11-character
+hexadecimal value. I can capture that easily like this:
+[source,ruby]
+    (?<queue_id>[0-9A-F]{10,11})
+
+Alternately, you can create a custom patterns file.
+
+* Create a directory called `patterns` with a file in it called `extra`
+  (the file name doesn't matter, but name it meaningfully for yourself)
+* In that file, write the pattern you need as the pattern name, a space, then
+  the regexp for that pattern.
+
+For example, doing the postfix queue id example as above:
+[source,ruby]
+    # contents of ./patterns/postfix:
+    POSTFIX_QUEUEID [0-9A-F]{10,11}
+
+Then use the `patterns_dir` setting in this plugin to tell logstash where
+your custom patterns directory is. Here's a full example with a sample log:
+[source,ruby]
+    Jan 1 06:25:43 mailserver14 postfix/cleanup[21403]: BEF25A72965: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>
+[source,ruby]
+    filter {
+      grok {
+        patterns_dir => ["./patterns"]
+        match => { "message" => "%{SYSLOGBASE} %{POSTFIX_QUEUEID:queue_id}: %{GREEDYDATA:syslog_message}" }
+      }
+    }
+
+The above will match and result in the following fields:
+
+* `timestamp: Jan 1 06:25:43`
+* `logsource: mailserver14`
+* `program: postfix/cleanup`
+* `pid: 21403`
+* `queue_id: BEF25A72965`
+* `syslog_message: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>`
+
+The `timestamp`, `logsource`, `program`, and `pid` fields come from the
+`SYSLOGBASE` pattern which itself is defined by other patterns.
+
+Another option is to define patterns _inline_ in the filter using `pattern_definitions`.
+This is mostly for convenience and allows the user to define a pattern that can be used just in that
+filter. Patterns newly defined in `pattern_definitions` will not be available outside of that particular `grok` filter.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Grok Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-break_on_match>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-keep_empty_captures>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-match>> |<<hash,hash>>|No
+| <<{version}-plugins-{type}s-{plugin}-named_captures_only>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-overwrite>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-pattern_definitions>> |<<hash,hash>>|No
+| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-patterns_files_glob>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-tag_on_timeout>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout_millis>> |<<number,number>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-break_on_match"]
+===== `break_on_match`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Break on first match. The first successful match by grok will result in the
+filter being finished. If you want grok to try all patterns (maybe you are
+parsing different things), then set this to false.
+
+[id="{version}-plugins-{type}s-{plugin}-keep_empty_captures"]
+===== `keep_empty_captures`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+If `true`, keep empty captures as event fields.
+
+[id="{version}-plugins-{type}s-{plugin}-match"]
+===== `match`
+
+ * Value type is <<hash,hash>>
+ * Default value is `{}`
+
+A hash of matches of `field => value`.
+
+For example:
+[source,ruby]
+    filter {
+      grok { match => { "message" => "Duration: %{NUMBER:duration}" } }
+    }
+
+If you need to match multiple patterns against a single field, the value can be an array of patterns:
+[source,ruby]
+    filter {
+      grok { match => { "message" => [ "Duration: %{NUMBER:duration}", "Speed: %{NUMBER:speed}" ] } }
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-named_captures_only"]
+===== `named_captures_only`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+If `true`, only store named captures from grok.
+
+[id="{version}-plugins-{type}s-{plugin}-overwrite"]
+===== `overwrite`
+
+ * Value type is <<array,array>>
+ * Default value is `[]`
+
+The fields to overwrite.
+
+This allows you to overwrite a value in a field that already exists.
+
+For example, if you have a syslog line in the `message` field, you can
+overwrite the `message` field with part of the match like so:
+[source,ruby]
+    filter {
+      grok {
+        match => { "message" => "%{SYSLOGBASE} %{DATA:message}" }
+        overwrite => [ "message" ]
+      }
+    }
+
+In this case, a line like `May 29 16:37:11 sadness logger: hello world`
+will be parsed and `hello world` will overwrite the original message.
+
+[id="{version}-plugins-{type}s-{plugin}-pattern_definitions"]
+===== `pattern_definitions`
+
+ * Value type is <<hash,hash>>
+ * Default value is `{}`
+
+A hash of pattern-name and pattern tuples defining custom patterns to be used by
+the current filter. Patterns matching existing names will override the pre-existing
+definition.
+Think of this as inline patterns available just for this definition of grok.
+
+[id="{version}-plugins-{type}s-{plugin}-patterns_dir"]
+===== `patterns_dir`
+
+ * Value type is <<array,array>>
+ * Default value is `[]`
+
+Logstash ships by default with a bunch of patterns, so you don't
+necessarily need to define this yourself unless you are adding additional
+patterns. You can point to multiple pattern directories using this setting.
+Note that Grok will read all files in the directory matching `patterns_files_glob`
+and assume that every file in it is a pattern file (including any tilde backup files).
+[source,ruby]
+    patterns_dir => ["/opt/logstash/patterns", "/opt/logstash/extra_patterns"]
+
+Pattern files are plain text with the format:
+[source,ruby]
+    NAME PATTERN
+
+For example:
+[source,ruby]
+    NUMBER \d+
+
+The patterns are loaded when the pipeline is created.
+
+[id="{version}-plugins-{type}s-{plugin}-patterns_files_glob"]
+===== `patterns_files_glob`
+
+ * Value type is <<string,string>>
+ * Default value is `"*"`
+
+Glob pattern, used to select the pattern files in the directories
+specified by `patterns_dir`.
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
+===== `tag_on_failure`
+
+ * Value type is <<array,array>>
+ * Default value is `["_grokparsefailure"]`
+
+Append values to the `tags` field when there has been no
+successful match.
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_timeout"]
+===== `tag_on_timeout`
+
+ * Value type is <<string,string>>
+ * Default value is `"_groktimeout"`
+
+Tag to apply if a grok regexp times out.
+
+[id="{version}-plugins-{type}s-{plugin}-timeout_millis"]
+===== `timeout_millis`
+
+ * Value type is <<number,number>>
+ * Default value is `30000`
+
+Attempt to terminate regexps after this amount of time.
+This applies per pattern if multiple patterns are applied.
+This will never time out early, but may take a little longer to time out.
+The actual timeout is approximate, based on a 250ms quantization.
+Set to 0 to disable timeouts.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/grok-v3.4.2.asciidoc b/docs/versioned-plugins/filters/grok-v3.4.2.asciidoc
new file mode 100644
index 000000000..ecf226d27
--- /dev/null
+++ b/docs/versioned-plugins/filters/grok-v3.4.2.asciidoc
@@ -0,0 +1,333 @@
+:plugin: grok
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.4.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-grok/blob/v3.4.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Grok filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Parse arbitrary text and structure it.
+
+Grok is currently the best way in logstash to parse unstructured log
+data into something structured and queryable.
+
+This tool is perfect for syslog logs, apache and other webserver logs, mysql
+logs, and in general, any log format that is generally written for humans
+and not computer consumption.
+
+Logstash ships with about 120 patterns by default. You can find them here:
+https://github.com/logstash-plugins/logstash-patterns-core/tree/master/patterns. You can add
+your own trivially. (See the `patterns_dir` setting.)
+
+If you need help building patterns to match your logs, you will find the
+http://grokdebug.herokuapp.com and http://grokconstructor.appspot.com/ applications quite useful!
+
+==== Grok Basics
+
+Grok works by combining text patterns into something that matches your
+logs.
+
+The syntax for a grok pattern is `%{SYNTAX:SEMANTIC}`.
+
+The `SYNTAX` is the name of the pattern that will match your text. For
+example, `3.44` will be matched by the `NUMBER` pattern and `55.3.244.1` will
+be matched by the `IP` pattern. The syntax is how you match.
+
+The `SEMANTIC` is the identifier you give to the piece of text being matched.
+For example, `3.44` could be the duration of an event, so you could call it
+simply `duration`. Further, a string `55.3.244.1` might identify the `client`
+making a request.
+
+For the above example, your grok filter would look something like this:
+[source,ruby]
+%{NUMBER:duration} %{IP:client}
+
+Optionally you can add a data type conversion to your grok pattern. By default
+all semantics are saved as strings. If you wish to convert a semantic's data type,
+for example change a string to an integer, suffix it with the target data type.
+For example, `%{NUMBER:num:int}` converts the `num` semantic from a string to an
+integer. Currently the only supported conversions are `int` and `float`.
+
+.Examples:
+
+With that idea of a syntax and semantic, we can pull out useful fields from a
+sample log like this fictional http request log:
+[source,ruby]
+    55.3.244.1 GET /index.html 15824 0.043
+
+The pattern for this could be:
+[source,ruby]
+    %{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}
+
+For a more realistic example, let's read these logs from a file:
+[source,ruby]
+    input {
+      file {
+        path => "/var/log/http.log"
+      }
+    }
+    filter {
+      grok {
+        match => { "message" => "%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}" }
+      }
+    }
+
+After the grok filter, the event will have a few extra fields in it:
+
+* `client: 55.3.244.1`
+* `method: GET`
+* `request: /index.html`
+* `bytes: 15824`
+* `duration: 0.043`
+
+==== Regular Expressions
+
+Grok sits on top of regular expressions, so any regular expressions are valid
+in grok as well. The regular expression library is Oniguruma, and you can see
+the full supported regexp syntax https://github.com/kkos/oniguruma/blob/master/doc/RE[on the Oniguruma
+site].
+
+==== Custom Patterns
+
+Sometimes logstash doesn't have a pattern you need. For this, you have
+a few options.
+
+First, you can use the Oniguruma syntax for named capture, which will
+let you match a piece of text and save it as a field:
+[source,ruby]
+    (?<field_name>the pattern here)
+
+For example, postfix logs have a `queue id` that is a 10- or 11-character
+hexadecimal value. I can capture that easily like this:
+[source,ruby]
+    (?<queue_id>[0-9A-F]{10,11})
+
+Alternately, you can create a custom patterns file.
+
+* Create a directory called `patterns` with a file in it called `extra`
+  (the file name doesn't matter, but name it meaningfully for yourself)
+* In that file, write the pattern you need as the pattern name, a space, then
+  the regexp for that pattern.
+
+For example, doing the postfix queue id example as above:
+[source,ruby]
+    # contents of ./patterns/postfix:
+    POSTFIX_QUEUEID [0-9A-F]{10,11}
+
+Then use the `patterns_dir` setting in this plugin to tell logstash where
+your custom patterns directory is.
+Here's a full example with a sample log:
+[source,ruby]
+    Jan 1 06:25:43 mailserver14 postfix/cleanup[21403]: BEF25A72965: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>
+[source,ruby]
+    filter {
+      grok {
+        patterns_dir => ["./patterns"]
+        match => { "message" => "%{SYSLOGBASE} %{POSTFIX_QUEUEID:queue_id}: %{GREEDYDATA:syslog_message}" }
+      }
+    }
+
+The above will match and result in the following fields:
+
+* `timestamp: Jan 1 06:25:43`
+* `logsource: mailserver14`
+* `program: postfix/cleanup`
+* `pid: 21403`
+* `queue_id: BEF25A72965`
+* `syslog_message: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>`
+
+The `timestamp`, `logsource`, `program`, and `pid` fields come from the
+`SYSLOGBASE` pattern which itself is defined by other patterns.
+
+Another option is to define patterns _inline_ in the filter using `pattern_definitions`.
+This is mostly for convenience and allows the user to define a pattern that can be used just in that
+filter. Patterns newly defined in `pattern_definitions` will not be available outside of that particular `grok` filter.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Grok Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-break_on_match>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-keep_empty_captures>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-match>> |<<hash,hash>>|No
+| <<{version}-plugins-{type}s-{plugin}-named_captures_only>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-overwrite>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-pattern_definitions>> |<<hash,hash>>|No
+| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-patterns_files_glob>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-tag_on_timeout>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout_millis>> |<<number,number>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-break_on_match"]
+===== `break_on_match`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Break on first match. The first successful match by grok will result in the
+filter being finished. If you want grok to try all patterns (maybe you are
+parsing different things), then set this to false.
+
+[id="{version}-plugins-{type}s-{plugin}-keep_empty_captures"]
+===== `keep_empty_captures`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+If `true`, keep empty captures as event fields.
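+
+For example, a sketch that keeps an empty `user` capture as an event field instead of dropping it (the pattern and field names are illustrative):
+
+[source,ruby]
+    filter {
+      grok {
+        match => { "message" => "user=%{DATA:user} action=%{WORD:action}" }
+        keep_empty_captures => true
+      }
+    }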
+ +[id="{version}-plugins-{type}s-{plugin}-match"] +===== `match` + + * Value type is <> + * Default value is `{}` + +A hash of matches of field => value + +For example: +[source,ruby] + filter { + grok { match => { "message" => "Duration: %{NUMBER:duration}" } } + } + +If you need to match multiple patterns against a single field, the value can be an array of patterns +[source,ruby] + filter { + grok { match => { "message" => [ "Duration: %{NUMBER:duration}", "Speed: %{NUMBER:speed}" ] } } + } + + +[id="{version}-plugins-{type}s-{plugin}-named_captures_only"] +===== `named_captures_only` + + * Value type is <> + * Default value is `true` + +If `true`, only store named captures from grok. + +[id="{version}-plugins-{type}s-{plugin}-overwrite"] +===== `overwrite` + + * Value type is <> + * Default value is `[]` + +The fields to overwrite. + +This allows you to overwrite a value in a field that already exists. + +For example, if you have a syslog line in the `message` field, you can +overwrite the `message` field with part of the match like so: +[source,ruby] + filter { + grok { + match => { "message" => "%{SYSLOGBASE} %{DATA:message}" } + overwrite => [ "message" ] + } + } + +In this case, a line like `May 29 16:37:11 sadness logger: hello world` +will be parsed and `hello world` will overwrite the original message. + +[id="{version}-plugins-{type}s-{plugin}-pattern_definitions"] +===== `pattern_definitions` + + * Value type is <> + * Default value is `{}` + +A hash of pattern-name and pattern tuples defining custom patterns to be used by +the current filter. Patterns matching existing names will override the pre-existing +definition. Think of this as inline patterns available just for this definition of +grok + +[id="{version}-plugins-{type}s-{plugin}-patterns_dir"] +===== `patterns_dir` + + * Value type is <> + * Default value is `[]` + + +Logstash ships by default with a bunch of patterns, so you don't +necessarily need to define this yourself unless you are adding additional +patterns. You can point to multiple pattern directories using this setting. +Note that Grok will read all files in the directory matching the patterns_files_glob +and assume it's a pattern file (including any tilde backup files). +[source,ruby] + patterns_dir => ["/opt/logstash/patterns", "/opt/logstash/extra_patterns"] + +Pattern files are plain text with format: +[source,ruby] + NAME PATTERN + +For example: +[source,ruby] + NUMBER \d+ + +The patterns are loaded when the pipeline is created. + +[id="{version}-plugins-{type}s-{plugin}-patterns_files_glob"] +===== `patterns_files_glob` + + * Value type is <> + * Default value is `"*"` + +Glob pattern, used to select the pattern files in the directories +specified by patterns_dir + +[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] +===== `tag_on_failure` + + * Value type is <> + * Default value is `["_grokparsefailure"]` + +Append values to the `tags` field when there has been no +successful match + +[id="{version}-plugins-{type}s-{plugin}-tag_on_timeout"] +===== `tag_on_timeout` + + * Value type is <> + * Default value is `"_groktimeout"` + +Tag to apply if a grok regexp times out. + +[id="{version}-plugins-{type}s-{plugin}-timeout_millis"] +===== `timeout_millis` + + * Value type is <> + * Default value is `30000` + +Attempt to terminate regexps after this amount of time. +This applies per pattern if multiple patterns are applied +This will never timeout early, but may take a little longer to timeout. 
+The actual timeout is approximate, based on a 250ms quantization.
+Set to 0 to disable timeouts.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/grok-v3.4.3.asciidoc b/docs/versioned-plugins/filters/grok-v3.4.3.asciidoc
new file mode 100644
index 000000000..65043d69c
--- /dev/null
+++ b/docs/versioned-plugins/filters/grok-v3.4.3.asciidoc
@@ -0,0 +1,332 @@
+:plugin: grok
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.4.3
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-grok/blob/v3.4.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Grok filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Parse arbitrary text and structure it.
+
+Grok is a great way to parse unstructured log data into something structured and queryable.
+
+This tool is perfect for syslog logs, apache and other webserver logs, mysql
+logs, and in general, any log format that is generally written for humans
+and not computer consumption.
+
+Logstash ships with about 120 patterns by default. You can find them here:
+https://github.com/logstash-plugins/logstash-patterns-core/tree/master/patterns. You can add
+your own trivially. (See the `patterns_dir` setting.)
+
+If you need help building patterns to match your logs, you will find the
+http://grokdebug.herokuapp.com and http://grokconstructor.appspot.com/ applications quite useful!
+
+==== Grok Basics
+
+Grok works by combining text patterns into something that matches your
+logs.
+
+The syntax for a grok pattern is `%{SYNTAX:SEMANTIC}`.
+
+The `SYNTAX` is the name of the pattern that will match your text. For
+example, `3.44` will be matched by the `NUMBER` pattern and `55.3.244.1` will
+be matched by the `IP` pattern. The syntax is how you match.
+
+The `SEMANTIC` is the identifier you give to the piece of text being matched.
+For example, `3.44` could be the duration of an event, so you could call it
+simply `duration`. Further, a string `55.3.244.1` might identify the `client`
+making a request.
+
+For the above example, your grok filter would look something like this:
+[source,ruby]
+%{NUMBER:duration} %{IP:client}
+
+Optionally you can add a data type conversion to your grok pattern. By default
+all semantics are saved as strings. If you wish to convert a semantic's data type,
+for example change a string to an integer, suffix it with the target data type.
+For example, `%{NUMBER:num:int}` converts the `num` semantic from a string to an
+integer. Currently the only supported conversions are `int` and `float`.
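+
+For example, a sketch that stores a byte count as an integer rather than a string (the log format here is illustrative):
+
+[source,ruby]
+    filter {
+      grok {
+        match => { "message" => "%{IP:client} %{NUMBER:bytes:int}" }
+      }
+    }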
+
+.Examples:
+
+With that idea of a syntax and semantic, we can pull out useful fields from a
+sample log like this fictional http request log:
+[source,ruby]
+    55.3.244.1 GET /index.html 15824 0.043
+
+The pattern for this could be:
+[source,ruby]
+    %{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}
+
+For a more realistic example, let's read these logs from a file:
+[source,ruby]
+    input {
+      file {
+        path => "/var/log/http.log"
+      }
+    }
+    filter {
+      grok {
+        match => { "message" => "%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}" }
+      }
+    }
+
+After the grok filter, the event will have a few extra fields in it:
+
+* `client: 55.3.244.1`
+* `method: GET`
+* `request: /index.html`
+* `bytes: 15824`
+* `duration: 0.043`
+
+==== Regular Expressions
+
+Grok sits on top of regular expressions, so any regular expressions are valid
+in grok as well. The regular expression library is Oniguruma, and you can see
+the full supported regexp syntax https://github.com/kkos/oniguruma/blob/master/doc/RE[on the Oniguruma
+site].
+
+==== Custom Patterns
+
+Sometimes logstash doesn't have a pattern you need. For this, you have
+a few options.
+
+First, you can use the Oniguruma syntax for named capture, which will
+let you match a piece of text and save it as a field:
+[source,ruby]
+    (?<field_name>the pattern here)
+
+For example, postfix logs have a `queue id` that is a 10- or 11-character
+hexadecimal value. I can capture that easily like this:
+[source,ruby]
+    (?<queue_id>[0-9A-F]{10,11})
+
+Alternately, you can create a custom patterns file.
+
+* Create a directory called `patterns` with a file in it called `extra`
+  (the file name doesn't matter, but name it meaningfully for yourself)
+* In that file, write the pattern you need as the pattern name, a space, then
+  the regexp for that pattern.
+
+For example, doing the postfix queue id example as above:
+[source,ruby]
+    # contents of ./patterns/postfix:
+    POSTFIX_QUEUEID [0-9A-F]{10,11}
+
+Then use the `patterns_dir` setting in this plugin to tell logstash where
+your custom patterns directory is. Here's a full example with a sample log:
+[source,ruby]
+    Jan 1 06:25:43 mailserver14 postfix/cleanup[21403]: BEF25A72965: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>
+[source,ruby]
+    filter {
+      grok {
+        patterns_dir => ["./patterns"]
+        match => { "message" => "%{SYSLOGBASE} %{POSTFIX_QUEUEID:queue_id}: %{GREEDYDATA:syslog_message}" }
+      }
+    }
+
+The above will match and result in the following fields:
+
+* `timestamp: Jan 1 06:25:43`
+* `logsource: mailserver14`
+* `program: postfix/cleanup`
+* `pid: 21403`
+* `queue_id: BEF25A72965`
+* `syslog_message: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>`
+
+The `timestamp`, `logsource`, `program`, and `pid` fields come from the
+`SYSLOGBASE` pattern which itself is defined by other patterns.
+
+Another option is to define patterns _inline_ in the filter using `pattern_definitions`.
+This is mostly for convenience and allows the user to define a pattern that can be used just in that
+filter. Patterns newly defined in `pattern_definitions` will not be available outside of that particular `grok` filter.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Grok Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-break_on_match>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-keep_empty_captures>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-match>> |<<hash,hash>>|No
+| <<{version}-plugins-{type}s-{plugin}-named_captures_only>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-overwrite>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-pattern_definitions>> |<<hash,hash>>|No
+| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-patterns_files_glob>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-tag_on_timeout>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout_millis>> |<<number,number>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-break_on_match"]
+===== `break_on_match`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Break on first match. The first successful match by grok will result in the
+filter being finished. If you want grok to try all patterns (maybe you are
+parsing different things), then set this to false.
+
+[id="{version}-plugins-{type}s-{plugin}-keep_empty_captures"]
+===== `keep_empty_captures`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+If `true`, keep empty captures as event fields.
+
+[id="{version}-plugins-{type}s-{plugin}-match"]
+===== `match`
+
+ * Value type is <<hash,hash>>
+ * Default value is `{}`
+
+A hash of matches of `field => value`.
+
+For example:
+[source,ruby]
+    filter {
+      grok { match => { "message" => "Duration: %{NUMBER:duration}" } }
+    }
+
+If you need to match multiple patterns against a single field, the value can be an array of patterns:
+[source,ruby]
+    filter {
+      grok { match => { "message" => [ "Duration: %{NUMBER:duration}", "Speed: %{NUMBER:speed}" ] } }
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-named_captures_only"]
+===== `named_captures_only`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+If `true`, only store named captures from grok.
+
+[id="{version}-plugins-{type}s-{plugin}-overwrite"]
+===== `overwrite`
+
+ * Value type is <<array,array>>
+ * Default value is `[]`
+
+The fields to overwrite.
+
+This allows you to overwrite a value in a field that already exists.
+
+For example, if you have a syslog line in the `message` field, you can
+overwrite the `message` field with part of the match like so:
+[source,ruby]
+    filter {
+      grok {
+        match => { "message" => "%{SYSLOGBASE} %{DATA:message}" }
+        overwrite => [ "message" ]
+      }
+    }
+
+In this case, a line like `May 29 16:37:11 sadness logger: hello world`
+will be parsed and `hello world` will overwrite the original message.
+
+[id="{version}-plugins-{type}s-{plugin}-pattern_definitions"]
+===== `pattern_definitions`
+
+ * Value type is <<hash,hash>>
+ * Default value is `{}`
+
+A hash of pattern-name and pattern tuples defining custom patterns to be used by
+the current filter. Patterns matching existing names will override the pre-existing
+definition.
Think of this as inline patterns available just for this definition of +grok + +[id="{version}-plugins-{type}s-{plugin}-patterns_dir"] +===== `patterns_dir` + + * Value type is <> + * Default value is `[]` + + +Logstash ships by default with a bunch of patterns, so you don't +necessarily need to define this yourself unless you are adding additional +patterns. You can point to multiple pattern directories using this setting. +Note that Grok will read all files in the directory matching the patterns_files_glob +and assume it's a pattern file (including any tilde backup files). +[source,ruby] + patterns_dir => ["/opt/logstash/patterns", "/opt/logstash/extra_patterns"] + +Pattern files are plain text with format: +[source,ruby] + NAME PATTERN + +For example: +[source,ruby] + NUMBER \d+ + +The patterns are loaded when the pipeline is created. + +[id="{version}-plugins-{type}s-{plugin}-patterns_files_glob"] +===== `patterns_files_glob` + + * Value type is <> + * Default value is `"*"` + +Glob pattern, used to select the pattern files in the directories +specified by patterns_dir + +[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] +===== `tag_on_failure` + + * Value type is <> + * Default value is `["_grokparsefailure"]` + +Append values to the `tags` field when there has been no +successful match + +[id="{version}-plugins-{type}s-{plugin}-tag_on_timeout"] +===== `tag_on_timeout` + + * Value type is <> + * Default value is `"_groktimeout"` + +Tag to apply if a grok regexp times out. + +[id="{version}-plugins-{type}s-{plugin}-timeout_millis"] +===== `timeout_millis` + + * Value type is <> + * Default value is `30000` + +Attempt to terminate regexps after this amount of time. +This applies per pattern if multiple patterns are applied +This will never timeout early, but may take a little longer to timeout. +Actual timeout is approximate based on a 250ms quantization. +Set to 0 to disable timeouts + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/grok-v3.4.4.asciidoc b/docs/versioned-plugins/filters/grok-v3.4.4.asciidoc new file mode 100644 index 000000000..55a2a504b --- /dev/null +++ b/docs/versioned-plugins/filters/grok-v3.4.4.asciidoc @@ -0,0 +1,332 @@ +:plugin: grok +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.4.4 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-grok/blob/v3.4.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Grok filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Parse arbitrary text and structure it. + +Grok is a great way to parse unstructured log data into something structured and queryable. + +This tool is perfect for syslog logs, apache and other webserver logs, mysql +logs, and in general, any log format that is generally written for humans +and not computer consumption. + +Logstash ships with about 120 patterns by default. You can find them here: +. You can add +your own trivially. (See the `patterns_dir` setting) + +If you need help building patterns to match your logs, you will find the + and applications quite useful! 
+
==== Grok Basics
+
Grok works by combining text patterns into something that matches your
logs.
+
The syntax for a grok pattern is `%{SYNTAX:SEMANTIC}`.
+
The `SYNTAX` is the name of the pattern that will match your text. For
example, `3.44` will be matched by the `NUMBER` pattern and `55.3.244.1` will
be matched by the `IP` pattern. The syntax is how you match.
+
The `SEMANTIC` is the identifier you give to the piece of text being matched.
For example, `3.44` could be the duration of an event, so you could call it
simply `duration`. Further, a string `55.3.244.1` might identify the `client`
making a request.
+
For the above example, your grok filter would look something like this:
[source,ruby]
%{NUMBER:duration} %{IP:client}
+
Optionally you can add a data type conversion to your grok pattern. By default
all semantics are saved as strings. If you wish to convert a semantic's data type,
for example change a string to an integer, then suffix it with the target data type.
For example, `%{NUMBER:num:int}` converts the `num` semantic from a string to an
integer. Currently the only supported conversions are `int` and `float`.
+
.Examples:
+
With that idea of a syntax and semantic, we can pull out useful fields from a
sample log like this fictional http request log:
[source,ruby]
    55.3.244.1 GET /index.html 15824 0.043
+
The pattern for this could be:
[source,ruby]
    %{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}
+
For a more realistic example, let's read these logs from a file:
[source,ruby]
    input {
      file {
        path => "/var/log/http.log"
      }
    }
    filter {
      grok {
        match => { "message" => "%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}" }
      }
    }
+
After the grok filter, the event will have a few extra fields in it:
+
* `client: 55.3.244.1`
* `method: GET`
* `request: /index.html`
* `bytes: 15824`
* `duration: 0.043`
+
==== Regular Expressions
+
Grok sits on top of regular expressions, so any regular expressions are valid
in grok as well. The regular expression library is Oniguruma, and you can see
the full supported regexp syntax https://github.com/kkos/oniguruma/blob/master/doc/RE[on the Oniguruma
site].
+
==== Custom Patterns
+
Sometimes logstash doesn't have a pattern you need. For this, you have
a few options.
+
First, you can use the Oniguruma syntax for named capture, which lets
you match a piece of text and save it as a field:
[source,ruby]
    (?<field_name>the pattern here)
+
For example, postfix logs have a `queue id` that is a 10- or 11-character
hexadecimal value. You can capture that easily like this:
[source,ruby]
    (?<queue_id>[0-9A-F]{10,11})
+
Alternately, you can create a custom patterns file.
+
* Create a directory called `patterns` with a file in it called `extra`
  (the file name doesn't matter, but name it meaningfully for yourself)
* In that file, write the pattern you need as the pattern name, a space, then
  the regexp for that pattern.
+
For example, doing the postfix queue id example as above:
[source,ruby]
    # contents of ./patterns/postfix:
    POSTFIX_QUEUEID [0-9A-F]{10,11}
+
Then use the `patterns_dir` setting in this plugin to tell logstash where
your custom patterns directory is.
Here's a full example with a sample log:
[source,ruby]
    Jan  1 06:25:43 mailserver14 postfix/cleanup[21403]: BEF25A72965: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>
[source,ruby]
    filter {
      grok {
        patterns_dir => ["./patterns"]
        match => { "message" => "%{SYSLOGBASE} %{POSTFIX_QUEUEID:queue_id}: %{GREEDYDATA:syslog_message}" }
      }
    }
+
The above will match and result in the following fields:
+
* `timestamp: Jan  1 06:25:43`
* `logsource: mailserver14`
* `program: postfix/cleanup`
* `pid: 21403`
* `queue_id: BEF25A72965`
* `syslog_message: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>`
+
The `timestamp`, `logsource`, `program`, and `pid` fields come from the
`SYSLOGBASE` pattern, which itself is defined by other patterns.
+
Another option is to define patterns _inline_ in the filter using `pattern_definitions`.
This is mostly for convenience and allows the user to define a pattern that can be used just in that
filter. Patterns defined in `pattern_definitions` are not available outside of that particular `grok` filter.
+
+
[id="{version}-plugins-{type}s-{plugin}-options"]
==== Grok Filter Configuration Options
+
This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
[cols="<,<,<",options="header",]
|=======================================================================
|Setting |Input type|Required
| <<{version}-plugins-{type}s-{plugin}-break_on_match>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-keep_empty_captures>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-match>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-named_captures_only>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-overwrite>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-pattern_definitions>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-patterns_files_glob>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-tag_on_timeout>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-timeout_millis>> |<>|No
|=======================================================================
+
Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
filter plugins.
+
 
+
[id="{version}-plugins-{type}s-{plugin}-break_on_match"]
===== `break_on_match`
+
 * Value type is <>
 * Default value is `true`
+
Break on first match. The first successful match by grok will result in the
filter being finished. If you want grok to try all patterns (maybe you are
parsing different things), then set this to false.
+
[id="{version}-plugins-{type}s-{plugin}-keep_empty_captures"]
===== `keep_empty_captures`
+
 * Value type is <>
 * Default value is `false`
+
If `true`, keep empty captures as event fields.
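+
As an illustration (the pattern and field names here are hypothetical, not part of
the shipped patterns), the optional `detail` capture below is empty for a bare
`ERROR:` line; with `keep_empty_captures => true` the empty `detail` field stays on
the event instead of being dropped:
[source,ruby]
    filter {
      grok {
        match => { "message" => "%{WORD:level}:(?: %{GREEDYDATA:detail})?" }
        keep_empty_captures => true
      }
    }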
+ +[id="{version}-plugins-{type}s-{plugin}-match"] +===== `match` + + * Value type is <> + * Default value is `{}` + +A hash of matches of field => value + +For example: +[source,ruby] + filter { + grok { match => { "message" => "Duration: %{NUMBER:duration}" } } + } + +If you need to match multiple patterns against a single field, the value can be an array of patterns +[source,ruby] + filter { + grok { match => { "message" => [ "Duration: %{NUMBER:duration}", "Speed: %{NUMBER:speed}" ] } } + } + + +[id="{version}-plugins-{type}s-{plugin}-named_captures_only"] +===== `named_captures_only` + + * Value type is <> + * Default value is `true` + +If `true`, only store named captures from grok. + +[id="{version}-plugins-{type}s-{plugin}-overwrite"] +===== `overwrite` + + * Value type is <> + * Default value is `[]` + +The fields to overwrite. + +This allows you to overwrite a value in a field that already exists. + +For example, if you have a syslog line in the `message` field, you can +overwrite the `message` field with part of the match like so: +[source,ruby] + filter { + grok { + match => { "message" => "%{SYSLOGBASE} %{DATA:message}" } + overwrite => [ "message" ] + } + } + +In this case, a line like `May 29 16:37:11 sadness logger: hello world` +will be parsed and `hello world` will overwrite the original message. + +[id="{version}-plugins-{type}s-{plugin}-pattern_definitions"] +===== `pattern_definitions` + + * Value type is <> + * Default value is `{}` + +A hash of pattern-name and pattern tuples defining custom patterns to be used by +the current filter. Patterns matching existing names will override the pre-existing +definition. Think of this as inline patterns available just for this definition of +grok + +[id="{version}-plugins-{type}s-{plugin}-patterns_dir"] +===== `patterns_dir` + + * Value type is <> + * Default value is `[]` + + +Logstash ships by default with a bunch of patterns, so you don't +necessarily need to define this yourself unless you are adding additional +patterns. You can point to multiple pattern directories using this setting. +Note that Grok will read all files in the directory matching the patterns_files_glob +and assume it's a pattern file (including any tilde backup files). +[source,ruby] + patterns_dir => ["/opt/logstash/patterns", "/opt/logstash/extra_patterns"] + +Pattern files are plain text with format: +[source,ruby] + NAME PATTERN + +For example: +[source,ruby] + NUMBER \d+ + +The patterns are loaded when the pipeline is created. + +[id="{version}-plugins-{type}s-{plugin}-patterns_files_glob"] +===== `patterns_files_glob` + + * Value type is <> + * Default value is `"*"` + +Glob pattern, used to select the pattern files in the directories +specified by patterns_dir + +[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] +===== `tag_on_failure` + + * Value type is <> + * Default value is `["_grokparsefailure"]` + +Append values to the `tags` field when there has been no +successful match + +[id="{version}-plugins-{type}s-{plugin}-tag_on_timeout"] +===== `tag_on_timeout` + + * Value type is <> + * Default value is `"_groktimeout"` + +Tag to apply if a grok regexp times out. + +[id="{version}-plugins-{type}s-{plugin}-timeout_millis"] +===== `timeout_millis` + + * Value type is <> + * Default value is `30000` + +Attempt to terminate regexps after this amount of time. +This applies per pattern if multiple patterns are applied +This will never timeout early, but may take a little longer to timeout. 
+Actual timeout is approximate based on a 250ms quantization. +Set to 0 to disable timeouts + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/grok-v4.0.0.asciidoc b/docs/versioned-plugins/filters/grok-v4.0.0.asciidoc new file mode 100644 index 000000000..f81da7ea2 --- /dev/null +++ b/docs/versioned-plugins/filters/grok-v4.0.0.asciidoc @@ -0,0 +1,332 @@ +:plugin: grok +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.0.0 +:release_date: 2017-11-27 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-grok/blob/v4.0.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Grok filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Parse arbitrary text and structure it. + +Grok is a great way to parse unstructured log data into something structured and queryable. + +This tool is perfect for syslog logs, apache and other webserver logs, mysql +logs, and in general, any log format that is generally written for humans +and not computer consumption. + +Logstash ships with about 120 patterns by default. You can find them here: +. You can add +your own trivially. (See the `patterns_dir` setting) + +If you need help building patterns to match your logs, you will find the + and applications quite useful! + +==== Grok Basics + +Grok works by combining text patterns into something that matches your +logs. + +The syntax for a grok pattern is `%{SYNTAX:SEMANTIC}` + +The `SYNTAX` is the name of the pattern that will match your text. For +example, `3.44` will be matched by the `NUMBER` pattern and `55.3.244.1` will +be matched by the `IP` pattern. The syntax is how you match. + +The `SEMANTIC` is the identifier you give to the piece of text being matched. +For example, `3.44` could be the duration of an event, so you could call it +simply `duration`. Further, a string `55.3.244.1` might identify the `client` +making a request. + +For the above example, your grok filter would look something like this: +[source,ruby] +%{NUMBER:duration} %{IP:client} + +Optionally you can add a data type conversion to your grok pattern. By default +all semantics are saved as strings. If you wish to convert a semantic's data type, +for example change a string to an integer then suffix it with the target data type. +For example `%{NUMBER:num:int}` which converts the `num` semantic from a string to an +integer. Currently the only supported conversions are `int` and `float`. 
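+
As a quick illustration (the field names are illustrative, not defaults), the
following sketch stores `bytes` as an integer and `duration` as a float instead
of strings:
[source,ruby]
    filter {
      grok {
        match => { "message" => "%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes:int} %{NUMBER:duration:float}" }
      }
    }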
+
.Examples:
+
With that idea of a syntax and semantic, we can pull out useful fields from a
sample log like this fictional http request log:
[source,ruby]
    55.3.244.1 GET /index.html 15824 0.043
+
The pattern for this could be:
[source,ruby]
    %{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}
+
For a more realistic example, let's read these logs from a file:
[source,ruby]
    input {
      file {
        path => "/var/log/http.log"
      }
    }
    filter {
      grok {
        match => { "message" => "%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}" }
      }
    }
+
After the grok filter, the event will have a few extra fields in it:
+
* `client: 55.3.244.1`
* `method: GET`
* `request: /index.html`
* `bytes: 15824`
* `duration: 0.043`
+
==== Regular Expressions
+
Grok sits on top of regular expressions, so any regular expressions are valid
in grok as well. The regular expression library is Oniguruma, and you can see
the full supported regexp syntax https://github.com/kkos/oniguruma/blob/master/doc/RE[on the Oniguruma
site].
+
==== Custom Patterns
+
Sometimes logstash doesn't have a pattern you need. For this, you have
a few options.
+
First, you can use the Oniguruma syntax for named capture, which lets
you match a piece of text and save it as a field:
[source,ruby]
    (?<field_name>the pattern here)
+
For example, postfix logs have a `queue id` that is a 10- or 11-character
hexadecimal value. You can capture that easily like this:
[source,ruby]
    (?<queue_id>[0-9A-F]{10,11})
+
Alternately, you can create a custom patterns file.
+
* Create a directory called `patterns` with a file in it called `extra`
  (the file name doesn't matter, but name it meaningfully for yourself)
* In that file, write the pattern you need as the pattern name, a space, then
  the regexp for that pattern.
+
For example, doing the postfix queue id example as above:
[source,ruby]
    # contents of ./patterns/postfix:
    POSTFIX_QUEUEID [0-9A-F]{10,11}
+
Then use the `patterns_dir` setting in this plugin to tell logstash where
your custom patterns directory is. Here's a full example with a sample log:
[source,ruby]
    Jan  1 06:25:43 mailserver14 postfix/cleanup[21403]: BEF25A72965: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>
[source,ruby]
    filter {
      grok {
        patterns_dir => ["./patterns"]
        match => { "message" => "%{SYSLOGBASE} %{POSTFIX_QUEUEID:queue_id}: %{GREEDYDATA:syslog_message}" }
      }
    }
+
The above will match and result in the following fields:
+
* `timestamp: Jan  1 06:25:43`
* `logsource: mailserver14`
* `program: postfix/cleanup`
* `pid: 21403`
* `queue_id: BEF25A72965`
* `syslog_message: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>`
+
The `timestamp`, `logsource`, `program`, and `pid` fields come from the
`SYSLOGBASE` pattern, which itself is defined by other patterns.
+
Another option is to define patterns _inline_ in the filter using `pattern_definitions`.
This is mostly for convenience and allows the user to define a pattern that can be used just in that
filter. Patterns defined in `pattern_definitions` are not available outside of that particular `grok` filter.
+
+
[id="{version}-plugins-{type}s-{plugin}-options"]
==== Grok Filter Configuration Options
+
This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-break_on_match>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keep_empty_captures>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-match>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-named_captures_only>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-overwrite>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pattern_definitions>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-patterns_files_glob>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout_millis>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-break_on_match"] +===== `break_on_match` + + * Value type is <> + * Default value is `true` + +Break on first match. The first successful match by grok will result in the +filter being finished. If you want grok to try all patterns (maybe you are +parsing different things), then set this to false. + +[id="{version}-plugins-{type}s-{plugin}-keep_empty_captures"] +===== `keep_empty_captures` + + * Value type is <> + * Default value is `false` + +If `true`, keep empty captures as event fields. + +[id="{version}-plugins-{type}s-{plugin}-match"] +===== `match` + + * Value type is <> + * Default value is `{}` + +A hash of matches of field => value + +For example: +[source,ruby] + filter { + grok { match => { "message" => "Duration: %{NUMBER:duration}" } } + } + +If you need to match multiple patterns against a single field, the value can be an array of patterns +[source,ruby] + filter { + grok { match => { "message" => [ "Duration: %{NUMBER:duration}", "Speed: %{NUMBER:speed}" ] } } + } + + +[id="{version}-plugins-{type}s-{plugin}-named_captures_only"] +===== `named_captures_only` + + * Value type is <> + * Default value is `true` + +If `true`, only store named captures from grok. + +[id="{version}-plugins-{type}s-{plugin}-overwrite"] +===== `overwrite` + + * Value type is <> + * Default value is `[]` + +The fields to overwrite. + +This allows you to overwrite a value in a field that already exists. + +For example, if you have a syslog line in the `message` field, you can +overwrite the `message` field with part of the match like so: +[source,ruby] + filter { + grok { + match => { "message" => "%{SYSLOGBASE} %{DATA:message}" } + overwrite => [ "message" ] + } + } + +In this case, a line like `May 29 16:37:11 sadness logger: hello world` +will be parsed and `hello world` will overwrite the original message. + +[id="{version}-plugins-{type}s-{plugin}-pattern_definitions"] +===== `pattern_definitions` + + * Value type is <> + * Default value is `{}` + +A hash of pattern-name and pattern tuples defining custom patterns to be used by +the current filter. Patterns matching existing names will override the pre-existing +definition. 
Think of this as inline patterns available just for this definition of +grok + +[id="{version}-plugins-{type}s-{plugin}-patterns_dir"] +===== `patterns_dir` + + * Value type is <> + * Default value is `[]` + + +Logstash ships by default with a bunch of patterns, so you don't +necessarily need to define this yourself unless you are adding additional +patterns. You can point to multiple pattern directories using this setting. +Note that Grok will read all files in the directory matching the patterns_files_glob +and assume it's a pattern file (including any tilde backup files). +[source,ruby] + patterns_dir => ["/opt/logstash/patterns", "/opt/logstash/extra_patterns"] + +Pattern files are plain text with format: +[source,ruby] + NAME PATTERN + +For example: +[source,ruby] + NUMBER \d+ + +The patterns are loaded when the pipeline is created. + +[id="{version}-plugins-{type}s-{plugin}-patterns_files_glob"] +===== `patterns_files_glob` + + * Value type is <> + * Default value is `"*"` + +Glob pattern, used to select the pattern files in the directories +specified by patterns_dir + +[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] +===== `tag_on_failure` + + * Value type is <> + * Default value is `["_grokparsefailure"]` + +Append values to the `tags` field when there has been no +successful match + +[id="{version}-plugins-{type}s-{plugin}-tag_on_timeout"] +===== `tag_on_timeout` + + * Value type is <> + * Default value is `"_groktimeout"` + +Tag to apply if a grok regexp times out. + +[id="{version}-plugins-{type}s-{plugin}-timeout_millis"] +===== `timeout_millis` + + * Value type is <> + * Default value is `30000` + +Attempt to terminate regexps after this amount of time. +This applies per pattern if multiple patterns are applied +This will never timeout early, but may take a little longer to timeout. +Actual timeout is approximate based on a 250ms quantization. +Set to 0 to disable timeouts + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/grok-v4.0.1.asciidoc b/docs/versioned-plugins/filters/grok-v4.0.1.asciidoc new file mode 100644 index 000000000..bde168c91 --- /dev/null +++ b/docs/versioned-plugins/filters/grok-v4.0.1.asciidoc @@ -0,0 +1,332 @@ +:plugin: grok +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.0.1 +:release_date: 2017-12-18 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-grok/blob/v4.0.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Grok filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Parse arbitrary text and structure it. + +Grok is a great way to parse unstructured log data into something structured and queryable. + +This tool is perfect for syslog logs, apache and other webserver logs, mysql +logs, and in general, any log format that is generally written for humans +and not computer consumption. + +Logstash ships with about 120 patterns by default. You can find them here: +. You can add +your own trivially. (See the `patterns_dir` setting) + +If you need help building patterns to match your logs, you will find the + and applications quite useful! 
+
==== Grok Basics
+
Grok works by combining text patterns into something that matches your
logs.
+
The syntax for a grok pattern is `%{SYNTAX:SEMANTIC}`.
+
The `SYNTAX` is the name of the pattern that will match your text. For
example, `3.44` will be matched by the `NUMBER` pattern and `55.3.244.1` will
be matched by the `IP` pattern. The syntax is how you match.
+
The `SEMANTIC` is the identifier you give to the piece of text being matched.
For example, `3.44` could be the duration of an event, so you could call it
simply `duration`. Further, a string `55.3.244.1` might identify the `client`
making a request.
+
For the above example, your grok filter would look something like this:
[source,ruby]
%{NUMBER:duration} %{IP:client}
+
Optionally you can add a data type conversion to your grok pattern. By default
all semantics are saved as strings. If you wish to convert a semantic's data type,
for example change a string to an integer, then suffix it with the target data type.
For example, `%{NUMBER:num:int}` converts the `num` semantic from a string to an
integer. Currently the only supported conversions are `int` and `float`.
+
.Examples:
+
With that idea of a syntax and semantic, we can pull out useful fields from a
sample log like this fictional http request log:
[source,ruby]
    55.3.244.1 GET /index.html 15824 0.043
+
The pattern for this could be:
[source,ruby]
    %{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}
+
For a more realistic example, let's read these logs from a file:
[source,ruby]
    input {
      file {
        path => "/var/log/http.log"
      }
    }
    filter {
      grok {
        match => { "message" => "%{IP:client} %{WORD:method} %{URIPATHPARAM:request} %{NUMBER:bytes} %{NUMBER:duration}" }
      }
    }
+
After the grok filter, the event will have a few extra fields in it:
+
* `client: 55.3.244.1`
* `method: GET`
* `request: /index.html`
* `bytes: 15824`
* `duration: 0.043`
+
==== Regular Expressions
+
Grok sits on top of regular expressions, so any regular expressions are valid
in grok as well. The regular expression library is Oniguruma, and you can see
the full supported regexp syntax https://github.com/kkos/oniguruma/blob/master/doc/RE[on the Oniguruma
site].
+
==== Custom Patterns
+
Sometimes logstash doesn't have a pattern you need. For this, you have
a few options.
+
First, you can use the Oniguruma syntax for named capture, which lets
you match a piece of text and save it as a field:
[source,ruby]
    (?<field_name>the pattern here)
+
For example, postfix logs have a `queue id` that is a 10- or 11-character
hexadecimal value. You can capture that easily like this:
[source,ruby]
    (?<queue_id>[0-9A-F]{10,11})
+
Alternately, you can create a custom patterns file.
+
* Create a directory called `patterns` with a file in it called `extra`
  (the file name doesn't matter, but name it meaningfully for yourself)
* In that file, write the pattern you need as the pattern name, a space, then
  the regexp for that pattern.
+
For example, doing the postfix queue id example as above:
[source,ruby]
    # contents of ./patterns/postfix:
    POSTFIX_QUEUEID [0-9A-F]{10,11}
+
Then use the `patterns_dir` setting in this plugin to tell logstash where
your custom patterns directory is.
Here's a full example with a sample log:
[source,ruby]
    Jan  1 06:25:43 mailserver14 postfix/cleanup[21403]: BEF25A72965: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>
[source,ruby]
    filter {
      grok {
        patterns_dir => ["./patterns"]
        match => { "message" => "%{SYSLOGBASE} %{POSTFIX_QUEUEID:queue_id}: %{GREEDYDATA:syslog_message}" }
      }
    }
+
The above will match and result in the following fields:
+
* `timestamp: Jan  1 06:25:43`
* `logsource: mailserver14`
* `program: postfix/cleanup`
* `pid: 21403`
* `queue_id: BEF25A72965`
* `syslog_message: message-id=<20130101142543.5828399CCAF@mailserver14.example.com>`
+
The `timestamp`, `logsource`, `program`, and `pid` fields come from the
`SYSLOGBASE` pattern, which itself is defined by other patterns.
+
Another option is to define patterns _inline_ in the filter using `pattern_definitions`.
This is mostly for convenience and allows the user to define a pattern that can be used just in that
filter. Patterns defined in `pattern_definitions` are not available outside of that particular `grok` filter.
+
+
[id="{version}-plugins-{type}s-{plugin}-options"]
==== Grok Filter Configuration Options
+
This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
[cols="<,<,<",options="header",]
|=======================================================================
|Setting |Input type|Required
| <<{version}-plugins-{type}s-{plugin}-break_on_match>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-keep_empty_captures>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-match>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-named_captures_only>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-overwrite>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-pattern_definitions>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-patterns_files_glob>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-tag_on_timeout>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-timeout_millis>> |<>|No
|=======================================================================
+
Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
filter plugins.
+
 
+
[id="{version}-plugins-{type}s-{plugin}-break_on_match"]
===== `break_on_match`
+
 * Value type is <>
 * Default value is `true`
+
Break on first match. The first successful match by grok will result in the
filter being finished. If you want grok to try all patterns (maybe you are
parsing different things), then set this to false.
+
[id="{version}-plugins-{type}s-{plugin}-keep_empty_captures"]
===== `keep_empty_captures`
+
 * Value type is <>
 * Default value is `false`
+
If `true`, keep empty captures as event fields.
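+
For example, a minimal sketch (the pattern and the hypothetical `query` field are
illustrative): a request line without a query string leaves `query` empty, and this
setting keeps that empty capture on the event rather than dropping it:
[source,ruby]
    filter {
      grok {
        match => { "message" => "%{URIPATH:path}(?:\?%{GREEDYDATA:query})?" }
        keep_empty_captures => true
      }
    }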
+ +[id="{version}-plugins-{type}s-{plugin}-match"] +===== `match` + + * Value type is <> + * Default value is `{}` + +A hash of matches of field => value + +For example: +[source,ruby] + filter { + grok { match => { "message" => "Duration: %{NUMBER:duration}" } } + } + +If you need to match multiple patterns against a single field, the value can be an array of patterns +[source,ruby] + filter { + grok { match => { "message" => [ "Duration: %{NUMBER:duration}", "Speed: %{NUMBER:speed}" ] } } + } + + +[id="{version}-plugins-{type}s-{plugin}-named_captures_only"] +===== `named_captures_only` + + * Value type is <> + * Default value is `true` + +If `true`, only store named captures from grok. + +[id="{version}-plugins-{type}s-{plugin}-overwrite"] +===== `overwrite` + + * Value type is <> + * Default value is `[]` + +The fields to overwrite. + +This allows you to overwrite a value in a field that already exists. + +For example, if you have a syslog line in the `message` field, you can +overwrite the `message` field with part of the match like so: +[source,ruby] + filter { + grok { + match => { "message" => "%{SYSLOGBASE} %{DATA:message}" } + overwrite => [ "message" ] + } + } + +In this case, a line like `May 29 16:37:11 sadness logger: hello world` +will be parsed and `hello world` will overwrite the original message. + +[id="{version}-plugins-{type}s-{plugin}-pattern_definitions"] +===== `pattern_definitions` + + * Value type is <> + * Default value is `{}` + +A hash of pattern-name and pattern tuples defining custom patterns to be used by +the current filter. Patterns matching existing names will override the pre-existing +definition. Think of this as inline patterns available just for this definition of +grok + +[id="{version}-plugins-{type}s-{plugin}-patterns_dir"] +===== `patterns_dir` + + * Value type is <> + * Default value is `[]` + + +Logstash ships by default with a bunch of patterns, so you don't +necessarily need to define this yourself unless you are adding additional +patterns. You can point to multiple pattern directories using this setting. +Note that Grok will read all files in the directory matching the patterns_files_glob +and assume it's a pattern file (including any tilde backup files). +[source,ruby] + patterns_dir => ["/opt/logstash/patterns", "/opt/logstash/extra_patterns"] + +Pattern files are plain text with format: +[source,ruby] + NAME PATTERN + +For example: +[source,ruby] + NUMBER \d+ + +The patterns are loaded when the pipeline is created. + +[id="{version}-plugins-{type}s-{plugin}-patterns_files_glob"] +===== `patterns_files_glob` + + * Value type is <> + * Default value is `"*"` + +Glob pattern, used to select the pattern files in the directories +specified by patterns_dir + +[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] +===== `tag_on_failure` + + * Value type is <> + * Default value is `["_grokparsefailure"]` + +Append values to the `tags` field when there has been no +successful match + +[id="{version}-plugins-{type}s-{plugin}-tag_on_timeout"] +===== `tag_on_timeout` + + * Value type is <> + * Default value is `"_groktimeout"` + +Tag to apply if a grok regexp times out. + +[id="{version}-plugins-{type}s-{plugin}-timeout_millis"] +===== `timeout_millis` + + * Value type is <> + * Default value is `30000` + +Attempt to terminate regexps after this amount of time. +This applies per pattern if multiple patterns are applied +This will never timeout early, but may take a little longer to timeout. 
+
Actual timeout is approximate based on a 250ms quantization.
Set to 0 to disable timeouts.
+
+
+
[id="{version}-plugins-{type}s-{plugin}-common-options"]
include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/hashid-index.asciidoc b/docs/versioned-plugins/filters/hashid-index.asciidoc
new file mode 100644
index 000000000..34c846d70
--- /dev/null
+++ b/docs/versioned-plugins/filters/hashid-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: hashid
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-08-21
+| <> | 2017-06-23
+|=======================================================================
+
+include::hashid-v0.1.3.asciidoc[]
+include::hashid-v0.1.2.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/hashid-v0.1.2.asciidoc b/docs/versioned-plugins/filters/hashid-v0.1.2.asciidoc
new file mode 100644
index 000000000..1ce725ca6
--- /dev/null
+++ b/docs/versioned-plugins/filters/hashid-v0.1.2.asciidoc
@@ -0,0 +1,110 @@
+:plugin: hashid
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v0.1.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-hashid/blob/v0.1.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="plugins-{type}-{plugin}"]
+
+=== Hashid filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This filter allows you to generate predictable, string-encoded hashed keys
+based on event contents and timestamp. This can be used to avoid getting
+duplicate records indexed into Elasticsearch.
+
+Hashed keys can be generated from full or partial hashes, and the filter
+can prefix these keys based on the event timestamp in order
+to make them largely ordered by timestamp, which tends to lead to increased
+indexing performance for event-based use cases where data is being indexed
+in near real time.
+
+When used with the timestamp prefix enabled, it should ideally be run after
+the date filter has run and populated the @timestamp field.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Hashid Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-add_timestamp_prefix>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-hash_bytes_used>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-method>> |<>, one of `["SHA1", "SHA256", "SHA384", "SHA512", "MD5"]`|No
+| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
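+
+Before the per-option details, here is one plausible way to wire the filter
+together (a sketch only; the source fields, key, and target shown are
+illustrative choices, not defaults):
+[source,ruby]
+    filter {
+      hashid {
+        source => ["message", "host"]
+        target => "generated_id"
+        key => "some-private-key"
+        method => "SHA256"
+      }
+    }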
+
 
+
[id="{version}-plugins-{type}s-{plugin}-add_timestamp_prefix"]
===== `add_timestamp_prefix`
+
 * Value type is <>
 * Default value is `true`
+
Use the timestamp to generate an ID prefix.
+
[id="{version}-plugins-{type}s-{plugin}-hash_bytes_used"]
===== `hash_bytes_used`
+
 * Value type is <>
 * There is no default value for this setting.
+
If the full generated hash is not to be used, this parameter specifies how many
bytes of it should be used. If not specified, the full hash is used.
+
[id="{version}-plugins-{type}s-{plugin}-key"]
===== `key`
+
 * Value type is <>
 * Default value is `"hashid"`
+
Encryption key to be used when generating cryptographic hashes.
+
[id="{version}-plugins-{type}s-{plugin}-method"]
===== `method`
+
 * Value can be any of: `SHA1`, `SHA256`, `SHA384`, `SHA512`, `MD5`
 * Default value is `"MD5"`
+
Hash function to use.
+
[id="{version}-plugins-{type}s-{plugin}-source"]
===== `source`
+
 * Value type is <>
 * Default value is `["message"]`
+
Source field(s) to base the hash calculation on.
+
[id="{version}-plugins-{type}s-{plugin}-target"]
===== `target`
+
 * Value type is <>
 * Default value is `"hashid"`
+
Target field.
Will overwrite the current value of the field if it exists.
+
+
+
[id="{version}-plugins-{type}s-{plugin}-common-options"]
include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/hashid-v0.1.3.asciidoc b/docs/versioned-plugins/filters/hashid-v0.1.3.asciidoc
new file mode 100644
index 000000000..4a91fd17a
--- /dev/null
+++ b/docs/versioned-plugins/filters/hashid-v0.1.3.asciidoc
@@ -0,0 +1,110 @@
+:plugin: hashid
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v0.1.3
+:release_date: 2017-08-21
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-hashid/blob/v0.1.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Hashid filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This filter allows you to generate predictable, string-encoded hashed keys
+based on event contents and timestamp. This can be used to avoid getting
+duplicate records indexed into Elasticsearch.
+
+Hashed keys can be generated from full or partial hashes, and the filter
+can prefix these keys based on the event timestamp in order
+to make them largely ordered by timestamp, which tends to lead to increased
+indexing performance for event-based use cases where data is being indexed
+in near real time.
+
+When used with the timestamp prefix enabled, it should ideally be run after
+the date filter has run and populated the @timestamp field.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Hashid Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
[cols="<,<,<",options="header",]
|=======================================================================
|Setting |Input type|Required
| <<{version}-plugins-{type}s-{plugin}-add_timestamp_prefix>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-hash_bytes_used>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-method>> |<>, one of `["SHA1", "SHA256", "SHA384", "SHA512", "MD5"]`|No
| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
|=======================================================================
+
Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
filter plugins.
+
 
+
[id="{version}-plugins-{type}s-{plugin}-add_timestamp_prefix"]
===== `add_timestamp_prefix`
+
 * Value type is <>
 * Default value is `true`
+
Use the timestamp to generate an ID prefix.
+
[id="{version}-plugins-{type}s-{plugin}-hash_bytes_used"]
===== `hash_bytes_used`
+
 * Value type is <>
 * There is no default value for this setting.
+
If the full generated hash is not to be used, this parameter specifies how many
bytes of it should be used. If not specified, the full hash is used.
+
[id="{version}-plugins-{type}s-{plugin}-key"]
===== `key`
+
 * Value type is <>
 * Default value is `"hashid"`
+
Encryption key to be used when generating cryptographic hashes.
+
[id="{version}-plugins-{type}s-{plugin}-method"]
===== `method`
+
 * Value can be any of: `SHA1`, `SHA256`, `SHA384`, `SHA512`, `MD5`
 * Default value is `"MD5"`
+
Hash function to use.
+
[id="{version}-plugins-{type}s-{plugin}-source"]
===== `source`
+
 * Value type is <>
 * Default value is `["message"]`
+
Source field(s) to base the hash calculation on.
+
[id="{version}-plugins-{type}s-{plugin}-target"]
===== `target`
+
 * Value type is <>
 * Default value is `"hashid"`
+
Target field.
Will overwrite the current value of the field if it exists.
+
+
+
[id="{version}-plugins-{type}s-{plugin}-common-options"]
include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/i18n-index.asciidoc b/docs/versioned-plugins/filters/i18n-index.asciidoc
new file mode 100644
index 000000000..ee9087e6a
--- /dev/null
+++ b/docs/versioned-plugins/filters/i18n-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: i18n
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::i18n-v3.0.3.asciidoc[]
+include::i18n-v3.0.2.asciidoc[]
+include::i18n-v3.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/i18n-v3.0.1.asciidoc b/docs/versioned-plugins/filters/i18n-v3.0.1.asciidoc
new file mode 100644
index 000000000..28cb23731
--- /dev/null
+++ b/docs/versioned-plugins/filters/i18n-v3.0.1.asciidoc
@@ -0,0 +1,62 @@
+:plugin: i18n
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-i18n/blob/v3.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== I18n filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The i18n filter allows you to remove special characters +from a field + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== I18n Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-transliterate>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-transliterate"] +===== `transliterate` + + * Value type is <> + * There is no default value for this setting. + +Replaces non-ASCII characters with an ASCII approximation, or +if none exists, a replacement character which defaults to `?` + +Example: +[source,ruby] + filter { + i18n { + transliterate => ["field1", "field2"] + } + } + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/i18n-v3.0.2.asciidoc b/docs/versioned-plugins/filters/i18n-v3.0.2.asciidoc new file mode 100644 index 000000000..eb826214b --- /dev/null +++ b/docs/versioned-plugins/filters/i18n-v3.0.2.asciidoc @@ -0,0 +1,62 @@ +:plugin: i18n +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.2 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-i18n/blob/v3.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== I18n filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The i18n filter allows you to remove special characters +from a field + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== I18n Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-transliterate>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-transliterate"] +===== `transliterate` + + * Value type is <> + * There is no default value for this setting. 
+ +Replaces non-ASCII characters with an ASCII approximation, or +if none exists, a replacement character which defaults to `?` + +Example: +[source,ruby] + filter { + i18n { + transliterate => ["field1", "field2"] + } + } + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/i18n-v3.0.3.asciidoc b/docs/versioned-plugins/filters/i18n-v3.0.3.asciidoc new file mode 100644 index 000000000..f3bf27226 --- /dev/null +++ b/docs/versioned-plugins/filters/i18n-v3.0.3.asciidoc @@ -0,0 +1,62 @@ +:plugin: i18n +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-i18n/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== I18n filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The i18n filter allows you to remove special characters +from a field + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== I18n Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-transliterate>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-transliterate"] +===== `transliterate` + + * Value type is <> + * There is no default value for this setting. 
+
Replaces non-ASCII characters with an ASCII approximation, or
if none exists, a replacement character, which defaults to `?`.
+
Example:
[source,ruby]
    filter {
      i18n {
        transliterate => ["field1", "field2"]
      }
    }
+
+
+
[id="{version}-plugins-{type}s-{plugin}-common-options"]
include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/jdbc_static-index.asciidoc b/docs/versioned-plugins/filters/jdbc_static-index.asciidoc
new file mode 100644
index 000000000..01d19d5b2
--- /dev/null
+++ b/docs/versioned-plugins/filters/jdbc_static-index.asciidoc
@@ -0,0 +1,10 @@
+:plugin: jdbc_static
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+|=======================================================================
+
+
diff --git a/docs/versioned-plugins/filters/jdbc_streaming-index.asciidoc b/docs/versioned-plugins/filters/jdbc_streaming-index.asciidoc
new file mode 100644
index 000000000..2c6d66679
--- /dev/null
+++ b/docs/versioned-plugins/filters/jdbc_streaming-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: jdbc_streaming
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::jdbc_streaming-v1.0.3.asciidoc[]
+include::jdbc_streaming-v1.0.2.asciidoc[]
+include::jdbc_streaming-v1.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/jdbc_streaming-v1.0.1.asciidoc b/docs/versioned-plugins/filters/jdbc_streaming-v1.0.1.asciidoc
new file mode 100644
index 000000000..fc62ca166
--- /dev/null
+++ b/docs/versioned-plugins/filters/jdbc_streaming-v1.0.1.asciidoc
@@ -0,0 +1,226 @@
+:plugin: jdbc_streaming
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v1.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-jdbc_streaming/blob/v1.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="plugins-{type}-{plugin}"]
+
+=== Jdbc_streaming filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This filter executes a SQL query and stores the result set in the field
+specified as `target`.
+It will cache the results locally in an LRU cache with expiry.
+
+For example, you can load a row based on an id in the event:
+
+[source,ruby]
+filter {
+  jdbc_streaming {
+    jdbc_driver_library => "/path/to/mysql-connector-java-5.1.34-bin.jar"
+    jdbc_driver_class => "com.mysql.jdbc.Driver"
+    jdbc_connection_string => "jdbc:mysql://localhost:3306/mydatabase"
+    jdbc_user => "me"
+    jdbc_password => "secret"
+    statement => "select * from WORLD.COUNTRY WHERE Code = :code"
+    parameters => { "code" => "country_code" }
+    target => "country_details"
+  }
+}
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Jdbc_streaming Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
[cols="<,<,<",options="header",]
|=======================================================================
|Setting |Input type|Required
| <<{version}-plugins-{type}s-{plugin}-cache_expiration>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-cache_size>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-default_hash>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-jdbc_connection_string>> |<>|Yes
| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_class>> |<>|Yes
| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_library>> |a valid filesystem path|No
| <<{version}-plugins-{type}s-{plugin}-jdbc_password>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-jdbc_user>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-jdbc_validate_connection>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-statement>> |<>|Yes
| <<{version}-plugins-{type}s-{plugin}-tag_on_default_use>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
| <<{version}-plugins-{type}s-{plugin}-target>> |<>|Yes
| <<{version}-plugins-{type}s-{plugin}-use_cache>> |<>|No
|=======================================================================
+
Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
filter plugins.
+
 
+
[id="{version}-plugins-{type}s-{plugin}-cache_expiration"]
===== `cache_expiration`
+
 * Value type is <>
 * Default value is `5.0`
+
The minimum number of seconds any entry should remain in the cache; defaults to 5 seconds.
A numeric value; you can use decimals, for example `{ "cache_expiration" => 0.25 }`.
If there are transient JDBC errors, the cache will store empty results for a given
parameter set and bypass the JDBC lookup. This merges the default_hash into the event until
the cache entry expires, at which point the JDBC lookup will be tried again for the same parameters.
Conversely, while the cache contains valid results, any external problem that would cause
JDBC errors will not be noticed for the cache_expiration period.
+
[id="{version}-plugins-{type}s-{plugin}-cache_size"]
===== `cache_size`
+
 * Value type is <>
 * Default value is `500`
+
The maximum number of cache entries to store; defaults to 500 entries.
The least recently used entry will be evicted.
+
[id="{version}-plugins-{type}s-{plugin}-default_hash"]
===== `default_hash`
+
 * Value type is <>
 * Default value is `{}`
+
Define a default object to use when lookup fails to return a matching row.
Ensure that the key names of this object match the columns from the statement.
+
[id="{version}-plugins-{type}s-{plugin}-jdbc_connection_string"]
===== `jdbc_connection_string`
+
 * This is a required setting.
 * Value type is <>
 * There is no default value for this setting.
+
JDBC connection string
+
[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_class"]
===== `jdbc_driver_class`
+
 * This is a required setting.
 * Value type is <>
 * There is no default value for this setting.
+
JDBC driver class to load, for example "oracle.jdbc.OracleDriver" or "org.apache.derby.jdbc.ClientDriver"
+
[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_library"]
===== `jdbc_driver_library`
+
 * Value type is <>
 * There is no default value for this setting.
+
+JDBC driver library path to the third-party driver library.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_password"]
+===== `jdbc_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC password
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_user"]
+===== `jdbc_user`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC user
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_validate_connection"]
+===== `jdbc_validate_connection`
+
+ * Value type is <>
+ * Default value is `false`
+
+Connection pool configuration.
+Validate connection before use.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout"]
+===== `jdbc_validation_timeout`
+
+ * Value type is <>
+ * Default value is `3600`
+
+Connection pool configuration.
+How often to validate a connection (in seconds).
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is <>
+ * Default value is `{}`
+
+Hash of query parameters, for example `{ "id" => "id_field" }`.
+
+[id="{version}-plugins-{type}s-{plugin}-statement"]
+===== `statement`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Statement to execute.
+To use parameters, use named parameter syntax, for example "SELECT * FROM MYTABLE WHERE ID = :id".
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_default_use"]
+===== `tag_on_default_use`
+
+ * Value type is <>
+ * Default value is `["_jdbcstreamingdefaultsused"]`
+
+Append values to the `tags` field if no record was found and default values were used.
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
+===== `tag_on_failure`
+
+ * Value type is <>
+ * Default value is `["_jdbcstreamingfailure"]`
+
+Append values to the `tags` field if a SQL error occurred.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Define the target field to store the extracted result(s). The field is
+overwritten if it exists.
+
+[id="{version}-plugins-{type}s-{plugin}-use_cache"]
+===== `use_cache`
+
+ * Value type is <>
+ * Default value is `true`
+
+Enable or disable caching, boolean true or false. Defaults to `true`.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/jdbc_streaming-v1.0.2.asciidoc b/docs/versioned-plugins/filters/jdbc_streaming-v1.0.2.asciidoc
new file mode 100644
index 000000000..3997ee0ab
--- /dev/null
+++ b/docs/versioned-plugins/filters/jdbc_streaming-v1.0.2.asciidoc
@@ -0,0 +1,226 @@
+:plugin: jdbc_streaming
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v1.0.2
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-jdbc_streaming/blob/v1.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Jdbc_streaming filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This filter executes a SQL query and stores the result set in the field
+specified as `target`.
+It caches the results locally in an LRU cache with expiry.
+
+For example, you can load a row based on an id in the event:
+
+[source,ruby]
+filter {
+  jdbc_streaming {
+    jdbc_driver_library => "/path/to/mysql-connector-java-5.1.34-bin.jar"
+    jdbc_driver_class => "com.mysql.jdbc.Driver"
+    jdbc_connection_string => "jdbc:mysql://localhost:3306/mydatabase"
+    jdbc_user => "me"
+    jdbc_password => "secret"
+    statement => "select * from WORLD.COUNTRY WHERE Code = :code"
+    parameters => { "code" => "country_code" }
+    target => "country_details"
+  }
+}
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Jdbc_streaming Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cache_expiration>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cache_size>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-default_hash>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-jdbc_connection_string>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_class>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_library>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-jdbc_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-jdbc_user>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-jdbc_validate_connection>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-statement>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-tag_on_default_use>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-use_cache>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-cache_expiration"]
+===== `cache_expiration`
+
+ * Value type is <>
+ * Default value is `5.0`
+
+The minimum number of seconds any entry should remain in the cache. Defaults to 5 seconds.
+This is a numeric value; decimals are allowed, for example `{ "cache_expiration" => 0.25 }`.
+If there are transient JDBC errors, the cache will store empty results for a given
+parameter set and bypass the JDBC lookup; this merges the default_hash into the event until
+the cache entry expires, at which point the JDBC lookup is tried again for the same parameters.
+Conversely, while the cache contains valid results, any external problem that would cause
+JDBC errors will not be noticed for the cache_expiration period.
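+
+As an illustrative sketch only (the connection settings below are
+placeholders, not recommendations), a lookup that keeps cached rows for at
+least a minute might look like:
+
+[source,ruby]
+filter {
+  jdbc_streaming {
+    jdbc_driver_library => "/path/to/driver.jar"  # placeholder path
+    jdbc_driver_class => "com.mysql.jdbc.Driver"
+    jdbc_connection_string => "jdbc:mysql://localhost:3306/mydatabase"  # placeholder
+    jdbc_user => "me"
+    jdbc_password => "secret"
+    statement => "select * from WORLD.COUNTRY WHERE Code = :code"
+    parameters => { "code" => "country_code" }
+    target => "country_details"
+    cache_expiration => 60.0  # entries remain cached for at least 60 seconds
+    cache_size => 1000        # least recently used entries are evicted beyond 1000
+  }
+}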
+ +[id="{version}-plugins-{type}s-{plugin}-cache_size"] +===== `cache_size` + + * Value type is <> + * Default value is `500` + +The maximum number of cache entries are stored, defaults to 500 entries +The least recently used entry will be evicted + +[id="{version}-plugins-{type}s-{plugin}-default_hash"] +===== `default_hash` + + * Value type is <> + * Default value is `{}` + +Define a default object to use when lookup fails to return a matching row. +ensure that the key names of this object match the columns from the statement + +[id="{version}-plugins-{type}s-{plugin}-jdbc_connection_string"] +===== `jdbc_connection_string` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +JDBC connection string + +[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_class"] +===== `jdbc_driver_class` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +JDBC driver class to load, for example "oracle.jdbc.OracleDriver" or "org.apache.derby.jdbc.ClientDriver" + +[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_library"] +===== `jdbc_driver_library` + + * Value type is <> + * There is no default value for this setting. + +Tentative of abstracting JDBC logic to a mixin +for potential reuse in other plugins (input/output) +This method is called when someone includes this module +Add these methods to the 'base' given. +JDBC driver library path to third party driver library. + +[id="{version}-plugins-{type}s-{plugin}-jdbc_password"] +===== `jdbc_password` + + * Value type is <> + * There is no default value for this setting. + +JDBC password + +[id="{version}-plugins-{type}s-{plugin}-jdbc_user"] +===== `jdbc_user` + + * Value type is <> + * There is no default value for this setting. + +JDBC user + +[id="{version}-plugins-{type}s-{plugin}-jdbc_validate_connection"] +===== `jdbc_validate_connection` + + * Value type is <> + * Default value is `false` + +Connection pool configuration. +Validate connection before use. + +[id="{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout"] +===== `jdbc_validation_timeout` + + * Value type is <> + * Default value is `3600` + +Connection pool configuration. +How often to validate a connection (in seconds) + +[id="{version}-plugins-{type}s-{plugin}-parameters"] +===== `parameters` + + * Value type is <> + * Default value is `{}` + +Hash of query parameter, for example `{ "id" => "id_field" }` + +[id="{version}-plugins-{type}s-{plugin}-statement"] +===== `statement` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Statement to execute. +To use parameters, use named parameter syntax, for example "SELECT * FROM MYTABLE WHERE ID = :id" + +[id="{version}-plugins-{type}s-{plugin}-tag_on_default_use"] +===== `tag_on_default_use` + + * Value type is <> + * Default value is `["_jdbcstreamingdefaultsused"]` + +Append values to the `tags` field if no record was found and default values were used + +[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] +===== `tag_on_failure` + + * Value type is <> + * Default value is `["_jdbcstreamingfailure"]` + +Append values to the `tags` field if sql error occured + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. 
+
+[id="{version}-plugins-{type}s-{plugin}-use_cache"]
+===== `use_cache`
+
+ * Value type is <>
+ * Default value is `true`
+
+Enable or disable caching, boolean true or false. Defaults to `true`.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/jdbc_streaming-v1.0.3.asciidoc b/docs/versioned-plugins/filters/jdbc_streaming-v1.0.3.asciidoc
new file mode 100644
index 000000000..5728db64d
--- /dev/null
+++ b/docs/versioned-plugins/filters/jdbc_streaming-v1.0.3.asciidoc
@@ -0,0 +1,226 @@
+:plugin: jdbc_streaming
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v1.0.3
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-jdbc_streaming/blob/v1.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Jdbc_streaming filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This filter executes a SQL query and stores the result set in the field
+specified as `target`.
+It caches the results locally in an LRU cache with expiry.
+
+For example, you can load a row based on an id in the event:
+
+[source,ruby]
+filter {
+  jdbc_streaming {
+    jdbc_driver_library => "/path/to/mysql-connector-java-5.1.34-bin.jar"
+    jdbc_driver_class => "com.mysql.jdbc.Driver"
+    jdbc_connection_string => "jdbc:mysql://localhost:3306/mydatabase"
+    jdbc_user => "me"
+    jdbc_password => "secret"
+    statement => "select * from WORLD.COUNTRY WHERE Code = :code"
+    parameters => { "code" => "country_code" }
+    target => "country_details"
+  }
+}
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Jdbc_streaming Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-cache_expiration>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cache_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-default_hash>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_connection_string>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_class>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_library>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_validate_connection>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-statement>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-tag_on_default_use>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-use_cache>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-cache_expiration"] +===== `cache_expiration` + + * Value type is <> + * Default value is `5.0` + +The minimum number of seconds any entry should remain in the cache, defaults to 5 seconds +A numeric value, you can use decimals for example `{ "cache_expiration" => 0.25 }` +If there are transient jdbc errors the cache will store empty results for a given +parameter set and bypass the jbdc lookup, this merges the default_hash into the event, until +the cache entry expires, then the jdbc lookup will be tried again for the same parameters +Conversely, while the cache contains valid results any external problem that would cause +jdbc errors, will not be noticed for the cache_expiration period. + +[id="{version}-plugins-{type}s-{plugin}-cache_size"] +===== `cache_size` + + * Value type is <> + * Default value is `500` + +The maximum number of cache entries are stored, defaults to 500 entries +The least recently used entry will be evicted + +[id="{version}-plugins-{type}s-{plugin}-default_hash"] +===== `default_hash` + + * Value type is <> + * Default value is `{}` + +Define a default object to use when lookup fails to return a matching row. +ensure that the key names of this object match the columns from the statement + +[id="{version}-plugins-{type}s-{plugin}-jdbc_connection_string"] +===== `jdbc_connection_string` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +JDBC connection string + +[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_class"] +===== `jdbc_driver_class` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +JDBC driver class to load, for example "oracle.jdbc.OracleDriver" or "org.apache.derby.jdbc.ClientDriver" + +[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_library"] +===== `jdbc_driver_library` + + * Value type is <> + * There is no default value for this setting. 
+
+JDBC driver library path to the third-party driver library.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_password"]
+===== `jdbc_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC password
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_user"]
+===== `jdbc_user`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC user
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_validate_connection"]
+===== `jdbc_validate_connection`
+
+ * Value type is <>
+ * Default value is `false`
+
+Connection pool configuration.
+Validate connection before use.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout"]
+===== `jdbc_validation_timeout`
+
+ * Value type is <>
+ * Default value is `3600`
+
+Connection pool configuration.
+How often to validate a connection (in seconds).
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is <>
+ * Default value is `{}`
+
+Hash of query parameters, for example `{ "id" => "id_field" }`.
+
+[id="{version}-plugins-{type}s-{plugin}-statement"]
+===== `statement`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Statement to execute.
+To use parameters, use named parameter syntax, for example "SELECT * FROM MYTABLE WHERE ID = :id".
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_default_use"]
+===== `tag_on_default_use`
+
+ * Value type is <>
+ * Default value is `["_jdbcstreamingdefaultsused"]`
+
+Append values to the `tags` field if no record was found and default values were used.
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
+===== `tag_on_failure`
+
+ * Value type is <>
+ * Default value is `["_jdbcstreamingfailure"]`
+
+Append values to the `tags` field if a SQL error occurred.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Define the target field to store the extracted result(s). The field is
+overwritten if it exists.
+
+[id="{version}-plugins-{type}s-{plugin}-use_cache"]
+===== `use_cache`
+
+ * Value type is <>
+ * Default value is `true`
+
+Enable or disable caching, boolean true or false. Defaults to `true`.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/json-index.asciidoc b/docs/versioned-plugins/filters/json-index.asciidoc
new file mode 100644
index 000000000..2d863e241
--- /dev/null
+++ b/docs/versioned-plugins/filters/json-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: json
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::json-v3.0.5.asciidoc[]
+include::json-v3.0.4.asciidoc[]
+include::json-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/json-v3.0.3.asciidoc b/docs/versioned-plugins/filters/json-v3.0.3.asciidoc
new file mode 100644
index 000000000..349e6fb5f
--- /dev/null
+++ b/docs/versioned-plugins/filters/json-v3.0.3.asciidoc
@@ -0,0 +1,121 @@
+:plugin: json
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-json/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Json filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This is a JSON parsing filter. It takes an existing field which contains JSON and
+expands it into an actual data structure within the Logstash event.
+
+By default it will place the parsed JSON in the root (top level) of the Logstash event, but this
+filter can be configured to place the JSON into any arbitrary event field, using the
+`target` configuration.
+
+This plugin has a few fallback scenarios when something bad happens during the parsing of an event.
+If the JSON parsing fails on the data, the event will be untouched and it will be tagged with a
+`_jsonparsefailure`; you can then use conditionals to clean the data. You can configure this tag with the
+`tag_on_failure` option.
+
+If the parsed data contains a `@timestamp` field, the plugin will try to use it for the event's `@timestamp`. If the
+parsing fails, the field will be renamed to `_@timestamp` and the event will be tagged with a
+`_timestampparsefailure`.
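+
+For instance, a minimal sketch of the failure handling described above
+(the `drop` filter is just one possible reaction to the default
+`_jsonparsefailure` tag):
+
+[source,ruby]
+    filter {
+      json {
+        source => "message"
+      }
+      if "_jsonparsefailure" in [tags] {
+        drop { }  # discard events whose JSON could not be parsed
+      }
+    }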
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-skip_on_invalid_json>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-skip_on_invalid_json"] +===== `skip_on_invalid_json` + + * Value type is <> + * Default value is `false` + +Allow to skip filter on invalid json (allows to handle json and non-json data without warnings) + +[id="{version}-plugins-{type}s-{plugin}-source"] +===== `source` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The configuration for the JSON filter: +[source,ruby] + source => source_field + +For example, if you have JSON data in the `message` field: +[source,ruby] + filter { + json { + source => "message" + } + } + +The above would parse the json from the `message` field + +[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] +===== `tag_on_failure` + + * Value type is <> + * Default value is `["_jsonparsefailure"]` + +Append values to the `tags` field when there has been no +successful match + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +Define the target field for placing the parsed data. If this setting is +omitted, the JSON data will be stored at the root (top level) of the event. + +For example, if you want the data to be put in the `doc` field: +[source,ruby] + filter { + json { + target => "doc" + } + } + +JSON in the value of the `source` field will be expanded into a +data structure in the `target` field. + +NOTE: if the `target` field already exists, it will be overwritten! + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/json-v3.0.4.asciidoc b/docs/versioned-plugins/filters/json-v3.0.4.asciidoc new file mode 100644 index 000000000..ab2b60227 --- /dev/null +++ b/docs/versioned-plugins/filters/json-v3.0.4.asciidoc @@ -0,0 +1,121 @@ +:plugin: json +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-json/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Json filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This is a JSON parsing filter. It takes an existing field which contains JSON and +expands it into an actual data structure within the Logstash event. + +By default it will place the parsed JSON in the root (top level) of the Logstash event, but this +filter can be configured to place the JSON into any arbitrary event field, using the +`target` configuration. 
+
+This plugin has a few fallback scenarios when something bad happens during the parsing of an event.
+If the JSON parsing fails on the data, the event will be untouched and it will be tagged with a
+`_jsonparsefailure`; you can then use conditionals to clean the data. You can configure this tag with the
+`tag_on_failure` option.
+
+If the parsed data contains a `@timestamp` field, the plugin will try to use it for the event's `@timestamp`. If the
+parsing fails, the field will be renamed to `_@timestamp` and the event will be tagged with a
+`_timestampparsefailure`.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Json Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-skip_on_invalid_json>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-skip_on_invalid_json"]
+===== `skip_on_invalid_json`
+
+ * Value type is <>
+ * Default value is `false`
+
+Allows the filter to be skipped on invalid JSON (so you can handle JSON and non-JSON data without warnings).
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The configuration for the JSON filter:
+[source,ruby]
+    source => source_field
+
+For example, if you have JSON data in the `message` field:
+[source,ruby]
+    filter {
+      json {
+        source => "message"
+      }
+    }
+
+The above would parse the JSON from the `message` field.
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
+===== `tag_on_failure`
+
+ * Value type is <>
+ * Default value is `["_jsonparsefailure"]`
+
+Append values to the `tags` field when there has been no
+successful match
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Define the target field for placing the parsed data. If this setting is
+omitted, the JSON data will be stored at the root (top level) of the event.
+
+For example, if you want the data to be put in the `doc` field:
+[source,ruby]
+    filter {
+      json {
+        target => "doc"
+      }
+    }
+
+JSON in the value of the `source` field will be expanded into a
+data structure in the `target` field.
+
+NOTE: If the `target` field already exists, it will be overwritten!
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/json-v3.0.5.asciidoc b/docs/versioned-plugins/filters/json-v3.0.5.asciidoc
new file mode 100644
index 000000000..48670c780
--- /dev/null
+++ b/docs/versioned-plugins/filters/json-v3.0.5.asciidoc
@@ -0,0 +1,121 @@
+:plugin: json
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-json/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Json filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This is a JSON parsing filter. It takes an existing field which contains JSON and
+expands it into an actual data structure within the Logstash event.
+
+By default it will place the parsed JSON in the root (top level) of the Logstash event, but this
+filter can be configured to place the JSON into any arbitrary event field, using the
+`target` configuration.
+
+This plugin has a few fallback scenarios when something bad happens during the parsing of an event.
+If the JSON parsing fails on the data, the event will be untouched and it will be tagged with a
+`_jsonparsefailure`; you can then use conditionals to clean the data. You can configure this tag with the
+`tag_on_failure` option.
+
+If the parsed data contains a `@timestamp` field, the plugin will try to use it for the event's `@timestamp`. If the
+parsing fails, the field will be renamed to `_@timestamp` and the event will be tagged with a
+`_timestampparsefailure`.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Json Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-skip_on_invalid_json>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-skip_on_invalid_json"]
+===== `skip_on_invalid_json`
+
+ * Value type is <>
+ * Default value is `false`
+
+Allows the filter to be skipped on invalid JSON (so you can handle JSON and non-JSON data without warnings).
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The configuration for the JSON filter:
+[source,ruby]
+    source => source_field
+
+For example, if you have JSON data in the `message` field:
+[source,ruby]
+    filter {
+      json {
+        source => "message"
+      }
+    }
+
+The above would parse the JSON from the `message` field.
+
+[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
+===== `tag_on_failure`
+
+ * Value type is <>
+ * Default value is `["_jsonparsefailure"]`
+
+Append values to the `tags` field when there has been no
+successful match
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Define the target field for placing the parsed data. If this setting is
+omitted, the JSON data will be stored at the root (top level) of the event.
+
+For example, if you want the data to be put in the `doc` field:
+[source,ruby]
+    filter {
+      json {
+        target => "doc"
+      }
+    }
+
+JSON in the value of the `source` field will be expanded into a
+data structure in the `target` field.
+
+NOTE: If the `target` field already exists, it will be overwritten!
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/json_encode-index.asciidoc b/docs/versioned-plugins/filters/json_encode-index.asciidoc
new file mode 100644
index 000000000..bbff604c3
--- /dev/null
+++ b/docs/versioned-plugins/filters/json_encode-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: json_encode
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::json_encode-v3.0.3.asciidoc[]
+include::json_encode-v3.0.2.asciidoc[]
+include::json_encode-v3.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/json_encode-v3.0.1.asciidoc b/docs/versioned-plugins/filters/json_encode-v3.0.1.asciidoc
new file mode 100644
index 000000000..2b060a5dc
--- /dev/null
+++ b/docs/versioned-plugins/filters/json_encode-v3.0.1.asciidoc
@@ -0,0 +1,76 @@
+:plugin: json_encode
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-json_encode/blob/v3.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Json_encode filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+JSON encode filter. Takes a field and serializes it into JSON.
+
+If no target is specified, the source field is overwritten with the JSON
+text.
+
+For example, if you have a field named `foo`, and you want to store the
+JSON encoded string in `bar`, do this:
+[source,ruby]
+    filter {
+      json_encode {
+        source => "foo"
+        target => "bar"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Json_encode Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The field to convert to JSON.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The field to write the JSON into.
+If not specified, the source field will be overwritten.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/json_encode-v3.0.2.asciidoc b/docs/versioned-plugins/filters/json_encode-v3.0.2.asciidoc
new file mode 100644
index 000000000..a5559815c
--- /dev/null
+++ b/docs/versioned-plugins/filters/json_encode-v3.0.2.asciidoc
@@ -0,0 +1,76 @@
+:plugin: json_encode
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.2
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-json_encode/blob/v3.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Json_encode filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+JSON encode filter. Takes a field and serializes it into JSON.
+
+If no target is specified, the source field is overwritten with the JSON
+text.
+
+For example, if you have a field named `foo`, and you want to store the
+JSON encoded string in `bar`, do this:
+[source,ruby]
+    filter {
+      json_encode {
+        source => "foo"
+        target => "bar"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Json_encode Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The field to convert to JSON.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The field to write the JSON into. If not specified, the source
+field will be overwritten.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/json_encode-v3.0.3.asciidoc b/docs/versioned-plugins/filters/json_encode-v3.0.3.asciidoc
new file mode 100644
index 000000000..f20f3f037
--- /dev/null
+++ b/docs/versioned-plugins/filters/json_encode-v3.0.3.asciidoc
@@ -0,0 +1,76 @@
+:plugin: json_encode
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-json_encode/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Json_encode filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+JSON encode filter. Takes a field and serializes it into JSON.
+
+If no target is specified, the source field is overwritten with the JSON
+text.
+
+For example, if you have a field named `foo`, and you want to store the
+JSON encoded string in `bar`, do this:
+[source,ruby]
+    filter {
+      json_encode {
+        source => "foo"
+        target => "bar"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Json_encode Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The field to convert to JSON.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The field to write the JSON into. If not specified, the source
+field will be overwritten.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/kubernetes_metadata-index.asciidoc b/docs/versioned-plugins/filters/kubernetes_metadata-index.asciidoc
new file mode 100644
index 000000000..78b609133
--- /dev/null
+++ b/docs/versioned-plugins/filters/kubernetes_metadata-index.asciidoc
@@ -0,0 +1,10 @@
+:plugin: kubernetes_metadata
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+|=======================================================================
+
+
diff --git a/docs/versioned-plugins/filters/kv-index.asciidoc b/docs/versioned-plugins/filters/kv-index.asciidoc
new file mode 100644
index 000000000..afe7e4d62
--- /dev/null
+++ b/docs/versioned-plugins/filters/kv-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: kv
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::kv-v4.0.3.asciidoc[]
+include::kv-v4.0.2.asciidoc[]
+include::kv-v4.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/kv-v4.0.1.asciidoc b/docs/versioned-plugins/filters/kv-v4.0.1.asciidoc
new file mode 100644
index 000000000..1bedceb04
--- /dev/null
+++ b/docs/versioned-plugins/filters/kv-v4.0.1.asciidoc
@@ -0,0 +1,409 @@
+:plugin: kv
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-kv/blob/v4.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Kv filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This filter helps automatically parse messages (or specific event fields)
+which are of the `foo=bar` variety.
+
+For example, if you have a log message which contains `ip=1.2.3.4
+error=REFUSED`, you can parse those automatically by configuring:
+[source,ruby]
+    filter {
+      kv { }
+    }
+
+The above will result in a message of `ip=1.2.3.4 error=REFUSED` having
+the fields:
+
+* `ip: 1.2.3.4`
+* `error: REFUSED`
+
+This is great for postfix, iptables, and other types of logs that
+tend towards `key=value` syntax.
+
+You can configure any arbitrary strings to split your data on,
+in case your data is not structured using `=` signs and whitespace.
+For example, this filter can also be used to parse query parameters like
+`foo=bar&baz=fizz` by setting the `field_split` parameter to `&`.
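+
+For example, a short sketch that applies this to a hypothetical
+`request_args` field containing `foo=bar&baz=fizz`:
+
+[source,ruby]
+    filter {
+      kv {
+        source => "request_args"  # illustrative field name
+        field_split => "&"
+      }
+    }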
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Kv Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-allow_duplicate_values>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-default_keys>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-exclude_keys>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-field_split>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_brackets>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_keys>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-recursive>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-remove_char_key>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-remove_char_value>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-transform_key>> |<>, one of `["lowercase", "uppercase", "capitalize"]`|No
+| <<{version}-plugins-{type}s-{plugin}-transform_value>> |<>, one of `["lowercase", "uppercase", "capitalize"]`|No
+| <<{version}-plugins-{type}s-{plugin}-trim_key>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-trim_value>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-value_split>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-allow_duplicate_values"]
+===== `allow_duplicate_values`
+
+ * Value type is <>
+ * Default value is `true`
+
+A boolean option for removing duplicate key/value pairs. When set to false, only
+one unique key/value pair will be preserved.
+
+For example, consider a source like `from=me from=me`. `[from]` will map to
+an Array with two elements: `["me", "me"]`. To only keep unique key/value pairs,
+you could use this configuration:
+[source,ruby]
+    filter {
+      kv {
+        allow_duplicate_values => false
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-default_keys"]
+===== `default_keys`
+
+ * Value type is <>
+ * Default value is `{}`
+
+A hash specifying the default keys and their values which should be added to the event
+in case these keys do not exist in the source field being parsed.
+[source,ruby]
+    filter {
+      kv {
+        default_keys => [ "from", "logstash@example.com",
+                          "to", "default@dev.null" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-exclude_keys"]
+===== `exclude_keys`
+
+ * Value type is <>
+ * Default value is `[]`
+
+An array specifying the parsed keys which should not be added to the event.
+By default no keys will be excluded.
+
+For example, consider a source like `Hey, from=<abc>, to=def foo=bar`.
+To exclude `from` and `to`, but retain the `foo` key, you could use this configuration:
+[source,ruby]
+    filter {
+      kv {
+        exclude_keys => [ "from", "to" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-field_split"]
+===== `field_split`
+
+ * Value type is <>
+ * Default value is `" "`
+
+A string of characters to use as delimiters for parsing out key-value pairs.
+
+These characters form a regex character class and thus you must escape special regex
+characters like `[` or `]` using `\`.
+
+.Example with URL Query Strings
+For example, to split out the args from a URL query string such as
+`?pin=12345~0&d=123&e=foo@bar.com&oq=bobo&ss=12345`:
+[source,ruby]
+    filter {
+      kv {
+        field_split => "&?"
+      }
+    }
+
+The above splits on both `&` and `?` characters, giving you the following
+fields:
+
+* `pin: 12345~0`
+* `d: 123`
+* `e: foo@bar.com`
+* `oq: bobo`
+* `ss: 12345`
+
+[id="{version}-plugins-{type}s-{plugin}-include_brackets"]
+===== `include_brackets`
+
+ * Value type is <>
+ * Default value is `true`
+
+A boolean specifying whether to treat square brackets, angle brackets,
+and parentheses as value "wrappers" that should be removed from the value.
+[source,ruby]
+    filter {
+      kv {
+        include_brackets => true
+      }
+    }
+
+For example, the result of this line:
+`bracketsone=(hello world) bracketstwo=[hello world] bracketsthree=<hello world>`
+
+will be:
+
+* bracketsone: hello world
+* bracketstwo: hello world
+* bracketsthree: hello world
+
+instead of:
+
+* bracketsone: (hello
+* bracketstwo: [hello
+* bracketsthree: <hello
+
+[id="{version}-plugins-{type}s-{plugin}-include_keys"]
+===== `include_keys`
+
+ * Value type is <>
+ * Default value is `[]`
+
+An array specifying the parsed keys which should be added to the event.
+By default all keys will be added.
+
+For example, consider a source like `Hey, from=<abc>, to=def foo=bar`.
+To include `from` and `to`, but exclude the `foo` key, you could use this configuration:
+[source,ruby]
+    filter {
+      kv {
+        include_keys => [ "from", "to" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-prefix"]
+===== `prefix`
+
+ * Value type is <>
+ * Default value is `""`
+
+A string to prepend to all of the extracted keys.
+
+For example, to prepend `arg_` to all keys:
+[source,ruby]
+    filter { kv { prefix => "arg_" } }
+
+[id="{version}-plugins-{type}s-{plugin}-recursive"]
+===== `recursive`
+
+ * Value type is <>
+ * Default value is `false`
+
+A boolean specifying whether to drill down into values
+and recursively get more key-value pairs from it.
+The extra key-value pairs will be stored as subkeys of the root key.
+
+The default is not to recurse into values.
+[source,ruby]
+    filter {
+      kv {
+        recursive => "true"
+      }
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-remove_char_key"]
+===== `remove_char_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A string of characters to remove from the key.
+
+These characters form a regex character class and thus you must escape special regex
+characters like `[` or `]` using `\`.
+
+Contrary to the trim option, all characters are removed from the key, whatever their position.
+
+For example, to remove `<` `>` `[` `]` and `,` characters from keys:
+[source,ruby]
+    filter {
+      kv {
+        remove_char_key => "<>\[\],"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-remove_char_value"]
+===== `remove_char_value`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A string of characters to remove from the value.
+
+These characters form a regex character class and thus you must escape special regex
+characters like `[` or `]` using `\`.
+
+Contrary to the trim option, all characters are removed from the value, whatever their position.
+
+For example, to remove `<`, `>`, `[`, `]` and `,` characters from values:
+[source,ruby]
+    filter {
+      kv {
+        remove_char_value => "<>\[\],"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * Value type is <>
+ * Default value is `"message"`
+
+The field to perform `key=value` searching on.
+
+For example, to process the `not_the_message` field:
+[source,ruby]
+    filter { kv { source => "not_the_message" } }
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The name of the container to put all of the key-value pairs into.
+
+If this setting is omitted, fields will be written to the root of the
+event, as individual fields.
+
+For example, to place all keys into the event field `kv`:
+[source,ruby]
+    filter { kv { target => "kv" } }
+
+[id="{version}-plugins-{type}s-{plugin}-transform_key"]
+===== `transform_key`
+
+ * Value can be any of: `lowercase`, `uppercase`, `capitalize`
+ * There is no default value for this setting.
+
+Transform keys to lower case, upper case or capitals.
+
+For example, to lowercase all keys:
+[source,ruby]
+    filter {
+      kv {
+        transform_key => "lowercase"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-transform_value"]
+===== `transform_value`
+
+ * Value can be any of: `lowercase`, `uppercase`, `capitalize`
+ * There is no default value for this setting.
+
+Transform values to lower case, upper case or capitals.
+
+For example, to capitalize all values:
+[source,ruby]
+    filter {
+      kv {
+        transform_value => "capitalize"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-trim_key"]
+===== `trim_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A string of characters to trim from the key. This is useful if your
+keys are wrapped in brackets or start with space.
+
+These characters form a regex character class and thus you must escape special regex
+characters like `[` or `]` using `\`.
+
+Only leading and trailing characters are trimmed from the key.
+
+For example, to trim `<` `>` `[` `]` and `,` characters from keys:
+[source,ruby]
+    filter {
+      kv {
+        trim_key => "<>\[\],"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-trim_value"]
+===== `trim_value`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A string of characters to trim from the value. This is useful if your
+values are wrapped in brackets or are terminated with commas (like postfix
+logs).
+
+These characters form a regex character class and thus you must escape special regex
+characters like `[` or `]` using `\`.
+
+Only leading and trailing characters are trimmed from the value.
+
+For example, to trim `<`, `>`, `[`, `]` and `,` characters from values:
+[source,ruby]
+    filter {
+      kv {
+        trim_value => "<>\[\],"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-value_split"]
+===== `value_split`
+
+ * Value type is <>
+ * Default value is `"="`
+
+A non-empty string of characters to use as delimiters for identifying key-value relations.
+
+These characters form a regex character class and thus you must escape special regex
+characters like `[` or `]` using `\`.
+
+For example, to identify key-values such as
+`key1:value1 key2:value2`:
+[source,ruby]
+    filter { kv { value_split => ":" } }
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/kv-v4.0.2.asciidoc b/docs/versioned-plugins/filters/kv-v4.0.2.asciidoc
new file mode 100644
index 000000000..3d08ad529
--- /dev/null
+++ b/docs/versioned-plugins/filters/kv-v4.0.2.asciidoc
@@ -0,0 +1,409 @@
+:plugin: kv
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.2
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-kv/blob/v4.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Kv filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This filter helps automatically parse messages (or specific event fields)
+which are of the `foo=bar` variety.
+
+For example, if you have a log message which contains `ip=1.2.3.4
+error=REFUSED`, you can parse those automatically by configuring:
+[source,ruby]
+    filter {
+      kv { }
+    }
+
+The above will result in a message of `ip=1.2.3.4 error=REFUSED` having
+the fields:
+
+* `ip: 1.2.3.4`
+* `error: REFUSED`
+
+This is great for postfix, iptables, and other types of logs that
+tend towards `key=value` syntax.
+
+You can configure any arbitrary strings to split your data on,
+in case your data is not structured using `=` signs and whitespace.
+For example, this filter can also be used to parse query parameters like
+`foo=bar&baz=fizz` by setting the `field_split` parameter to `&`.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Kv Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-allow_duplicate_values>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-default_keys>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclude_keys>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-field_split>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-include_brackets>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-include_keys>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-recursive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-remove_char_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-remove_char_value>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-transform_key>> |<>, one of `["lowercase", "uppercase", "capitalize"]`|No +| <<{version}-plugins-{type}s-{plugin}-transform_value>> |<>, one of `["lowercase", "uppercase", "capitalize"]`|No +| <<{version}-plugins-{type}s-{plugin}-trim_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-trim_value>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-value_split>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-allow_duplicate_values"] +===== `allow_duplicate_values` + + * Value type is <> + * Default value is `true` + +A bool option for removing duplicate key/value pairs. When set to false, only +one unique key/value pair will be preserved. + +For example, consider a source like `from=me from=me`. `[from]` will map to +an Array with two elements: `["me", "me"]`. To only keep unique key/value pairs, +you could use this configuration: +[source,ruby] + filter { + kv { + allow_duplicate_values => false + } + } + +[id="{version}-plugins-{type}s-{plugin}-default_keys"] +===== `default_keys` + + * Value type is <> + * Default value is `{}` + +A hash specifying the default keys and their values which should be added to the event +in case these keys do not exist in the source field being parsed. +[source,ruby] + filter { + kv { + default_keys => [ "from", "logstash@example.com", + "to", "default@dev.null" ] + } + } + +[id="{version}-plugins-{type}s-{plugin}-exclude_keys"] +===== `exclude_keys` + + * Value type is <> + * Default value is `[]` + +An array specifying the parsed keys which should not be added to the event. +By default no keys will be excluded. + +For example, consider a source like `Hey, from=, to=def foo=bar`. +To exclude `from` and `to`, but retain the `foo` key, you could use this configuration: +[source,ruby] + filter { + kv { + exclude_keys => [ "from", "to" ] + } + } + +[id="{version}-plugins-{type}s-{plugin}-field_split"] +===== `field_split` + + * Value type is <> + * Default value is `" "` + +A string of characters to use as delimiters for parsing out key-value pairs. + +These characters form a regex character class and thus you must escape special regex +characters like `[` or `]` using `\`. + +#### Example with URL Query Strings + +For example, to split out the args from a url query string such as +`?pin=12345~0&d=123&e=foo@bar.com&oq=bobo&ss=12345`: +[source,ruby] + filter { + kv { + field_split => "&?" 
+ }
+ }
+
+The above splits on both `&` and `?` characters, giving you the following
+fields:
+
+* `pin: 12345~0`
+* `d: 123`
+* `e: foo@bar.com`
+* `oq: bobo`
+* `ss: 12345`
+
+[id="{version}-plugins-{type}s-{plugin}-include_brackets"]
+===== `include_brackets`
+
+ * Value type is <>
+ * Default value is `true`
+
+A boolean specifying whether to treat square brackets, angle brackets,
+and parentheses as value "wrappers" that should be removed from the value.
+[source,ruby]
+ filter {
+ kv {
+ include_brackets => true
+ }
+ }
+
+For example, the result of this line:
+`bracketsone=(hello world) bracketstwo=[hello world] bracketsthree=<hello world>`
+
+will be:
+
+* bracketsone: hello world
+* bracketstwo: hello world
+* bracketsthree: hello world
+
+instead of:
+
+* bracketsone: (hello
+* bracketstwo: [hello
+* bracketsthree: <hello
+
+[id="{version}-plugins-{type}s-{plugin}-include_keys"]
+===== `include_keys`
+
+ * Value type is <>
+ * Default value is `[]`
+
+An array specifying the parsed keys which should be added to the event.
+By default all keys will be added.
+
+For example, consider a source like `Hey, from=<abc>, to=def foo=bar`.
+To include `from` and `to`, but exclude the `foo` key, you could use this configuration:
+[source,ruby]
+ filter {
+ kv {
+ include_keys => [ "from", "to" ]
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-prefix"]
+===== `prefix`
+
+ * Value type is <>
+ * Default value is `""`
+
+A string to prepend to all of the extracted keys.
+
+For example, to prepend `arg_` to all keys:
+[source,ruby]
+ filter { kv { prefix => "arg_" } }
+
+[id="{version}-plugins-{type}s-{plugin}-recursive"]
+===== `recursive`
+
+ * Value type is <>
+ * Default value is `false`
+
+A boolean specifying whether to drill down into values
+and recursively get more key-value pairs from it.
+The extra key-value pairs will be stored as subkeys of the root key.
+
+The default is not to recurse into values.
+[source,ruby]
+ filter {
+ kv {
+ recursive => "true"
+ }
+ }
+
+
+[id="{version}-plugins-{type}s-{plugin}-remove_char_key"]
+===== `remove_char_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A string of characters to remove from the key.
+
+These characters form a regex character class and thus you must escape special regex
+characters like `[` or `]` using `\`.
+
+Contrary to the trim options, all characters are removed from the key, whatever their position.
+
+For example, to remove `<` `>` `[` `]` and `,` characters from keys:
+[source,ruby]
+ filter {
+ kv {
+ remove_char_key => "<>\[\],"
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-remove_char_value"]
+===== `remove_char_value`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A string of characters to remove from the value.
+
+These characters form a regex character class and thus you must escape special regex
+characters like `[` or `]` using `\`.
+
+Contrary to the trim options, all characters are removed from the value, whatever their position.
+
+For example, to remove `<`, `>`, `[`, `]` and `,` characters from values:
+[source,ruby]
+ filter {
+ kv {
+ remove_char_value => "<>\[\],"
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * Value type is <>
+ * Default value is `"message"`
+
+The field to perform `key=value` searching on.
+
+For example, to process the `not_the_message` field:
+[source,ruby]
+ filter { kv { source => "not_the_message" } }
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The name of the container to put all of the key-value pairs into.
+
+If this setting is omitted, fields will be written to the root of the
+event, as individual fields.
+
+For example, to place all keys into the event field `kv`:
+[source,ruby]
+ filter { kv { target => "kv" } }
+
+[id="{version}-plugins-{type}s-{plugin}-transform_key"]
+===== `transform_key`
+
+ * Value can be any of: `lowercase`, `uppercase`, `capitalize`
+ * There is no default value for this setting.
+
+Transform keys to lower case, upper case or capitals.
+
+For example, to lowercase all keys:
+[source,ruby]
+ filter {
+ kv {
+ transform_key => "lowercase"
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-transform_value"]
+===== `transform_value`
+
+ * Value can be any of: `lowercase`, `uppercase`, `capitalize`
+ * There is no default value for this setting.
+
+Transform values to lower case, upper case or capitals.
+
+For example, to capitalize all values:
+[source,ruby]
+ filter {
+ kv {
+ transform_value => "capitalize"
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-trim_key"]
+===== `trim_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A string of characters to trim from the key. This is useful if your
+keys are wrapped in brackets or start with space.
+
+These characters form a regex character class and thus you must escape special regex
+characters like `[` or `]` using `\`.
+
+Only leading and trailing characters are trimmed from the key.
+
+For example, to trim `<` `>` `[` `]` and `,` characters from keys:
+[source,ruby]
+ filter {
+ kv {
+ trim_key => "<>\[\],"
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-trim_value"]
+===== `trim_value`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A string of characters to trim from the value. This is useful if your
+values are wrapped in brackets or are terminated with commas (like postfix
+logs).
+
+These characters form a regex character class and thus you must escape special regex
+characters like `[` or `]` using `\`.
+
+Only leading and trailing characters are trimmed from the value.
+
+For example, to trim `<`, `>`, `[`, `]` and `,` characters from values:
+[source,ruby]
+ filter {
+ kv {
+ trim_value => "<>\[\],"
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-value_split"]
+===== `value_split`
+
+ * Value type is <>
+ * Default value is `"="`
+
+A non-empty string of characters to use as delimiters for identifying key-value relations.
+
+These characters form a regex character class and thus you must escape special regex
+characters like `[` or `]` using `\`.
+
+For example, to identify key-values such as
+`key1:value1 key2:value2`:
+[source,ruby]
+ filter { kv { value_split => ":" } }
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/kv-v4.0.3.asciidoc b/docs/versioned-plugins/filters/kv-v4.0.3.asciidoc
new file mode 100644
index 000000000..2dbd0070c
--- /dev/null
+++ b/docs/versioned-plugins/filters/kv-v4.0.3.asciidoc
@@ -0,0 +1,409 @@
+:plugin: kv
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.3
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-kv/blob/v4.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Kv filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This filter helps automatically parse messages (or specific event fields)
+which are of the `foo=bar` variety.
+
+For example, if you have a log message which contains `ip=1.2.3.4
+error=REFUSED`, you can parse those automatically by configuring:
+[source,ruby]
+ filter {
+ kv { }
+ }
+
+The above will result in a message of `ip=1.2.3.4 error=REFUSED` having
+the fields:
+
+* `ip: 1.2.3.4`
+* `error: REFUSED`
+
+This is great for postfix, iptables, and other types of logs that
+tend towards `key=value` syntax.
+
+You can configure any arbitrary strings to split your data on,
+in case your data is not structured using `=` signs and whitespace.
+For example, this filter can also be used to parse query parameters like
+`foo=bar&baz=fizz` by setting the `field_split` parameter to `&`.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Kv Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-allow_duplicate_values>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-default_keys>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-exclude_keys>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-field_split>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_brackets>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_keys>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-recursive>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-remove_char_key>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-remove_char_value>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-transform_key>> |<>, one of `["lowercase", "uppercase", "capitalize"]`|No
+| <<{version}-plugins-{type}s-{plugin}-transform_value>> |<>, one of `["lowercase", "uppercase", "capitalize"]`|No
+| <<{version}-plugins-{type}s-{plugin}-trim_key>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-trim_value>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-value_split>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-allow_duplicate_values"]
+===== `allow_duplicate_values`
+
+ * Value type is <>
+ * Default value is `true`
+
+A boolean option for removing duplicate key/value pairs. When set to `false`, only
+one unique key/value pair will be preserved.
+
+For example, consider a source like `from=me from=me`. `[from]` will map to
+an Array with two elements: `["me", "me"]`. To only keep unique key/value pairs,
+you could use this configuration:
+[source,ruby]
+ filter {
+ kv {
+ allow_duplicate_values => false
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-default_keys"]
+===== `default_keys`
+
+ * Value type is <>
+ * Default value is `{}`
+
+A hash specifying the default keys and their values which should be added to the event
+in case these keys do not exist in the source field being parsed.
+[source,ruby]
+ filter {
+ kv {
+ default_keys => [ "from", "logstash@example.com",
+ "to", "default@dev.null" ]
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-exclude_keys"]
+===== `exclude_keys`
+
+ * Value type is <>
+ * Default value is `[]`
+
+An array specifying the parsed keys which should not be added to the event.
+By default no keys will be excluded.
+
+For example, consider a source like `Hey, from=<abc>, to=def foo=bar`.
+To exclude `from` and `to`, but retain the `foo` key, you could use this configuration:
+[source,ruby]
+ filter {
+ kv {
+ exclude_keys => [ "from", "to" ]
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-field_split"]
+===== `field_split`
+
+ * Value type is <>
+ * Default value is `" "`
+
+A string of characters to use as delimiters for parsing out key-value pairs.
+
+These characters form a regex character class and thus you must escape special regex
+characters like `[` or `]` using `\`.
+
+*Example with URL query strings*
+
+For example, to split out the args from a URL query string such as
+`?pin=12345~0&d=123&e=foo@bar.com&oq=bobo&ss=12345`:
+[source,ruby]
+ filter {
+ kv {
+ field_split => "&?"
+ }
+ }
+
+The above splits on both `&` and `?` characters, giving you the following
+fields:
+
+* `pin: 12345~0`
+* `d: 123`
+* `e: foo@bar.com`
+* `oq: bobo`
+* `ss: 12345`
+
+[id="{version}-plugins-{type}s-{plugin}-include_brackets"]
+===== `include_brackets`
+
+ * Value type is <>
+ * Default value is `true`
+
+A boolean specifying whether to treat square brackets, angle brackets,
+and parentheses as value "wrappers" that should be removed from the value.
+[source,ruby]
+ filter {
+ kv {
+ include_brackets => true
+ }
+ }
+
+For example, the result of this line:
+`bracketsone=(hello world) bracketstwo=[hello world] bracketsthree=<hello world>`
+
+will be:
+
+* bracketsone: hello world
+* bracketstwo: hello world
+* bracketsthree: hello world
+
+instead of:
+
+* bracketsone: (hello
+* bracketstwo: [hello
+* bracketsthree: <hello
+
+[id="{version}-plugins-{type}s-{plugin}-include_keys"]
+===== `include_keys`
+
+ * Value type is <>
+ * Default value is `[]`
+
+An array specifying the parsed keys which should be added to the event.
+By default all keys will be added.
+
+For example, consider a source like `Hey, from=<abc>, to=def foo=bar`.
+To include `from` and `to`, but exclude the `foo` key, you could use this configuration:
+[source,ruby]
+ filter {
+ kv {
+ include_keys => [ "from", "to" ]
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-prefix"]
+===== `prefix`
+
+ * Value type is <>
+ * Default value is `""`
+
+A string to prepend to all of the extracted keys.
+
+For example, to prepend `arg_` to all keys:
+[source,ruby]
+ filter { kv { prefix => "arg_" } }
+
+[id="{version}-plugins-{type}s-{plugin}-recursive"]
+===== `recursive`
+
+ * Value type is <>
+ * Default value is `false`
+
+A boolean specifying whether to drill down into values
+and recursively get more key-value pairs from it.
+The extra key-value pairs will be stored as subkeys of the root key.
+
+The default is not to recurse into values.
+[source,ruby]
+ filter {
+ kv {
+ recursive => "true"
+ }
+ }
+
+
+[id="{version}-plugins-{type}s-{plugin}-remove_char_key"]
+===== `remove_char_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A string of characters to remove from the key.
+
+These characters form a regex character class and thus you must escape special regex
+characters like `[` or `]` using `\`.
+
+Contrary to the trim options, all characters are removed from the key, whatever their position.
+
+For example, to remove `<` `>` `[` `]` and `,` characters from keys:
+[source,ruby]
+ filter {
+ kv {
+ remove_char_key => "<>\[\],"
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-remove_char_value"]
+===== `remove_char_value`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A string of characters to remove from the value.
+
+These characters form a regex character class and thus you must escape special regex
+characters like `[` or `]` using `\`.
+
+Contrary to the trim options, all characters are removed from the value, whatever their position.
+
+For example, to remove `<`, `>`, `[`, `]` and `,` characters from values:
+[source,ruby]
+ filter {
+ kv {
+ remove_char_value => "<>\[\],"
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * Value type is <>
+ * Default value is `"message"`
+
+The field to perform `key=value` searching on.
+
+For example, to process the `not_the_message` field:
+[source,ruby]
+ filter { kv { source => "not_the_message" } }
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The name of the container to put all of the key-value pairs into.
+
+If this setting is omitted, fields will be written to the root of the
+event, as individual fields.
+
+For example, to place all keys into the event field `kv`:
+[source,ruby]
+ filter { kv { target => "kv" } }
+
+[id="{version}-plugins-{type}s-{plugin}-transform_key"]
+===== `transform_key`
+
+ * Value can be any of: `lowercase`, `uppercase`, `capitalize`
+ * There is no default value for this setting.
+
+Transform keys to lower case, upper case or capitals.
+
+For example, to lowercase all keys:
+[source,ruby]
+ filter {
+ kv {
+ transform_key => "lowercase"
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-transform_value"]
+===== `transform_value`
+
+ * Value can be any of: `lowercase`, `uppercase`, `capitalize`
+ * There is no default value for this setting.
+
+Transform values to lower case, upper case or capitals.
+
+For example, to capitalize all values:
+[source,ruby]
+ filter {
+ kv {
+ transform_value => "capitalize"
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-trim_key"]
+===== `trim_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A string of characters to trim from the key. This is useful if your
+keys are wrapped in brackets or start with space.
+
+These characters form a regex character class and thus you must escape special regex
+characters like `[` or `]` using `\`.
+
+Only leading and trailing characters are trimmed from the key.
+
+For example, to trim `<` `>` `[` `]` and `,` characters from keys:
+[source,ruby]
+ filter {
+ kv {
+ trim_key => "<>\[\],"
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-trim_value"]
+===== `trim_value`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A string of characters to trim from the value. This is useful if your
+values are wrapped in brackets or are terminated with commas (like postfix
+logs).
+
+These characters form a regex character class and thus you must escape special regex
+characters like `[` or `]` using `\`.
+
+Only leading and trailing characters are trimmed from the value.
+ +For example, to trim `<`, `>`, `[`, `]` and `,` characters from values: +[source,ruby] + filter { + kv { + trim_value => "<>\[\]," + } + } + +[id="{version}-plugins-{type}s-{plugin}-value_split"] +===== `value_split` + + * Value type is <> + * Default value is `"="` + +A non-empty string of characters to use as delimiters for identifying key-value relations. + +These characters form a regex character class and thus you must escape special regex +characters like `[` or `]` using `\`. + +For example, to identify key-values such as +`key1:value1 key2:value2`: +[source,ruby] + filter { kv { value_split => ":" } } + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/language-index.asciidoc b/docs/versioned-plugins/filters/language-index.asciidoc new file mode 100644 index 000000000..6feb0e015 --- /dev/null +++ b/docs/versioned-plugins/filters/language-index.asciidoc @@ -0,0 +1,10 @@ +:plugin: language +:type: filter + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +|======================================================================= + + diff --git a/docs/versioned-plugins/filters/lookup-index.asciidoc b/docs/versioned-plugins/filters/lookup-index.asciidoc new file mode 100644 index 000000000..a488c3855 --- /dev/null +++ b/docs/versioned-plugins/filters/lookup-index.asciidoc @@ -0,0 +1,10 @@ +:plugin: lookup +:type: filter + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +|======================================================================= + + diff --git a/docs/versioned-plugins/filters/math-index.asciidoc b/docs/versioned-plugins/filters/math-index.asciidoc new file mode 100644 index 000000000..f2aba84a1 --- /dev/null +++ b/docs/versioned-plugins/filters/math-index.asciidoc @@ -0,0 +1,10 @@ +:plugin: math +:type: filter + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +|======================================================================= + + diff --git a/docs/versioned-plugins/filters/metaevent-index.asciidoc b/docs/versioned-plugins/filters/metaevent-index.asciidoc new file mode 100644 index 000000000..c954ee15b --- /dev/null +++ b/docs/versioned-plugins/filters/metaevent-index.asciidoc @@ -0,0 +1,14 @@ +:plugin: metaevent +:type: filter + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::metaevent-v2.0.7.asciidoc[] +include::metaevent-v2.0.5.asciidoc[] + diff --git a/docs/versioned-plugins/filters/metaevent-v2.0.5.asciidoc b/docs/versioned-plugins/filters/metaevent-v2.0.5.asciidoc new file mode 100644 index 000000000..05a009ee1 --- /dev/null +++ b/docs/versioned-plugins/filters/metaevent-v2.0.5.asciidoc @@ -0,0 +1,62 @@ +:plugin: metaevent +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+:version: v2.0.5
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-metaevent/blob/v2.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Metaevent filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Periodically group all events under a certain list of tags into a single event.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Metaevent Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-followed_by_tags>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-period>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-followed_by_tags"]
+===== `followed_by_tags`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+syntax: `followed_by_tags => [ "tag", "tag" ]`
+
+[id="{version}-plugins-{type}s-{plugin}-period"]
+===== `period`
+
+ * Value type is <>
+ * Default value is `5`
+
+syntax: `period => 60`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/metaevent-v2.0.7.asciidoc b/docs/versioned-plugins/filters/metaevent-v2.0.7.asciidoc
new file mode 100644
index 000000000..7394fcb66
--- /dev/null
+++ b/docs/versioned-plugins/filters/metaevent-v2.0.7.asciidoc
@@ -0,0 +1,62 @@
+:plugin: metaevent
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.7
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-metaevent/blob/v2.0.7/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Metaevent filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Periodically group all events under a certain list of tags into a single event.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Metaevent Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-followed_by_tags>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-period>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
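+
+For example, to group events tagged `success` or `failure` into a single
+event every 60 seconds, a configuration along these lines could be used
+(the tag names here are illustrative, not defaults of the plugin):
+[source,ruby]
+ filter {
+ metaevent {
+ followed_by_tags => [ "success", "failure" ]
+ period => 60
+ }
+ }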
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-followed_by_tags"]
+===== `followed_by_tags`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+syntax: `followed_by_tags => [ "tag", "tag" ]`
+
+[id="{version}-plugins-{type}s-{plugin}-period"]
+===== `period`
+
+ * Value type is <>
+ * Default value is `5`
+
+syntax: `period => 60`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/metricize-index.asciidoc b/docs/versioned-plugins/filters/metricize-index.asciidoc
new file mode 100644
index 000000000..79132cc71
--- /dev/null
+++ b/docs/versioned-plugins/filters/metricize-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: metricize
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::metricize-v3.0.3.asciidoc[]
+include::metricize-v3.0.2.asciidoc[]
+include::metricize-v3.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/metricize-v3.0.1.asciidoc b/docs/versioned-plugins/filters/metricize-v3.0.1.asciidoc
new file mode 100644
index 000000000..f720d899b
--- /dev/null
+++ b/docs/versioned-plugins/filters/metricize-v3.0.1.asciidoc
@@ -0,0 +1,109 @@
+:plugin: metricize
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-metricize/blob/v3.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Metricize filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The metricize filter takes complex events containing a number of metrics
+and splits these up into multiple events, each holding a single metric.
+
+Example:
+
+ Assume the following filter configuration:
+
+ filter {
+ metricize {
+ metrics => [ "metric1", "metric2" ]
+ }
+ }
+
+ Assuming the following event is passed in:
+
+ {
+ type => "type A"
+ metric1 => "value1"
+ metric2 => "value2"
+ }
+
+ This will result in the following 2 events being generated in addition to the original event:
+
+ { {
+ type => "type A" type => "type A"
+ metric => "metric1" metric => "metric2"
+ value => "value1" value => "value2"
+ } }
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Metricize Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-drop_original_event>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-metric_field_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-value_field_name>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-drop_original_event"]
+===== `drop_original_event`
+
+ * Value type is <>
+ * Default value is `false`
+
+Flag indicating whether the original event should be dropped or not.
+
+[id="{version}-plugins-{type}s-{plugin}-metric_field_name"]
+===== `metric_field_name`
+
+ * Value type is <>
+ * Default value is `"metric"`
+
+Name of the field the metric name will be written to.
+
+[id="{version}-plugins-{type}s-{plugin}-metrics"]
+===== `metrics`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+A new metrics event will be created for each metric field in this list.
+All fields in this list will be removed from generated events.
+
+[id="{version}-plugins-{type}s-{plugin}-value_field_name"]
+===== `value_field_name`
+
+ * Value type is <>
+ * Default value is `"value"`
+
+Name of the field the metric value will be written to.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/metricize-v3.0.2.asciidoc b/docs/versioned-plugins/filters/metricize-v3.0.2.asciidoc
new file mode 100644
index 000000000..9f06ad1d9
--- /dev/null
+++ b/docs/versioned-plugins/filters/metricize-v3.0.2.asciidoc
@@ -0,0 +1,109 @@
+:plugin: metricize
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.2
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-metricize/blob/v3.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Metricize filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The metricize filter takes complex events containing a number of metrics
+and splits these up into multiple events, each holding a single metric.
+
+Example:
+
+ Assume the following filter configuration:
+
+ filter {
+ metricize {
+ metrics => [ "metric1", "metric2" ]
+ }
+ }
+
+ Assuming the following event is passed in:
+
+ {
+ type => "type A"
+ metric1 => "value1"
+ metric2 => "value2"
+ }
+
+ This will result in the following 2 events being generated in addition to the original event:
+
+ { {
+ type => "type A" type => "type A"
+ metric => "metric1" metric => "metric2"
+ value => "value1" value => "value2"
+ } }
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Metricize Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-drop_original_event>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-metric_field_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-value_field_name>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-drop_original_event"]
+===== `drop_original_event`
+
+ * Value type is <>
+ * Default value is `false`
+
+Flag indicating whether the original event should be dropped or not.
+
+[id="{version}-plugins-{type}s-{plugin}-metric_field_name"]
+===== `metric_field_name`
+
+ * Value type is <>
+ * Default value is `"metric"`
+
+Name of the field the metric name will be written to.
+
+[id="{version}-plugins-{type}s-{plugin}-metrics"]
+===== `metrics`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+A new metrics event will be created for each metric field in this list.
+All fields in this list will be removed from generated events.
+
+[id="{version}-plugins-{type}s-{plugin}-value_field_name"]
+===== `value_field_name`
+
+ * Value type is <>
+ * Default value is `"value"`
+
+Name of the field the metric value will be written to.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/metricize-v3.0.3.asciidoc b/docs/versioned-plugins/filters/metricize-v3.0.3.asciidoc
new file mode 100644
index 000000000..cffc8245b
--- /dev/null
+++ b/docs/versioned-plugins/filters/metricize-v3.0.3.asciidoc
@@ -0,0 +1,109 @@
+:plugin: metricize
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-metricize/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Metricize filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The metricize filter takes complex events containing a number of metrics
+and splits these up into multiple events, each holding a single metric.
+
+Example:
+
+ Assume the following filter configuration:
+
+ filter {
+ metricize {
+ metrics => [ "metric1", "metric2" ]
+ }
+ }
+
+ Assuming the following event is passed in:
+
+ {
+ type => "type A"
+ metric1 => "value1"
+ metric2 => "value2"
+ }
+
+ This will result in the following 2 events being generated in addition to the original event:
+
+ { {
+ type => "type A" type => "type A"
+ metric => "metric1" metric => "metric2"
+ value => "value1" value => "value2"
+ } }
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Metricize Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-drop_original_event>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-metric_field_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-value_field_name>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-drop_original_event"]
+===== `drop_original_event`
+
+ * Value type is <>
+ * Default value is `false`
+
+Flag indicating whether the original event should be dropped or not.
+
+[id="{version}-plugins-{type}s-{plugin}-metric_field_name"]
+===== `metric_field_name`
+
+ * Value type is <>
+ * Default value is `"metric"`
+
+Name of the field the metric name will be written to.
+
+[id="{version}-plugins-{type}s-{plugin}-metrics"]
+===== `metrics`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+A new metrics event will be created for each metric field in this list.
+All fields in this list will be removed from generated events.
+
+[id="{version}-plugins-{type}s-{plugin}-value_field_name"]
+===== `value_field_name`
+
+ * Value type is <>
+ * Default value is `"value"`
+
+Name of the field the metric value will be written to.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/metrics-index.asciidoc b/docs/versioned-plugins/filters/metrics-index.asciidoc
new file mode 100644
index 000000000..4d84457dc
--- /dev/null
+++ b/docs/versioned-plugins/filters/metrics-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: metrics
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::metrics-v4.0.5.asciidoc[]
+include::metrics-v4.0.4.asciidoc[]
+include::metrics-v4.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/metrics-v4.0.3.asciidoc b/docs/versioned-plugins/filters/metrics-v4.0.3.asciidoc
new file mode 100644
index 000000000..15cd2d72e
--- /dev/null
+++ b/docs/versioned-plugins/filters/metrics-v4.0.3.asciidoc
@@ -0,0 +1,228 @@
+:plugin: metrics
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-metrics/blob/v4.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Metrics filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The metrics filter is useful for aggregating metrics.
+
+IMPORTANT: Elasticsearch 2.0 no longer allows field names with dots.
Version 3.0
+of the metrics filter plugin changes behavior to use nested fields rather than
+dotted notation to avoid colliding with versions of Elasticsearch 2.0+. Please
+note the changes in the documentation (underscores and sub-fields used).
+
+For example, if you have a field `response` that is
+an HTTP response code, and you want to count each
+kind of response, you can do this:
+[source,ruby]
+ filter {
+ metrics {
+ meter => [ "http_%{response}" ]
+ add_tag => "metric"
+ }
+ }
+
+Metrics are flushed every 5 seconds by default or according to
+`flush_interval`. Metrics appear as
+new events in the event stream and go through any filters
+that occur after as well as outputs.
+
+In general, you will want to add a tag to your metrics and have an output
+explicitly look for that tag.
+
+The event that is flushed will include every 'meter' and 'timer'
+metric in the following way:
+
+==== `meter` values
+
+For a `meter => "thing"` you will receive the following fields:
+
+* "[thing][count]" - the total count of events
+* "[thing][rate_1m]" - the per-second event rate in a 1-minute sliding window
+* "[thing][rate_5m]" - the per-second event rate in a 5-minute sliding window
+* "[thing][rate_15m]" - the per-second event rate in a 15-minute sliding window
+
+==== `timer` values
+
+For a `timer => [ "thing", "%{duration}" ]` you will receive the following fields:
+
+* "[thing][count]" - the total count of events
+* "[thing][rate_1m]" - the per-second event rate in a 1-minute sliding window
+* "[thing][rate_5m]" - the per-second event rate in a 5-minute sliding window
+* "[thing][rate_15m]" - the per-second event rate in a 15-minute sliding window
+* "[thing][min]" - the minimum value seen for this metric
+* "[thing][max]" - the maximum value seen for this metric
+* "[thing][stddev]" - the standard deviation for this metric
+* "[thing][mean]" - the mean for this metric
+* "[thing][pXX]" - the XXth percentile for this metric (see `percentiles`)
+
+The default lengths of the event rate window (1, 5, and 15 minutes)
+can be configured with the `rates` option.
+
+==== Example: Computing event rate
+
+For a simple example, let's track how many events per second are running
+through logstash:
+[source,ruby]
+----
+ input {
+ generator {
+ type => "generated"
+ }
+ }
+
+ filter {
+ if [type] == "generated" {
+ metrics {
+ meter => "events"
+ add_tag => "metric"
+ }
+ }
+ }
+
+ output {
+ # only emit events with the 'metric' tag
+ if "metric" in [tags] {
+ stdout {
+ codec => line {
+ format => "rate: %{[events][rate_1m]}"
+ }
+ }
+ }
+ }
+----
+
+Running the above:
+[source,ruby]
+ % bin/logstash -f example.conf
+ rate: 23721.983566819246
+ rate: 24811.395722536377
+ rate: 25875.892745934525
+ rate: 26836.42375967113
+
+We see the output includes our events' 1-minute rate.
+
+In the real world, you would emit this to graphite or another metrics store,
+like so:
+[source,ruby]
+ output {
+ graphite {
+ metrics => [ "events.rate_1m", "%{[events][rate_1m]}" ]
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Metrics Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-clear_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ignore_older_than>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-meter>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-percentiles>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-rates>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timer>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-clear_interval"]
+===== `clear_interval`
+
+ * Value type is <>
+ * Default value is `-1`
+
+The clear interval, when all counters are reset.
+
+If set to -1, the default value, the metrics will never be cleared.
+Otherwise, it should be a multiple of 5s.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_interval"]
+===== `flush_interval`
+
+ * Value type is <>
+ * Default value is `5`
+
+The flush interval, when the metrics event is created. Must be a multiple of 5s.
+
+[id="{version}-plugins-{type}s-{plugin}-ignore_older_than"]
+===== `ignore_older_than`
+
+ * Value type is <>
+ * Default value is `0`
+
+Don't track events that have `@timestamp` older than some number of seconds.
+
+This is useful if you want to only include events that are near real-time
+in your metrics.
+
+For example, to only count events that are within 10 seconds of real-time, you
+would do this:
+
+ filter {
+ metrics {
+ meter => [ "hits" ]
+ ignore_older_than => 10
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-meter"]
+===== `meter`
+
+ * Value type is <>
+ * Default value is `[]`
+
+syntax: `meter => [ "name of metric", "name of metric" ]`
+
+[id="{version}-plugins-{type}s-{plugin}-percentiles"]
+===== `percentiles`
+
+ * Value type is <>
+ * Default value is `[1, 5, 10, 90, 95, 99, 100]`
+
+The percentiles that should be measured and emitted for timer values.
+
+[id="{version}-plugins-{type}s-{plugin}-rates"]
+===== `rates`
+
+ * Value type is <>
+ * Default value is `[1, 5, 15]`
+
+The rates that should be measured, in minutes.
+Possible values are 1, 5, and 15.
+
+[id="{version}-plugins-{type}s-{plugin}-timer"]
+===== `timer`
+
+ * Value type is <>
+ * Default value is `{}`
+
+syntax: `timer => [ "name of metric", "%{time_value}" ]`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/metrics-v4.0.4.asciidoc b/docs/versioned-plugins/filters/metrics-v4.0.4.asciidoc
new file mode 100644
index 000000000..f88dd5a2d
--- /dev/null
+++ b/docs/versioned-plugins/filters/metrics-v4.0.4.asciidoc
@@ -0,0 +1,228 @@
+:plugin: metrics
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.4
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-metrics/blob/v4.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Metrics filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The metrics filter is useful for aggregating metrics.
+
+IMPORTANT: Elasticsearch 2.0 no longer allows field names with dots. Version 3.0
+of the metrics filter plugin changes behavior to use nested fields rather than
+dotted notation to avoid colliding with versions of Elasticsearch 2.0+. Please
+note the changes in the documentation (underscores and sub-fields used).
+
+For example, if you have a field `response` that is
+an HTTP response code, and you want to count each
+kind of response, you can do this:
+[source,ruby]
+ filter {
+ metrics {
+ meter => [ "http_%{response}" ]
+ add_tag => "metric"
+ }
+ }
+
+Metrics are flushed every 5 seconds by default or according to
+`flush_interval`. Metrics appear as
+new events in the event stream and go through any filters
+that occur after as well as outputs.
+
+In general, you will want to add a tag to your metrics and have an output
+explicitly look for that tag.
+
+The event that is flushed will include every 'meter' and 'timer'
+metric in the following way:
+
+==== `meter` values
+
+For a `meter => "thing"` you will receive the following fields:
+
+* "[thing][count]" - the total count of events
+* "[thing][rate_1m]" - the per-second event rate in a 1-minute sliding window
+* "[thing][rate_5m]" - the per-second event rate in a 5-minute sliding window
+* "[thing][rate_15m]" - the per-second event rate in a 15-minute sliding window
+
+==== `timer` values
+
+For a `timer => { "thing" => "%{duration}" }` you will receive the following fields:
+
+* "[thing][count]" - the total count of events
+* "[thing][rate_1m]" - the per-second average value in a 1-minute sliding window
+* "[thing][rate_5m]" - the per-second average value in a 5-minute sliding window
+* "[thing][rate_15m]" - the per-second average value in a 15-minute sliding window
+* "[thing][min]" - the minimum value seen for this metric
+* "[thing][max]" - the maximum value seen for this metric
+* "[thing][stddev]" - the standard deviation for this metric
+* "[thing][mean]" - the mean for this metric
+* "[thing][pXX]" - the XXth percentile for this metric (see `percentiles`)
+
+The default lengths of the event rate window (1, 5, and 15 minutes)
+can be configured with the `rates` option.
+
+==== Example: Computing event rate
+
+For a simple example, let's track how many events per second are running
+through logstash:
+[source,ruby]
+----
+ input {
+ generator {
+ type => "generated"
+ }
+ }
+
+ filter {
+ if [type] == "generated" {
+ metrics {
+ meter => "events"
+ add_tag => "metric"
+ }
+ }
+ }
+
+ output {
+ # only emit events with the 'metric' tag
+ if "metric" in [tags] {
+ stdout {
+ codec => line {
+ format => "rate: %{[events][rate_1m]}"
+ }
+ }
+ }
+ }
+----
+
+Running the above:
+[source,ruby]
+ % bin/logstash -f example.conf
+ rate: 23721.983566819246
+ rate: 24811.395722536377
+ rate: 25875.892745934525
+ rate: 26836.42375967113
+
+We see the output includes our events' 1-minute rate.
+
+In the real world, you would emit this to graphite or another metrics store,
+like so:
+[source,ruby]
+ output {
+ graphite {
+ metrics => [ "events.rate_1m", "%{[events][rate_1m]}" ]
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Metrics Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-clear_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ignore_older_than>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-meter>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-percentiles>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-rates>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timer>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-clear_interval"]
+===== `clear_interval`
+
+ * Value type is <>
+ * Default value is `-1`
+
+The clear interval, when all counters are reset.
+
+If set to -1, the default value, the metrics will never be cleared.
+Otherwise, it should be a multiple of 5s.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_interval"]
+===== `flush_interval`
+
+ * Value type is <>
+ * Default value is `5`
+
+The flush interval, when the metrics event is created. Must be a multiple of 5s.
+
+[id="{version}-plugins-{type}s-{plugin}-ignore_older_than"]
+===== `ignore_older_than`
+
+ * Value type is <>
+ * Default value is `0`
+
+Don't track events that have `@timestamp` older than some number of seconds.
+
+This is useful if you want to only include events that are near real-time
+in your metrics.
+
+For example, to only count events that are within 10 seconds of real-time, you
+would do this:
+
+ filter {
+ metrics {
+ meter => [ "hits" ]
+ ignore_older_than => 10
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-meter"]
+===== `meter`
+
+ * Value type is <>
+ * Default value is `[]`
+
+syntax: `meter => [ "name of metric", "name of metric" ]`
+
+[id="{version}-plugins-{type}s-{plugin}-percentiles"]
+===== `percentiles`
+
+ * Value type is <>
+ * Default value is `[1, 5, 10, 90, 95, 99, 100]`
+
+The percentiles that should be measured and emitted for timer values.
+
+[id="{version}-plugins-{type}s-{plugin}-rates"]
+===== `rates`
+
+ * Value type is <>
+ * Default value is `[1, 5, 15]`
+
+The rates that should be measured, in minutes.
+Possible values are 1, 5, and 15.
+
+[id="{version}-plugins-{type}s-{plugin}-timer"]
+===== `timer`
+
+ * Value type is <>
+ * Default value is `{}`
+
+syntax: `timer => { "name of metric" => "%{time_value}" }`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/metrics-v4.0.5.asciidoc b/docs/versioned-plugins/filters/metrics-v4.0.5.asciidoc
new file mode 100644
index 000000000..6562f7a72
--- /dev/null
+++ b/docs/versioned-plugins/filters/metrics-v4.0.5.asciidoc
@@ -0,0 +1,228 @@
+:plugin: metrics
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.5
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-metrics/blob/v4.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Metrics filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The metrics filter is useful for aggregating metrics.
+
+IMPORTANT: Elasticsearch 2.0 no longer allows field names with dots. Version 3.0
+of the metrics filter plugin changes behavior to use nested fields rather than
+dotted notation to avoid colliding with versions of Elasticsearch 2.0+. Please
+note the changes in the documentation (underscores and sub-fields used).
+
+For example, if you have a field `response` that is
+an HTTP response code, and you want to count each
+kind of response, you can do this:
+[source,ruby]
+ filter {
+ metrics {
+ meter => [ "http_%{response}" ]
+ add_tag => "metric"
+ }
+ }
+
+Metrics are flushed every 5 seconds by default or according to
+`flush_interval`. Metrics appear as
+new events in the event stream and go through any filters
+that occur after as well as outputs.
+
+In general, you will want to add a tag to your metrics and have an output
+explicitly look for that tag.
+
+The event that is flushed will include every 'meter' and 'timer'
+metric in the following way:
+
+==== `meter` values
+
+For a `meter => "thing"` you will receive the following fields:
+
+* "[thing][count]" - the total count of events
+* "[thing][rate_1m]" - the per-second event rate in a 1-minute sliding window
+* "[thing][rate_5m]" - the per-second event rate in a 5-minute sliding window
+* "[thing][rate_15m]" - the per-second event rate in a 15-minute sliding window
+
+==== `timer` values
+
+For a `timer => { "thing" => "%{duration}" }` you will receive the following fields:
+
+* "[thing][count]" - the total count of events
+* "[thing][rate_1m]" - the per-second average value in a 1-minute sliding window
+* "[thing][rate_5m]" - the per-second average value in a 5-minute sliding window
+* "[thing][rate_15m]" - the per-second average value in a 15-minute sliding window
+* "[thing][min]" - the minimum value seen for this metric
+* "[thing][max]" - the maximum value seen for this metric
+* "[thing][stddev]" - the standard deviation for this metric
+* "[thing][mean]" - the mean for this metric
+* "[thing][pXX]" - the XXth percentile for this metric (see `percentiles`)
+
+The default lengths of the event rate window (1, 5, and 15 minutes)
+can be configured with the `rates` option.
+
+==== Example: Computing event rate
+
+For a simple example, let's track how many events per second are running
+through logstash:
+[source,ruby]
+----
+ input {
+ generator {
+ type => "generated"
+ }
+ }
+
+ filter {
+ if [type] == "generated" {
+ metrics {
+ meter => "events"
+ add_tag => "metric"
+ }
+ }
+ }
+
+ output {
+ # only emit events with the 'metric' tag
+ if "metric" in [tags] {
+ stdout {
+ codec => line {
+ format => "rate: %{[events][rate_1m]}"
+ }
+ }
+ }
+ }
+----
+
+Running the above:
+[source,ruby]
+ % bin/logstash -f example.conf
+ rate: 23721.983566819246
+ rate: 24811.395722536377
+ rate: 25875.892745934525
+ rate: 26836.42375967113
+
+We see the output includes our events' 1-minute rate.
+
+In the real world, you would emit this to graphite or another metrics store,
+like so:
+[source,ruby]
+ output {
+ graphite {
+ metrics => [ "events.rate_1m", "%{[events][rate_1m]}" ]
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Metrics Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-clear_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ignore_older_than>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-meter>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-percentiles>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-rates>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timer>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-clear_interval"]
+===== `clear_interval`
+
+ * Value type is <>
+ * Default value is `-1`
+
+The clear interval, when all counters are reset.
+
+If set to -1, the default value, the metrics will never be cleared.
+Otherwise, it should be a multiple of 5s.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_interval"]
+===== `flush_interval`
+
+ * Value type is <>
+ * Default value is `5`
+
+The flush interval, when the metrics event is created. Must be a multiple of 5s.
+
+[id="{version}-plugins-{type}s-{plugin}-ignore_older_than"]
+===== `ignore_older_than`
+
+ * Value type is <>
+ * Default value is `0`
+
+Don't track events that have `@timestamp` older than some number of seconds.
+
+This is useful if you want to only include events that are near real-time
+in your metrics.
+
+For example, to only count events that are within 10 seconds of real-time, you
+would do this:
+
+ filter {
+ metrics {
+ meter => [ "hits" ]
+ ignore_older_than => 10
+ }
+ }
+
+[id="{version}-plugins-{type}s-{plugin}-meter"]
+===== `meter`
+
+ * Value type is <>
+ * Default value is `[]`
+
+syntax: `meter => [ "name of metric", "name of metric" ]`
+
+[id="{version}-plugins-{type}s-{plugin}-percentiles"]
+===== `percentiles`
+
+ * Value type is <>
+ * Default value is `[1, 5, 10, 90, 95, 99, 100]`
+
+The percentiles that should be measured and emitted for timer values.
+
+[id="{version}-plugins-{type}s-{plugin}-rates"]
+===== `rates`
+
+ * Value type is <>
+ * Default value is `[1, 5, 15]`
+
+The rates that should be measured, in minutes.
+Possible values are 1, 5, and 15.
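+
+For example, to track only the 1-minute and 5-minute rates (a sketch; any
+subset of the supported values works the same way):
+[source,ruby]
+ filter {
+ metrics {
+ meter => [ "events" ]
+ rates => [ 1, 5 ]
+ }
+ }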
+
+[id="{version}-plugins-{type}s-{plugin}-timer"]
+===== `timer`
+
+ * Value type is <>
+ * Default value is `{}`
+
+syntax: `timer => { "name of metric" => "%{time_value}" }`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/multiline-index.asciidoc b/docs/versioned-plugins/filters/multiline-index.asciidoc
new file mode 100644
index 000000000..3e821bbfa
--- /dev/null
+++ b/docs/versioned-plugins/filters/multiline-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: multiline
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::multiline-v3.0.4.asciidoc[]
+include::multiline-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/multiline-v3.0.3.asciidoc b/docs/versioned-plugins/filters/multiline-v3.0.3.asciidoc
new file mode 100644
index 000000000..499ff61fb
--- /dev/null
+++ b/docs/versioned-plugins/filters/multiline-v3.0.3.asciidoc
@@ -0,0 +1,194 @@
+:plugin: multiline
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-multiline/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Multiline filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+
+This filter will collapse multiline messages from a single source into one Logstash event.
+
+The original goal of this filter was to allow joining of multi-line messages
+from files into a single event. For example, joining Java exception and
+stacktrace messages into a single event.
+
+NOTE: This filter will not work with multiple worker threads (`-w 2`) on the Logstash command line.
+
+The config looks like this:
+[source,ruby]
+    filter {
+      multiline {
+        pattern => "pattern, a regexp"
+        negate => boolean
+        what => "previous" or "next"
+      }
+    }
+
+The `pattern` should be a regexp (<> patterns are
+supported) which matches what you believe to be an indicator that the field
+is part of an event consisting of multiple lines of log data.
+
+The `what` must be `previous` or `next` and indicates the relation
+to the multi-line event.
+
+The `negate` can be `true` or `false` (defaults to `false`). If `true`, a
+message not matching the pattern will constitute a match of the multiline
+filter and the `what` will be applied. (The inverse also holds.)
+
+For example, Java stack traces are multiline and usually have the message
+starting at the far-left, with each subsequent line indented. Do this:
+[source,ruby]
+    filter {
+      multiline {
+        pattern => "^\s"
+        what => "previous"
+      }
+    }
+
+This says that any line starting with whitespace belongs to the previous line.
+
+Another example is C line continuations (backslash). Here's how to do that:
+[source,ruby]
+    filter {
+      multiline {
+        pattern => "\\$"
+        what => "next"
+      }
+    }
+
+This says that any line ending with a backslash should be combined with the
+following line.
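+
+A related case is joining every line that does 'not' start with a timestamp to
+the line before it, which is where `negate` comes in. This is a sketch only,
+assuming log lines that begin with an ISO8601 timestamp (adjust the pattern to
+your log format):
+[source,ruby]
+    filter {
+      multiline {
+        # lines NOT starting with a timestamp belong to the previous line
+        pattern => "^%{TIMESTAMP_ISO8601}"
+        negate => true
+        what => "previous"
+      }
+    }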
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Multiline Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-allow_duplicates>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-max_age>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-negate>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pattern>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-stream_identity>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-what>> |<>, one of `["previous", "next"]`|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-allow_duplicates"]
+===== `allow_duplicates`
+
+ * Value type is <>
+ * Default value is `true`
+
+Allow duplicate values on the source field.
+
+[id="{version}-plugins-{type}s-{plugin}-max_age"]
+===== `max_age`
+
+ * Value type is <>
+ * Default value is `5`
+
+The maximum age an event can be (in seconds) before it is automatically
+flushed.
+
+[id="{version}-plugins-{type}s-{plugin}-negate"]
+===== `negate`
+
+ * Value type is <>
+ * Default value is `false`
+
+Negate the regexp pattern, so that `what` is applied when the pattern does 'not' match.
+
+[id="{version}-plugins-{type}s-{plugin}-pattern"]
+===== `pattern`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The expression to match. The same matching engine as the
+<> is used, so the expression can contain
+a plain regular expression or one that also contains grok patterns.
+
+[id="{version}-plugins-{type}s-{plugin}-patterns_dir"]
+===== `patterns_dir`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Logstash ships by default with a bunch of patterns, so you don't
+necessarily need to define this yourself unless you are adding additional
+patterns.
+
+Pattern files are plain text with the format:
+[source,ruby]
+    NAME PATTERN
+
+For example:
+[source,ruby]
+    NUMBER \d+
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * Value type is <>
+ * Default value is `"message"`
+
+The field name to execute the pattern match on.
+
+[id="{version}-plugins-{type}s-{plugin}-stream_identity"]
+===== `stream_identity`
+
+ * Value type is <>
+ * Default value is `"%{host}.%{path}.%{type}"`
+
+The stream identity is how the multiline filter determines which stream an
+event belongs to. This is generally used for differentiating, say, events
+coming from multiple files in the same file input, or multiple connections
+coming from a tcp input.
+
+The default value here is usually what you want, but there are some cases
+where you want to change it. One such example is if you are using a tcp
+input with only one client connecting at any time. If that client
+reconnects (due to error or client restart), then Logstash will identify
+the new connection as a new stream and break any multiline goodness that
+may have occurred between the old and new connection. To solve this use
+case, you can use `%{@source_host}.%{@type}` instead.
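+
+For instance, a minimal sketch of that single-client tcp case:
+[source,ruby]
+    filter {
+      multiline {
+        pattern => "^\s"
+        what => "previous"
+        # one client at a time, so drop the connection-specific identity
+        stream_identity => "%{@source_host}.%{@type}"
+      }
+    }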
+
+[id="{version}-plugins-{type}s-{plugin}-what"]
+===== `what`
+
+ * This is a required setting.
+ * Value can be any of: `previous`, `next`
+ * There is no default value for this setting.
+
+If the pattern matches, does the event belong to the next or previous event?
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/multiline-v3.0.4.asciidoc b/docs/versioned-plugins/filters/multiline-v3.0.4.asciidoc
new file mode 100644
index 000000000..6fe0f4e0d
--- /dev/null
+++ b/docs/versioned-plugins/filters/multiline-v3.0.4.asciidoc
@@ -0,0 +1,194 @@
+:plugin: multiline
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-multiline/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Multiline filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+
+This filter will collapse multiline messages from a single source into one Logstash event.
+
+The original goal of this filter was to allow joining of multi-line messages
+from files into a single event. For example, joining Java exception and
+stacktrace messages into a single event.
+
+NOTE: This filter will not work with multiple worker threads (`-w 2`) on the Logstash command line.
+
+The config looks like this:
+[source,ruby]
+    filter {
+      multiline {
+        pattern => "pattern, a regexp"
+        negate => boolean
+        what => "previous" or "next"
+      }
+    }
+
+The `pattern` should be a regexp (<> patterns are
+supported) which matches what you believe to be an indicator that the field
+is part of an event consisting of multiple lines of log data.
+
+The `what` must be `previous` or `next` and indicates the relation
+to the multi-line event.
+
+The `negate` can be `true` or `false` (defaults to `false`). If `true`, a
+message not matching the pattern will constitute a match of the multiline
+filter and the `what` will be applied. (The inverse also holds.)
+
+For example, Java stack traces are multiline and usually have the message
+starting at the far-left, with each subsequent line indented. Do this:
+[source,ruby]
+    filter {
+      multiline {
+        pattern => "^\s"
+        what => "previous"
+      }
+    }
+
+This says that any line starting with whitespace belongs to the previous line.
+
+Another example is C line continuations (backslash). Here's how to do that:
+[source,ruby]
+    filter {
+      multiline {
+        pattern => "\\$"
+        what => "next"
+      }
+    }
+
+This says that any line ending with a backslash should be combined with the
+following line.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Multiline Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-allow_duplicates>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-max_age>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-negate>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pattern>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-stream_identity>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-what>> |<>, one of `["previous", "next"]`|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-allow_duplicates"]
+===== `allow_duplicates`
+
+ * Value type is <>
+ * Default value is `true`
+
+Allow duplicate values on the source field.
+
+[id="{version}-plugins-{type}s-{plugin}-max_age"]
+===== `max_age`
+
+ * Value type is <>
+ * Default value is `5`
+
+The maximum age an event can be (in seconds) before it is automatically
+flushed.
+
+[id="{version}-plugins-{type}s-{plugin}-negate"]
+===== `negate`
+
+ * Value type is <>
+ * Default value is `false`
+
+Negate the regexp pattern, so that `what` is applied when the pattern does 'not' match.
+
+[id="{version}-plugins-{type}s-{plugin}-pattern"]
+===== `pattern`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The expression to match. The same matching engine as the
+<> is used, so the expression can contain
+a plain regular expression or one that also contains grok patterns.
+
+[id="{version}-plugins-{type}s-{plugin}-patterns_dir"]
+===== `patterns_dir`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Logstash ships by default with a bunch of patterns, so you don't
+necessarily need to define this yourself unless you are adding additional
+patterns.
+
+Pattern files are plain text with the format:
+[source,ruby]
+    NAME PATTERN
+
+For example:
+[source,ruby]
+    NUMBER \d+
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * Value type is <>
+ * Default value is `"message"`
+
+The field name to execute the pattern match on.
+
+[id="{version}-plugins-{type}s-{plugin}-stream_identity"]
+===== `stream_identity`
+
+ * Value type is <>
+ * Default value is `"%{host}.%{path}.%{type}"`
+
+The stream identity is how the multiline filter determines which stream an
+event belongs to. This is generally used for differentiating, say, events
+coming from multiple files in the same file input, or multiple connections
+coming from a tcp input.
+
+The default value here is usually what you want, but there are some cases
+where you want to change it. One such example is if you are using a tcp
+input with only one client connecting at any time. If that client
+reconnects (due to error or client restart), then Logstash will identify
+the new connection as a new stream and break any multiline goodness that
+may have occurred between the old and new connection. To solve this use
+case, you can use `%{@source_host}.%{@type}` instead.
+
+[id="{version}-plugins-{type}s-{plugin}-what"]
+===== `what`
+
+ * This is a required setting.
+ * Value can be any of: `previous`, `next`
+ * There is no default value for this setting.
+
+If the pattern matches, does the event belong to the next or previous event?
+ + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/mutate-index.asciidoc b/docs/versioned-plugins/filters/mutate-index.asciidoc new file mode 100644 index 000000000..244760df3 --- /dev/null +++ b/docs/versioned-plugins/filters/mutate-index.asciidoc @@ -0,0 +1,18 @@ +:plugin: mutate +:type: filter + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-28 +| <> | 2017-11-07 +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::mutate-v3.2.0.asciidoc[] +include::mutate-v3.1.7.asciidoc[] +include::mutate-v3.1.6.asciidoc[] +include::mutate-v3.1.5.asciidoc[] + diff --git a/docs/versioned-plugins/filters/mutate-v3.1.5.asciidoc b/docs/versioned-plugins/filters/mutate-v3.1.5.asciidoc new file mode 100644 index 000000000..5cb812257 --- /dev/null +++ b/docs/versioned-plugins/filters/mutate-v3.1.5.asciidoc @@ -0,0 +1,282 @@ +:plugin: mutate +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.5 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-mutate/blob/v3.1.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Mutate filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The mutate filter allows you to perform general mutations on fields. You +can rename, remove, replace, and modify fields in your events. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Mutate Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-convert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-copy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-gsub>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-join>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-lowercase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-merge>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-rename>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-replace>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-split>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-strip>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-update>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-uppercase>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-convert"] +===== `convert` + + * Value type is <> + * There is no default value for this setting. + +Convert a field's value to a different type, like turning a string to an +integer. If the field value is an array, all members will be converted. +If the field is a hash, no action will be taken. 
+
+If the conversion type is `boolean`, the acceptable values are:
+
+* **True:** `true`, `t`, `yes`, `y`, and `1`
+* **False:** `false`, `f`, `no`, `n`, and `0`
+
+If a value other than these is provided, it will pass straight through
+and a warning message will be logged.
+
+Valid conversion targets are: integer, float, string, and boolean.
+
+Example:
+[source,ruby]
+    filter {
+      mutate {
+        convert => { "fieldname" => "integer" }
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-copy"]
+===== `copy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Copy an existing field to another field. The existing target field will be overridden.
+
+Example:
+[source,ruby]
+    filter {
+      mutate {
+        copy => { "source_field" => "dest_field" }
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-gsub"]
+===== `gsub`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Convert a string field by applying a regular expression and a replacement.
+If the field is not a string, no action will be taken.
+
+This configuration takes an array consisting of 3 elements per
+field/substitution.
+
+Be aware that you need to escape any backslash in the config file.
+
+Example:
+[source,ruby]
+    filter {
+      mutate {
+        gsub => [
+          # replace all forward slashes with underscore
+          "fieldname", "/", "_",
+          # replace backslashes, question marks, hashes, and minuses
+          # with a dot "."
+          "fieldname2", "[\\?#-]", "."
+        ]
+      }
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-join"]
+===== `join`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Join an array with a separator character. Does nothing on non-array fields.
+
+Example:
+[source,ruby]
+    filter {
+      mutate {
+        join => { "fieldname" => "," }
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-lowercase"]
+===== `lowercase`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Convert a string to its lowercase equivalent.
+
+Example:
+[source,ruby]
+    filter {
+      mutate {
+        lowercase => [ "fieldname" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-merge"]
+===== `merge`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Merge two fields of arrays or hashes.
+String fields will automatically be converted into an array, so:
+==========================
+ `array` + `string` will work
+ `string` + `string` will result in a 2 entry array in `dest_field`
+ `array` and `hash` will not work
+==========================
+Example:
+[source,ruby]
+    filter {
+      mutate {
+        merge => { "dest_field" => "added_field" }
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-rename"]
+===== `rename`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Rename one or more fields.
+
+Example:
+[source,ruby]
+    filter {
+      mutate {
+        # Renames the 'HOSTORIP' field to 'client_ip'
+        rename => { "HOSTORIP" => "client_ip" }
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-replace"]
+===== `replace`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Replace a field with a new value. The new value can include `%{foo}` strings
+to help you build a new value from other parts of the event.
+
+Example:
+[source,ruby]
+    filter {
+      mutate {
+        replace => { "message" => "%{source_host}: My new message" }
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-split"]
+===== `split`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Split a field into an array using a separator character. Only works on string
+fields.
+ +Example: +[source,ruby] + filter { + mutate { + split => { "fieldname" => "," } + } + } + +[id="{version}-plugins-{type}s-{plugin}-strip"] +===== `strip` + + * Value type is <> + * There is no default value for this setting. + +Strip whitespace from field. NOTE: this only works on leading and trailing whitespace. + +Example: +[source,ruby] + filter { + mutate { + strip => ["field1", "field2"] + } + } + +[id="{version}-plugins-{type}s-{plugin}-update"] +===== `update` + + * Value type is <> + * There is no default value for this setting. + +Update an existing field with a new value. If the field does not exist, +then no action will be taken. + +Example: +[source,ruby] + filter { + mutate { + update => { "sample" => "My new message" } + } + } + +[id="{version}-plugins-{type}s-{plugin}-uppercase"] +===== `uppercase` + + * Value type is <> + * There is no default value for this setting. + +Convert a string to its uppercase equivalent. + +Example: +[source,ruby] + filter { + mutate { + uppercase => [ "fieldname" ] + } + } + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/mutate-v3.1.6.asciidoc b/docs/versioned-plugins/filters/mutate-v3.1.6.asciidoc new file mode 100644 index 000000000..d8cc45f7d --- /dev/null +++ b/docs/versioned-plugins/filters/mutate-v3.1.6.asciidoc @@ -0,0 +1,283 @@ +:plugin: mutate +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.6 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-mutate/blob/v3.1.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Mutate filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The mutate filter allows you to perform general mutations on fields. You +can rename, remove, replace, and modify fields in your events. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Mutate Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-convert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-copy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-gsub>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-join>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-lowercase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-merge>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-rename>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-replace>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-split>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-strip>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-update>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-uppercase>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. 
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-convert"]
+===== `convert`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Convert a field's value to a different type, like turning a string to an
+integer. If the field value is an array, all members will be converted.
+If the field is a hash, no action will be taken.
+
+If the conversion type is `boolean`, the acceptable values are:
+
+* **True:** `true`, `t`, `yes`, `y`, and `1`
+* **False:** `false`, `f`, `no`, `n`, and `0`
+
+If a value other than these is provided, it will pass straight through
+and a warning message will be logged.
+
+Valid conversion targets are: integer, float, string, and boolean.
+
+Example:
+[source,ruby]
+    filter {
+      mutate {
+        convert => { "fieldname" => "integer" }
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-copy"]
+===== `copy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Copy an existing field to another field. The existing target field will be overridden.
+
+Example:
+[source,ruby]
+    filter {
+      mutate {
+        copy => { "source_field" => "dest_field" }
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-gsub"]
+===== `gsub`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Match a regular expression against a field value and replace all matches
+with a replacement string. Only fields that are strings or arrays of
+strings are supported. For other kinds of fields, no action will be taken.
+
+This configuration takes an array consisting of 3 elements per
+field/substitution.
+
+Be aware that you need to escape any backslash in the config file.
+
+Example:
+[source,ruby]
+    filter {
+      mutate {
+        gsub => [
+          # replace all forward slashes with underscore
+          "fieldname", "/", "_",
+          # replace backslashes, question marks, hashes, and minuses
+          # with a dot "."
+          "fieldname2", "[\\?#-]", "."
+        ]
+      }
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-join"]
+===== `join`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Join an array with a separator character. Does nothing on non-array fields.
+
+Example:
+[source,ruby]
+    filter {
+      mutate {
+        join => { "fieldname" => "," }
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-lowercase"]
+===== `lowercase`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Convert a string to its lowercase equivalent.
+
+Example:
+[source,ruby]
+    filter {
+      mutate {
+        lowercase => [ "fieldname" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-merge"]
+===== `merge`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Merge two fields of arrays or hashes.
+String fields will automatically be converted into an array, so:
+==========================
+ `array` + `string` will work
+ `string` + `string` will result in a 2 entry array in `dest_field`
+ `array` and `hash` will not work
+==========================
+Example:
+[source,ruby]
+    filter {
+      mutate {
+        merge => { "dest_field" => "added_field" }
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-rename"]
+===== `rename`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Rename one or more fields.
+
+Example:
+[source,ruby]
+    filter {
+      mutate {
+        # Renames the 'HOSTORIP' field to 'client_ip'
+        rename => { "HOSTORIP" => "client_ip" }
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-replace"]
+===== `replace`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Replace a field with a new value.
The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +Example: +[source,ruby] + filter { + mutate { + replace => { "message" => "%{source_host}: My new message" } + } + } + +[id="{version}-plugins-{type}s-{plugin}-split"] +===== `split` + + * Value type is <> + * There is no default value for this setting. + +Split a field to an array using a separator character. Only works on string +fields. + +Example: +[source,ruby] + filter { + mutate { + split => { "fieldname" => "," } + } + } + +[id="{version}-plugins-{type}s-{plugin}-strip"] +===== `strip` + + * Value type is <> + * There is no default value for this setting. + +Strip whitespace from field. NOTE: this only works on leading and trailing whitespace. + +Example: +[source,ruby] + filter { + mutate { + strip => ["field1", "field2"] + } + } + +[id="{version}-plugins-{type}s-{plugin}-update"] +===== `update` + + * Value type is <> + * There is no default value for this setting. + +Update an existing field with a new value. If the field does not exist, +then no action will be taken. + +Example: +[source,ruby] + filter { + mutate { + update => { "sample" => "My new message" } + } + } + +[id="{version}-plugins-{type}s-{plugin}-uppercase"] +===== `uppercase` + + * Value type is <> + * There is no default value for this setting. + +Convert a string to its uppercase equivalent. + +Example: +[source,ruby] + filter { + mutate { + uppercase => [ "fieldname" ] + } + } + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/mutate-v3.1.7.asciidoc b/docs/versioned-plugins/filters/mutate-v3.1.7.asciidoc new file mode 100644 index 000000000..29039dbc2 --- /dev/null +++ b/docs/versioned-plugins/filters/mutate-v3.1.7.asciidoc @@ -0,0 +1,283 @@ +:plugin: mutate +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.7 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-mutate/blob/v3.1.7/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Mutate filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The mutate filter allows you to perform general mutations on fields. You +can rename, remove, replace, and modify fields in your events. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Mutate Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-convert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-copy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-gsub>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-join>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-lowercase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-merge>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-rename>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-replace>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-split>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-strip>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-update>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-uppercase>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-convert"] +===== `convert` + + * Value type is <> + * There is no default value for this setting. + +Convert a field's value to a different type, like turning a string to an +integer. If the field value is an array, all members will be converted. +If the field is a hash, no action will be taken. + +If the conversion type is `boolean`, the acceptable values are: + +* **True:** `true`, `t`, `yes`, `y`, and `1` +* **False:** `false`, `f`, `no`, `n`, and `0` + +If a value other than these is provided, it will pass straight through +and log a warning message. + +Valid conversion targets are: integer, float, string, and boolean. + +Example: +[source,ruby] + filter { + mutate { + convert => { "fieldname" => "integer" } + } + } + +[id="{version}-plugins-{type}s-{plugin}-copy"] +===== `copy` + + * Value type is <> + * There is no default value for this setting. + +Copy an existing field to another field. Existing target field will be overriden. + +Example: +[source,ruby] + filter { + mutate { + copy => { "source_field" => "dest_field" } + } + } + +[id="{version}-plugins-{type}s-{plugin}-gsub"] +===== `gsub` + + * Value type is <> + * There is no default value for this setting. + +Match a regular expression against a field value and replace all matches +with a replacement string. Only fields that are strings or arrays of +strings are supported. For other kinds of fields no action will be taken. + +This configuration takes an array consisting of 3 elements per +field/substitution. + +Be aware of escaping any backslash in the config file. + +Example: +[source,ruby] + filter { + mutate { + gsub => [ + # replace all forward slashes with underscore + "fieldname", "/", "_", + # replace backslashes, question marks, hashes, and minuses + # with a dot "." + "fieldname2", "[\\?#-]", "." + ] + } + } + + +[id="{version}-plugins-{type}s-{plugin}-join"] +===== `join` + + * Value type is <> + * There is no default value for this setting. + +Join an array with a separator character. Does nothing on non-array fields. + +Example: +[source,ruby] + filter { + mutate { + join => { "fieldname" => "," } + } + } + +[id="{version}-plugins-{type}s-{plugin}-lowercase"] +===== `lowercase` + + * Value type is <> + * There is no default value for this setting. + +Convert a string to its lowercase equivalent. 
+ +Example: +[source,ruby] + filter { + mutate { + lowercase => [ "fieldname" ] + } + } + +[id="{version}-plugins-{type}s-{plugin}-merge"] +===== `merge` + + * Value type is <> + * There is no default value for this setting. + +Merge two fields of arrays or hashes. +String fields will be automatically be converted into an array, so: +========================== + `array` + `string` will work + `string` + `string` will result in an 2 entry array in `dest_field` + `array` and `hash` will not work +========================== +Example: +[source,ruby] + filter { + mutate { + merge => { "dest_field" => "added_field" } + } + } + +[id="{version}-plugins-{type}s-{plugin}-rename"] +===== `rename` + + * Value type is <> + * There is no default value for this setting. + +Rename one or more fields. + +Example: +[source,ruby] + filter { + mutate { + # Renames the 'HOSTORIP' field to 'client_ip' + rename => { "HOSTORIP" => "client_ip" } + } + } + +[id="{version}-plugins-{type}s-{plugin}-replace"] +===== `replace` + + * Value type is <> + * There is no default value for this setting. + +Replace a field with a new value. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +Example: +[source,ruby] + filter { + mutate { + replace => { "message" => "%{source_host}: My new message" } + } + } + +[id="{version}-plugins-{type}s-{plugin}-split"] +===== `split` + + * Value type is <> + * There is no default value for this setting. + +Split a field to an array using a separator character. Only works on string +fields. + +Example: +[source,ruby] + filter { + mutate { + split => { "fieldname" => "," } + } + } + +[id="{version}-plugins-{type}s-{plugin}-strip"] +===== `strip` + + * Value type is <> + * There is no default value for this setting. + +Strip whitespace from field. NOTE: this only works on leading and trailing whitespace. + +Example: +[source,ruby] + filter { + mutate { + strip => ["field1", "field2"] + } + } + +[id="{version}-plugins-{type}s-{plugin}-update"] +===== `update` + + * Value type is <> + * There is no default value for this setting. + +Update an existing field with a new value. If the field does not exist, +then no action will be taken. + +Example: +[source,ruby] + filter { + mutate { + update => { "sample" => "My new message" } + } + } + +[id="{version}-plugins-{type}s-{plugin}-uppercase"] +===== `uppercase` + + * Value type is <> + * There is no default value for this setting. + +Convert a string to its uppercase equivalent. + +Example: +[source,ruby] + filter { + mutate { + uppercase => [ "fieldname" ] + } + } + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/mutate-v3.2.0.asciidoc b/docs/versioned-plugins/filters/mutate-v3.2.0.asciidoc new file mode 100644 index 000000000..804b57d2f --- /dev/null +++ b/docs/versioned-plugins/filters/mutate-v3.2.0.asciidoc @@ -0,0 +1,287 @@ +:plugin: mutate +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.2.0 +:release_date: 2017-11-28 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-mutate/blob/v3.2.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Mutate filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The mutate filter allows you to perform general mutations on fields. You +can rename, remove, replace, and modify fields in your events. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Mutate Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-convert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-copy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-gsub>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-join>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-lowercase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-merge>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-rename>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-replace>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-split>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-strip>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-update>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-uppercase>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-convert"] +===== `convert` + + * Value type is <> + * There is no default value for this setting. + +Convert a field's value to a different type, like turning a string to an +integer. If the field value is an array, all members will be converted. +If the field is a hash no action will be taken. + +If the conversion type is `boolean`, the acceptable values are: + +* **True:** `true`, `t`, `yes`, `y`, and `1` +* **False:** `false`, `f`, `no`, `n`, and `0` + +If a value other than these is provided, it will pass straight through +and log a warning message. + +If the conversion type is `integer` and the value is a boolean, it will be converted as: +* **True:** `1` +* **False:** `0` + +Valid conversion targets are: integer, float, string, and boolean. + +Example: +[source,ruby] + filter { + mutate { + convert => { "fieldname" => "integer" } + } + } + +[id="{version}-plugins-{type}s-{plugin}-copy"] +===== `copy` + + * Value type is <> + * There is no default value for this setting. + +Copy an existing field to another field. Existing target field will be overriden. + +Example: +[source,ruby] + filter { + mutate { + copy => { "source_field" => "dest_field" } + } + } + +[id="{version}-plugins-{type}s-{plugin}-gsub"] +===== `gsub` + + * Value type is <> + * There is no default value for this setting. + +Match a regular expression against a field value and replace all matches +with a replacement string. Only fields that are strings or arrays of +strings are supported. For other kinds of fields no action will be taken. + +This configuration takes an array consisting of 3 elements per +field/substitution. + +Be aware of escaping any backslash in the config file. + +Example: +[source,ruby] + filter { + mutate { + gsub => [ + # replace all forward slashes with underscore + "fieldname", "/", "_", + # replace backslashes, question marks, hashes, and minuses + # with a dot "." + "fieldname2", "[\\?#-]", "." 
+ ] + } + } + + +[id="{version}-plugins-{type}s-{plugin}-join"] +===== `join` + + * Value type is <> + * There is no default value for this setting. + +Join an array with a separator character. Does nothing on non-array fields. + +Example: +[source,ruby] + filter { + mutate { + join => { "fieldname" => "," } + } + } + +[id="{version}-plugins-{type}s-{plugin}-lowercase"] +===== `lowercase` + + * Value type is <> + * There is no default value for this setting. + +Convert a string to its lowercase equivalent. + +Example: +[source,ruby] + filter { + mutate { + lowercase => [ "fieldname" ] + } + } + +[id="{version}-plugins-{type}s-{plugin}-merge"] +===== `merge` + + * Value type is <> + * There is no default value for this setting. + +Merge two fields of arrays or hashes. +String fields will be automatically be converted into an array, so: +========================== + `array` + `string` will work + `string` + `string` will result in an 2 entry array in `dest_field` + `array` and `hash` will not work +========================== +Example: +[source,ruby] + filter { + mutate { + merge => { "dest_field" => "added_field" } + } + } + +[id="{version}-plugins-{type}s-{plugin}-rename"] +===== `rename` + + * Value type is <> + * There is no default value for this setting. + +Rename one or more fields. + +Example: +[source,ruby] + filter { + mutate { + # Renames the 'HOSTORIP' field to 'client_ip' + rename => { "HOSTORIP" => "client_ip" } + } + } + +[id="{version}-plugins-{type}s-{plugin}-replace"] +===== `replace` + + * Value type is <> + * There is no default value for this setting. + +Replace a field with a new value. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +Example: +[source,ruby] + filter { + mutate { + replace => { "message" => "%{source_host}: My new message" } + } + } + +[id="{version}-plugins-{type}s-{plugin}-split"] +===== `split` + + * Value type is <> + * There is no default value for this setting. + +Split a field to an array using a separator character. Only works on string +fields. + +Example: +[source,ruby] + filter { + mutate { + split => { "fieldname" => "," } + } + } + +[id="{version}-plugins-{type}s-{plugin}-strip"] +===== `strip` + + * Value type is <> + * There is no default value for this setting. + +Strip whitespace from field. NOTE: this only works on leading and trailing whitespace. + +Example: +[source,ruby] + filter { + mutate { + strip => ["field1", "field2"] + } + } + +[id="{version}-plugins-{type}s-{plugin}-update"] +===== `update` + + * Value type is <> + * There is no default value for this setting. + +Update an existing field with a new value. If the field does not exist, +then no action will be taken. + +Example: +[source,ruby] + filter { + mutate { + update => { "sample" => "My new message" } + } + } + +[id="{version}-plugins-{type}s-{plugin}-uppercase"] +===== `uppercase` + + * Value type is <> + * There is no default value for this setting. + +Convert a string to its uppercase equivalent. 
+
+Example:
+[source,ruby]
+    filter {
+      mutate {
+        uppercase => [ "fieldname" ]
+      }
+    }
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/oui-index.asciidoc b/docs/versioned-plugins/filters/oui-index.asciidoc
new file mode 100644
index 000000000..83bebcbef
--- /dev/null
+++ b/docs/versioned-plugins/filters/oui-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: oui
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::oui-v3.0.2.asciidoc[]
+include::oui-v3.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/oui-v3.0.1.asciidoc b/docs/versioned-plugins/filters/oui-v3.0.1.asciidoc
new file mode 100644
index 000000000..c82cc5374
--- /dev/null
+++ b/docs/versioned-plugins/filters/oui-v3.0.1.asciidoc
@@ -0,0 +1,70 @@
+:plugin: oui
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-oui/blob/v3.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Oui filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+A Logstash filter that parses OUI (organizationally unique identifier) data from MAC addresses.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Oui Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * Value type is <>
+ * Default value is `"message"`
+
+The source field containing the MAC address to parse.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * Default value is `"oui"`
+
+The target field in which to place the parsed OUI data.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/oui-v3.0.2.asciidoc b/docs/versioned-plugins/filters/oui-v3.0.2.asciidoc
new file mode 100644
index 000000000..fb8e50dbe
--- /dev/null
+++ b/docs/versioned-plugins/filters/oui-v3.0.2.asciidoc
@@ -0,0 +1,70 @@
+:plugin: oui
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.2
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-oui/blob/v3.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Oui filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+A Logstash filter that parses OUI (organizationally unique identifier) data from MAC addresses.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Oui Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+ * Value type is <>
+ * Default value is `"message"`
+
+The source field containing the MAC address to parse.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * Default value is `"oui"`
+
+The target field in which to place the parsed OUI data.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/prune-index.asciidoc b/docs/versioned-plugins/filters/prune-index.asciidoc
new file mode 100644
index 000000000..e9fe878f6
--- /dev/null
+++ b/docs/versioned-plugins/filters/prune-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: prune
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::prune-v3.0.3.asciidoc[]
+include::prune-v3.0.2.asciidoc[]
+include::prune-v3.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/prune-v3.0.1.asciidoc b/docs/versioned-plugins/filters/prune-v3.0.1.asciidoc
new file mode 100644
index 000000000..b20b96a53
--- /dev/null
+++ b/docs/versioned-plugins/filters/prune-v3.0.1.asciidoc
@@ -0,0 +1,154 @@
+:plugin: prune
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-prune/blob/v3.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Prune filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The prune filter is for removing fields from events based on
+whitelists or blacklists of field names or their values (names and
+values can also be regular expressions).
+
+This can be useful, for example, if you have a <>
+or <> filter that creates a number of fields
+whose names you don't necessarily know beforehand,
+and you only want to keep a subset of them.
+
+Usage help:
+To specify an exact field name or value, use the regular expression syntax `^some_name_or_value$`.
+Example usage: Input data `{ "msg":"hello world", "msg_short":"hw" }`
+[source,ruby]
+    filter {
+      prune {
+        whitelist_names => [ "msg" ]
+      }
+    }
+Allows both `"msg"` and `"msg_short"` through.
+
+While:
+[source,ruby]
+    filter {
+      prune {
+        whitelist_names => ["^msg$"]
+      }
+    }
+Allows only `"msg"` through.
+
+Logstash stores an event's `tags` as a field which is subject to pruning. Remember to `whitelist_names => [ "^tags$" ]`
+to maintain `tags` after pruning, or use `blacklist_values => [ "^tag_name$" ]` to eliminate a specific `tag`.
+
+NOTE: This filter currently only supports operations on top-level fields,
+i.e. whitelisting and blacklisting of subfields based on name or value
+does not work.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Prune Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-blacklist_names>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-blacklist_values>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-interpolate>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-whitelist_names>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-whitelist_values>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-blacklist_names"]
+===== `blacklist_names`
+
+ * Value type is <>
+ * Default value is `["%{[^}]+}"]`
+
+Exclude fields whose names match the specified regexps; by default, exclude unresolved `%{field}` strings.
+[source,ruby]
+    filter {
+      prune {
+        blacklist_names => [ "method", "(referrer|status)", "${some}_field" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-blacklist_values"]
+===== `blacklist_values`
+
+ * Value type is <>
+ * Default value is `{}`
+
+Exclude specified fields if their values match one of the supplied regular expressions.
+In case field values are arrays, each array item is matched against the regular expressions and matching array items will be excluded.
+[source,ruby]
+    filter {
+      prune {
+        blacklist_values => [ "uripath", "/index.php",
+                              "method", "(HEAD|OPTIONS)",
+                              "status", "^[^2]" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-interpolate"]
+===== `interpolate`
+
+ * Value type is <>
+ * Default value is `false`
+
+Controls whether configured field names and values should be interpolated for
+dynamic values (when resolving `%{some_field}`).
+This probably adds some performance overhead. Defaults to `false`.
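+
+For example, a sketch in which the blacklisted value is resolved from each
+event before matching (the `forbidden_term` field name is an assumption for
+illustration):
+[source,ruby]
+    filter {
+      prune {
+        interpolate => true
+        # "%{forbidden_term}" is resolved per event before being matched
+        blacklist_values => [ "message", "%{forbidden_term}" ]
+      }
+    }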
+
+[id="{version}-plugins-{type}s-{plugin}-whitelist_names"]
+===== `whitelist_names`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Include fields only if their names match the specified regexps. Defaults to an empty list, which means include everything.
+[source,ruby]
+    filter {
+      prune {
+        whitelist_names => [ "method", "(referrer|status)", "${some}_field" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-whitelist_values"]
+===== `whitelist_values`
+
+ * Value type is <>
+ * Default value is `{}`
+
+Include specified fields only if their values match one of the supplied regular expressions.
+In case field values are arrays, each array item is matched against the regular expressions and only matching array items will be included.
+[source,ruby]
+    filter {
+      prune {
+        whitelist_values => [ "uripath", "/index.php",
+                              "method", "(GET|POST)",
+                              "status", "^[^2]" ]
+      }
+    }
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/prune-v3.0.2.asciidoc b/docs/versioned-plugins/filters/prune-v3.0.2.asciidoc
new file mode 100644
index 000000000..4aa7058ef
--- /dev/null
+++ b/docs/versioned-plugins/filters/prune-v3.0.2.asciidoc
@@ -0,0 +1,154 @@
+:plugin: prune
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.2
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-prune/blob/v3.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Prune filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The prune filter is for removing fields from events based on
+whitelists or blacklists of field names or their values (names and
+values can also be regular expressions).
+
+This can be useful, for example, if you have a <>
+or <> filter that creates a number of fields
+whose names you don't necessarily know beforehand,
+and you only want to keep a subset of them.
+
+Usage help:
+To specify an exact field name or value, use the regular expression syntax `^some_name_or_value$`.
+Example usage: Input data `{ "msg":"hello world", "msg_short":"hw" }`
+[source,ruby]
+    filter {
+      prune {
+        whitelist_names => [ "msg" ]
+      }
+    }
+Allows both `"msg"` and `"msg_short"` through.
+
+While:
+[source,ruby]
+    filter {
+      prune {
+        whitelist_names => ["^msg$"]
+      }
+    }
+Allows only `"msg"` through.
+
+Logstash stores an event's `tags` as a field which is subject to pruning. Remember to `whitelist_names => [ "^tags$" ]`
+to maintain `tags` after pruning, or use `blacklist_values => [ "^tag_name$" ]` to eliminate a specific `tag`.
+
+NOTE: This filter currently only supports operations on top-level fields,
+i.e. whitelisting and blacklisting of subfields based on name or value
+does not work.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Prune Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-blacklist_names>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-blacklist_values>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-interpolate>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-whitelist_names>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-whitelist_values>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-blacklist_names"]
+===== `blacklist_names`
+
+  * Value type is <>
+  * Default value is `["%{[^}]+}"]`
+
+Exclude fields whose names match the specified regexps; by default, exclude unresolved `%{field}` strings.
+[source,ruby]
+    filter {
+      prune {
+        blacklist_names => [ "method", "(referrer|status)", "${some}_field" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-blacklist_values"]
+===== `blacklist_values`
+
+  * Value type is <>
+  * Default value is `{}`
+
+Exclude specified fields if their values match one of the supplied regular expressions.
+If a field value is an array, each array item is matched against the regular expressions, and matching array items are excluded.
+[source,ruby]
+    filter {
+      prune {
+        blacklist_values => [ "uripath", "/index.php",
+                              "method", "(HEAD|OPTIONS)",
+                              "status", "^[^2]" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-interpolate"]
+===== `interpolate`
+
+  * Value type is <>
+  * Default value is `false`
+
+Determines whether configuration fields and values should be interpolated for
+dynamic values (when resolving `%{some_field}`).
+This probably adds some performance overhead. Defaults to `false`.
+
+[id="{version}-plugins-{type}s-{plugin}-whitelist_names"]
+===== `whitelist_names`
+
+  * Value type is <>
+  * Default value is `[]`
+
+Include fields only if their names match the specified regexps. Defaults to an empty list, which means include everything.
+[source,ruby]
+    filter {
+      prune {
+        whitelist_names => [ "method", "(referrer|status)", "${some}_field" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-whitelist_values"]
+===== `whitelist_values`
+
+  * Value type is <>
+  * Default value is `{}`
+
+Include specified fields only if their values match one of the supplied regular expressions.
+If a field value is an array, each array item is matched against the regular expressions, and only matching array items are included.
+[source,ruby]
+    filter {
+      prune {
+        whitelist_values => [ "uripath", "/index.php",
+                              "method", "(GET|POST)",
+                              "status", "^[^2]" ]
+      }
+    }
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/prune-v3.0.3.asciidoc b/docs/versioned-plugins/filters/prune-v3.0.3.asciidoc
new file mode 100644
index 000000000..b6203fb98
--- /dev/null
+++ b/docs/versioned-plugins/filters/prune-v3.0.3.asciidoc
@@ -0,0 +1,154 @@
+:plugin: prune
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-prune/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Prune filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The prune filter is for removing fields from events based on
+whitelists or blacklists of field names or their values (names and
+values can also be regular expressions).
+
+This can be useful if, for example, you have a <>
+or <> filter that creates a number of fields
+whose names you don't necessarily know beforehand,
+and you only want to keep a subset of them.
+
+Usage help:
+To specify an exact field name or value, use the regular expression syntax `^some_name_or_value$`.
+Example usage: Input data `{ "msg":"hello world", "msg_short":"hw" }`
+[source,ruby]
+    filter {
+      prune {
+        whitelist_names => [ "msg" ]
+      }
+    }
+Allows both `"msg"` and `"msg_short"` through.
+
+While:
+[source,ruby]
+    filter {
+      prune {
+        whitelist_names => ["^msg$"]
+      }
+    }
+Allows only `"msg"` through.
+
+Logstash stores an event's `tags` as a field which is subject to pruning. Remember to `whitelist_names => [ "^tags$" ]`
+to maintain `tags` after pruning, or use `blacklist_values => [ "^tag_name$" ]` to eliminate a specific `tag`.
+
+NOTE: This filter currently only supports operations on top-level fields,
+i.e. whitelisting and blacklisting of subfields based on name or value
+does not work.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Prune Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-blacklist_names>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-blacklist_values>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-interpolate>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-whitelist_names>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-whitelist_values>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-blacklist_names"]
+===== `blacklist_names`
+
+  * Value type is <>
+  * Default value is `["%{[^}]+}"]`
+
+Exclude fields whose names match the specified regexps; by default, exclude unresolved `%{field}` strings.
+[source,ruby]
+    filter {
+      prune {
+        blacklist_names => [ "method", "(referrer|status)", "${some}_field" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-blacklist_values"]
+===== `blacklist_values`
+
+  * Value type is <>
+  * Default value is `{}`
+
+Exclude specified fields if their values match one of the supplied regular expressions.
+If a field value is an array, each array item is matched against the regular expressions, and matching array items are excluded.
+[source,ruby]
+    filter {
+      prune {
+        blacklist_values => [ "uripath", "/index.php",
+                              "method", "(HEAD|OPTIONS)",
+                              "status", "^[^2]" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-interpolate"]
+===== `interpolate`
+
+  * Value type is <>
+  * Default value is `false`
+
+Determines whether configuration fields and values should be interpolated for
+dynamic values (when resolving `%{some_field}`).
+This probably adds some performance overhead. Defaults to `false`.
+
+[id="{version}-plugins-{type}s-{plugin}-whitelist_names"]
+===== `whitelist_names`
+
+  * Value type is <>
+  * Default value is `[]`
+
+Include fields only if their names match the specified regexps. Defaults to an empty list, which means include everything.
+[source,ruby]
+    filter {
+      prune {
+        whitelist_names => [ "method", "(referrer|status)", "${some}_field" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-whitelist_values"]
+===== `whitelist_values`
+
+  * Value type is <>
+  * Default value is `{}`
+
+Include specified fields only if their values match one of the supplied regular expressions.
+If a field value is an array, each array item is matched against the regular expressions, and only matching array items are included.
+[source,ruby]
+    filter {
+      prune {
+        whitelist_values => [ "uripath", "/index.php",
+                              "method", "(GET|POST)",
+                              "status", "^[^2]" ]
+      }
+    }
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/punct-index.asciidoc b/docs/versioned-plugins/filters/punct-index.asciidoc
new file mode 100644
index 000000000..f58b661b6
--- /dev/null
+++ b/docs/versioned-plugins/filters/punct-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: punct
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::punct-v2.0.6.asciidoc[]
+include::punct-v2.0.5.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/punct-v2.0.5.asciidoc b/docs/versioned-plugins/filters/punct-v2.0.5.asciidoc
new file mode 100644
index 000000000..d7e75fe4e
--- /dev/null
+++ b/docs/versioned-plugins/filters/punct-v2.0.5.asciidoc
@@ -0,0 +1,62 @@
+:plugin: punct
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.5
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-punct/blob/v2.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Punct filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Strip everything but punctuation from a field and store the remainder in
+a separate field. This is often used for fingerprinting log events.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Punct Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+  * Value type is <>
+  * Default value is `"message"`
+
+The field reference to use for punctuation stripping.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+  * Value type is <>
+  * Default value is `"punct"`
+
+The field in which to store the result.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/punct-v2.0.6.asciidoc b/docs/versioned-plugins/filters/punct-v2.0.6.asciidoc
new file mode 100644
index 000000000..6386463c3
--- /dev/null
+++ b/docs/versioned-plugins/filters/punct-v2.0.6.asciidoc
@@ -0,0 +1,62 @@
+:plugin: punct
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.6
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-punct/blob/v2.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Punct filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Strip everything but punctuation from a field and store the remainder in
+a separate field. This is often used for fingerprinting log events.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Punct Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+  * Value type is <>
+  * Default value is `"message"`
+
+The field reference to use for punctuation stripping.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+  * Value type is <>
+  * Default value is `"punct"`
+
+The field in which to store the result.
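+
+As a quick illustration (a minimal sketch; the `fingerprint` target name
+is hypothetical), combining both options looks like this:
+[source,ruby]
+    filter {
+      punct {
+        source => "message"
+        target => "fingerprint"
+      }
+    }
+
+With a `message` of `Failed login for "admin" (attempt 3)!`, the
+`fingerprint` field would contain only the punctuation characters,
+e.g. `""()!`, which tends to be stable across events of the same shape.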
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/range-index.asciidoc b/docs/versioned-plugins/filters/range-index.asciidoc
new file mode 100644
index 000000000..b001173c2
--- /dev/null
+++ b/docs/versioned-plugins/filters/range-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: range
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::range-v3.0.3.asciidoc[]
+include::range-v3.0.2.asciidoc[]
+include::range-v3.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/range-v3.0.1.asciidoc b/docs/versioned-plugins/filters/range-v3.0.1.asciidoc
new file mode 100644
index 000000000..65b94be6a
--- /dev/null
+++ b/docs/versioned-plugins/filters/range-v3.0.1.asciidoc
@@ -0,0 +1,89 @@
+:plugin: range
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-range/blob/v3.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Range filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This filter is used to check that certain fields are within expected size/length ranges.
+Supported types are numbers and strings.
+Numbers are checked to be within a numeric value range.
+Strings are checked to be within a string length range.
+More than one range can be specified for the same field name; actions will be applied incrementally.
+When a field value is within a specified range, an action will be taken.
+Supported actions are drop event, add tag, or add field with a specified value.
+
+Example use cases are histogram-like tagging of events,
+finding anomalous values in fields, or dropping events that are too big.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Range Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-negate>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ranges>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-negate"]
+===== `negate`
+
+  * Value type is <>
+  * Default value is `false`
+
+Negate the range match logic; events should be outside of the specified range to match.
+
+[id="{version}-plugins-{type}s-{plugin}-ranges"]
+===== `ranges`
+
+  * Value type is <>
+  * Default value is `[]`
+
+An array of field, min, max, action tuples.
+Example:
+[source,ruby]
+    filter {
+      range {
+        ranges => [ "message", 0, 10, "tag:short",
+                    "message", 11, 100, "tag:medium",
+                    "message", 101, 1000, "tag:long",
+                    "message", 1001, 1e1000, "drop",
+                    "duration", 0, 100, "field:latency:fast",
+                    "duration", 101, 200, "field:latency:normal",
+                    "duration", 201, 1000, "field:latency:slow",
+                    "duration", 1001, 1e1000, "field:latency:outlier",
+                    "requests", 0, 10, "tag:too_few_%{host}_requests" ]
+      }
+    }
+
+Supported actions are `drop`, `tag`, or `field` with a specified value.
+Added tag names, field names, and field values can have `%{dynamic}` values.
+
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/range-v3.0.2.asciidoc b/docs/versioned-plugins/filters/range-v3.0.2.asciidoc
new file mode 100644
index 000000000..da8bc0e56
--- /dev/null
+++ b/docs/versioned-plugins/filters/range-v3.0.2.asciidoc
@@ -0,0 +1,89 @@
+:plugin: range
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.2
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-range/blob/v3.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Range filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This filter is used to check that certain fields are within expected size/length ranges.
+Supported types are numbers and strings.
+Numbers are checked to be within a numeric value range.
+Strings are checked to be within a string length range.
+More than one range can be specified for the same field name; actions will be applied incrementally.
+When a field value is within a specified range, an action will be taken.
+Supported actions are drop event, add tag, or add field with a specified value.
+
+Example use cases are histogram-like tagging of events,
+finding anomalous values in fields, or dropping events that are too big.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Range Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-negate>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ranges>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-negate"]
+===== `negate`
+
+  * Value type is <>
+  * Default value is `false`
+
+Negate the range match logic; events should be outside of the specified range to match.
+
+[id="{version}-plugins-{type}s-{plugin}-ranges"]
+===== `ranges`
+
+  * Value type is <>
+  * Default value is `[]`
+
+An array of field, min, max, action tuples.
+Example:
+[source,ruby]
+    filter {
+      range {
+        ranges => [ "message", 0, 10, "tag:short",
+                    "message", 11, 100, "tag:medium",
+                    "message", 101, 1000, "tag:long",
+                    "message", 1001, 1e1000, "drop",
+                    "duration", 0, 100, "field:latency:fast",
+                    "duration", 101, 200, "field:latency:normal",
+                    "duration", 201, 1000, "field:latency:slow",
+                    "duration", 1001, 1e1000, "field:latency:outlier",
+                    "requests", 0, 10, "tag:too_few_%{host}_requests" ]
+      }
+    }
+
+Supported actions are `drop`, `tag`, or `field` with a specified value.
+Added tag names, field names, and field values can have `%{dynamic}` values.
+
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/range-v3.0.3.asciidoc b/docs/versioned-plugins/filters/range-v3.0.3.asciidoc
new file mode 100644
index 000000000..524e9e30c
--- /dev/null
+++ b/docs/versioned-plugins/filters/range-v3.0.3.asciidoc
@@ -0,0 +1,89 @@
+:plugin: range
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-range/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Range filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This filter is used to check that certain fields are within expected size/length ranges.
+Supported types are numbers and strings.
+Numbers are checked to be within a numeric value range.
+Strings are checked to be within a string length range.
+More than one range can be specified for the same field name; actions will be applied incrementally.
+When a field value is within a specified range, an action will be taken.
+Supported actions are drop event, add tag, or add field with a specified value.
+
+Example use cases are histogram-like tagging of events,
+finding anomalous values in fields, or dropping events that are too big.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Range Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-negate>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ranges>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-negate"]
+===== `negate`
+
+  * Value type is <>
+  * Default value is `false`
+
+Negate the range match logic; events should be outside of the specified range to match.
+
+[id="{version}-plugins-{type}s-{plugin}-ranges"]
+===== `ranges`
+
+  * Value type is <>
+  * Default value is `[]`
+
+An array of field, min, max, action tuples.
+Example:
+[source,ruby]
+    filter {
+      range {
+        ranges => [ "message", 0, 10, "tag:short",
+                    "message", 11, 100, "tag:medium",
+                    "message", 101, 1000, "tag:long",
+                    "message", 1001, 1e1000, "drop",
+                    "duration", 0, 100, "field:latency:fast",
+                    "duration", 101, 200, "field:latency:normal",
+                    "duration", 201, 1000, "field:latency:slow",
+                    "duration", 1001, 1e1000, "field:latency:outlier",
+                    "requests", 0, 10, "tag:too_few_%{host}_requests" ]
+      }
+    }
+
+Supported actions are `drop`, `tag`, or `field` with a specified value.
+Added tag names, field names, and field values can have `%{dynamic}` values.
+
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/ruby-index.asciidoc b/docs/versioned-plugins/filters/ruby-index.asciidoc
new file mode 100644
index 000000000..b8c9bd579
--- /dev/null
+++ b/docs/versioned-plugins/filters/ruby-index.asciidoc
@@ -0,0 +1,22 @@
+:plugin: ruby
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-12-06
+| <> | 2017-11-28
+| <> | 2017-11-07
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::ruby-v3.1.3.asciidoc[]
+include::ruby-v3.1.2.asciidoc[]
+include::ruby-v3.1.1.asciidoc[]
+include::ruby-v3.1.0.asciidoc[]
+include::ruby-v3.0.4.asciidoc[]
+include::ruby-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/ruby-v3.0.3.asciidoc b/docs/versioned-plugins/filters/ruby-v3.0.3.asciidoc
new file mode 100644
index 000000000..c5699b506
--- /dev/null
+++ b/docs/versioned-plugins/filters/ruby-v3.0.3.asciidoc
@@ -0,0 +1,82 @@
+:plugin: ruby
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-ruby/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Ruby filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Execute Ruby code.
+
+For example, to cancel 90% of events, you can do this:
+[source,ruby]
+    filter {
+      ruby {
+        # Cancel 90% of events
+        code => "event.cancel if rand <= 0.90"
+      }
+    }
+
+If you need to create additional events, it cannot be done as in other filters where you would use `yield`;
+you must instead use the specific syntax `new_event_block.call(event)`, as in this example that duplicates the input event:
+[source,ruby]
+filter {
+  ruby {
+    code => "new_event_block.call(event.clone)"
+  }
+}
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Ruby Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-code>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-init>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-code"]
+===== `code`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The code to execute for every event.
+You will have an `event` variable available that is the event itself. See the <> for more information.
+
+[id="{version}-plugins-{type}s-{plugin}-init"]
+===== `init`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Any code to execute at Logstash startup time.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/ruby-v3.0.4.asciidoc b/docs/versioned-plugins/filters/ruby-v3.0.4.asciidoc
new file mode 100644
index 000000000..b73bf0a05
--- /dev/null
+++ b/docs/versioned-plugins/filters/ruby-v3.0.4.asciidoc
@@ -0,0 +1,82 @@
+:plugin: ruby
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-ruby/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Ruby filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Execute Ruby code.
+
+For example, to cancel 90% of events, you can do this:
+[source,ruby]
+    filter {
+      ruby {
+        # Cancel 90% of events
+        code => "event.cancel if rand <= 0.90"
+      }
+    }
+
+If you need to create additional events, it cannot be done as in other filters where you would use `yield`;
+you must instead use the specific syntax `new_event_block.call(event)`, as in this example that duplicates the input event:
+[source,ruby]
+filter {
+  ruby {
+    code => "new_event_block.call(event.clone)"
+  }
+}
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Ruby Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-code>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-init>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-code"]
+===== `code`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The code to execute for every event.
+You will have an `event` variable available that is the event itself. See the <> for more information.
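+
+For instance, a minimal sketch (the `took_ms` and `took_s` field names
+are hypothetical) using the event's `get` and `set` methods:
+[source,ruby]
+    filter {
+      ruby {
+        # derive a seconds field from a milliseconds field, when present
+        code => "event.set('took_s', event.get('took_ms').to_f / 1000) if event.get('took_ms')"
+      }
+    }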
+ +[id="{version}-plugins-{type}s-{plugin}-init"] +===== `init` + + * Value type is <> + * There is no default value for this setting. + +Any code to execute at logstash startup-time + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/ruby-v3.1.0.asciidoc b/docs/versioned-plugins/filters/ruby-v3.1.0.asciidoc new file mode 100644 index 000000000..36a09bfe3 --- /dev/null +++ b/docs/versioned-plugins/filters/ruby-v3.1.0.asciidoc @@ -0,0 +1,192 @@ +:plugin: ruby +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.0 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-ruby/blob/v3.1.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Ruby filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Execute ruby code. This filter accepts inline ruby code or a ruby file. +The two options are mutually exclusive and have slightly different ways of working, +which are described below. + +===== Inline ruby code + +To inline ruby in your filter, place all code in the `code` option. This code will be executed for every event the filter receives. You can also place ruby code in the `init` option - it will be executed only once during the plugin's register phase. + +For example, to cancel 90% of events, you can do this: +[source,ruby] + filter { + ruby { + # Cancel 90% of events + code => "event.cancel if rand <= 0.90" + } + } + +If you need to create additional events, you must use a specific syntax `new_event_block.call(event)` like in this example duplicating the input event +[source,ruby] +filter { + ruby { + code => "new_event_block.call(event.clone)" + } +} + +===== Using a Ruby script file + +As the inline code can become complex and hard to structure inside of a text string in `code`, it's then preferrable to place the Ruby code in a .rb file, using the `path` option. + +[source,ruby] + filter { + ruby { + # Cancel 90% of events + path => "/etc/logstash/drop_percentage.rb" + script_params => { "percentage" => 0.9 } + } + } + +The ruby script file should define the following methods: + + * `register(params)`: An optional register method that receives the key/value hash passed in the `script_params` configuration option + * `filter(event)`: A mandatory Ruby method that accepts a Logstash event and must return an array of events + +Below is an example implementation of the `drop_percentage.rb` ruby script that drops a configurable percentage of events: + +[source,ruby] +---- +# the value of `params` is the value of the hash passed to `script_params` +# in the logstash configuration +def register(params) + @drop_percentage = params["percentage"] +end + +# the filter method receives an event and must return a list of events. 
+# Dropping an event means not including it in the return array,
+# while creating new ones only requires you to add a new instance of
+# LogStash::Event to the returned array
+def filter(event)
+  if rand >= @drop_percentage
+    return [event]
+  else
+    return [] # return empty array to cancel event
+  end
+end
+----
+
+====== Testing the Ruby script
+
+To validate the behaviour of the `filter` method you implemented,
+the Ruby filter plugin provides an inline test framework where you
+can assert expectations.
+The tests you define will run when the pipeline is created and will
+prevent it from starting if a test fails.
+
+You can also verify that the tests pass using the Logstash `-t` flag.
+
+For the example above, you can add the following test at the bottom of
+the `drop_percentage.rb` script:
+
+[source,ruby]
+----
+def register(params)
+  # ..
+end
+
+def filter(event)
+  # ..
+end
+
+test "drop percentage 100%" do
+  parameters do
+    { "percentage" => 1 }
+  end
+
+  in_event { { "message" => "hello" } }
+
+  expect("drops the event") do |events|
+    events.size == 0
+  end
+end
+----
+
+We can now test that the Ruby script we're using is implemented correctly:
+
+[source]
+----
+% bin/logstash -e "filter { ruby { path => '/etc/logstash/drop_percentage.rb' script_params => { 'percentage' => 0.5 } } }" -t
+[2017-10-13T13:44:29,723][INFO ][logstash.filters.ruby.script] Test run complete {:script_path=>"/etc/logstash/drop_percentage.rb", :results=>{:passed=>1, :failed=>0, :errored=>0}}
+Configuration OK
+[2017-10-13T13:44:29,887][INFO ][logstash.runner          ] Using config.test_and_exit mode. Config Validation Result: OK. Exiting Logstash
+----
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Ruby Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-code>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-init>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-script_params>> |<>,{}|No
+| <<{version}-plugins-{type}s-{plugin}-tag_on_exception>> |<>,_rubyexception|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-code"]
+===== `code`
+
+  * Value type is <>
+  * There is no default value for this setting.
+  * This setting cannot be used together with `path`.
+
+The code to execute for every event.
+You will have an `event` variable available that is the event itself. See the <> for more information.
+
+[id="{version}-plugins-{type}s-{plugin}-init"]
+===== `init`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Any code to execute at Logstash startup time.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+  * Value type is <>
+  * There is no default value for this setting.
+  * This setting cannot be used together with `code`.
+
+The path of the Ruby script file that implements the `filter` method.
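+
+To make the contract concrete, here is a minimal sketch of a script usable
+with `path` (the file name `normalize_level.rb` and the `level` field are
+hypothetical): `register` is optional, and `filter` must always return an
+array of events.
+
+[source,ruby]
+----
+# normalize_level.rb -- uppercase a configurable field on each event
+def register(params)
+  # fall back to "level" if no script_params are given
+  @field = params.fetch("field", "level")
+end
+
+def filter(event)
+  value = event.get(@field)
+  event.set(@field, value.upcase) if value.is_a?(String)
+  [event] # always return an array of events
+end
+----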
+ +[id="{version}-plugins-{type}s-{plugin}-script_params"] +===== `script_params` + + * Value type is <> + * Default value is `{}` + +A key/value hash with parameters that are passed to the register method +of your ruby script file defined in `path`. + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/ruby-v3.1.1.asciidoc b/docs/versioned-plugins/filters/ruby-v3.1.1.asciidoc new file mode 100644 index 000000000..4ba628eb6 --- /dev/null +++ b/docs/versioned-plugins/filters/ruby-v3.1.1.asciidoc @@ -0,0 +1,192 @@ +:plugin: ruby +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.1 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-ruby/blob/v3.1.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Ruby filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Execute ruby code. This filter accepts inline ruby code or a ruby file. +The two options are mutually exclusive and have slightly different ways of working, +which are described below. + +===== Inline ruby code + +To inline ruby in your filter, place all code in the `code` option. This code will be executed for every event the filter receives. You can also place ruby code in the `init` option - it will be executed only once during the plugin's register phase. + +For example, to cancel 90% of events, you can do this: +[source,ruby] + filter { + ruby { + # Cancel 90% of events + code => "event.cancel if rand <= 0.90" + } + } + +If you need to create additional events, you must use a specific syntax `new_event_block.call(event)` like in this example duplicating the input event +[source,ruby] +filter { + ruby { + code => "new_event_block.call(event.clone)" + } +} + +===== Using a Ruby script file + +As the inline code can become complex and hard to structure inside of a text string in `code`, it's then preferrable to place the Ruby code in a .rb file, using the `path` option. + +[source,ruby] + filter { + ruby { + # Cancel 90% of events + path => "/etc/logstash/drop_percentage.rb" + script_params => { "percentage" => 0.9 } + } + } + +The ruby script file should define the following methods: + + * `register(params)`: An optional register method that receives the key/value hash passed in the `script_params` configuration option + * `filter(event)`: A mandatory Ruby method that accepts a Logstash event and must return an array of events + +Below is an example implementation of the `drop_percentage.rb` ruby script that drops a configurable percentage of events: + +[source,ruby] +---- +# the value of `params` is the value of the hash passed to `script_params` +# in the logstash configuration +def register(params) + @drop_percentage = params["percentage"] +end + +# the filter method receives an event and must return a list of events. 
+# Dropping an event means not including it in the return array,
+# while creating new ones only requires you to add a new instance of
+# LogStash::Event to the returned array
+def filter(event)
+  if rand >= @drop_percentage
+    return [event]
+  else
+    return [] # return empty array to cancel event
+  end
+end
+----
+
+====== Testing the Ruby script
+
+To validate the behaviour of the `filter` method you implemented,
+the Ruby filter plugin provides an inline test framework where you
+can assert expectations.
+The tests you define will run when the pipeline is created and will
+prevent it from starting if a test fails.
+
+You can also verify that the tests pass using the Logstash `-t` flag.
+
+For the example above, you can add the following test at the bottom of
+the `drop_percentage.rb` script:
+
+[source,ruby]
+----
+def register(params)
+  # ..
+end
+
+def filter(event)
+  # ..
+end
+
+test "drop percentage 100%" do
+  parameters do
+    { "percentage" => 1 }
+  end
+
+  in_event { { "message" => "hello" } }
+
+  expect("drops the event") do |events|
+    events.size == 0
+  end
+end
+----
+
+We can now test that the Ruby script we're using is implemented correctly:
+
+[source]
+----
+% bin/logstash -e "filter { ruby { path => '/etc/logstash/drop_percentage.rb' script_params => { 'percentage' => 0.5 } } }" -t
+[2017-10-13T13:44:29,723][INFO ][logstash.filters.ruby.script] Test run complete {:script_path=>"/etc/logstash/drop_percentage.rb", :results=>{:passed=>1, :failed=>0, :errored=>0}}
+Configuration OK
+[2017-10-13T13:44:29,887][INFO ][logstash.runner          ] Using config.test_and_exit mode. Config Validation Result: OK. Exiting Logstash
+----
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Ruby Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-code>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-init>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-script_params>> |<>,{}|No
+| <<{version}-plugins-{type}s-{plugin}-tag_on_exception>> |<>,_rubyexception|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-code"]
+===== `code`
+
+  * Value type is <>
+  * There is no default value for this setting.
+  * This setting cannot be used together with `path`.
+
+The code to execute for every event.
+You will have an `event` variable available that is the event itself. See the <> for more information.
+
+[id="{version}-plugins-{type}s-{plugin}-init"]
+===== `init`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Any code to execute at Logstash startup time.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+  * Value type is <>
+  * There is no default value for this setting.
+  * This setting cannot be used together with `code`.
+
+The path of the Ruby script file that implements the `filter` method.
+ +[id="{version}-plugins-{type}s-{plugin}-script_params"] +===== `script_params` + + * Value type is <> + * Default value is `{}` + +A key/value hash with parameters that are passed to the register method +of your ruby script file defined in `path`. + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/ruby-v3.1.2.asciidoc b/docs/versioned-plugins/filters/ruby-v3.1.2.asciidoc new file mode 100644 index 000000000..ad4fc8d37 --- /dev/null +++ b/docs/versioned-plugins/filters/ruby-v3.1.2.asciidoc @@ -0,0 +1,192 @@ +:plugin: ruby +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.2 +:release_date: 2017-11-28 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-ruby/blob/v3.1.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Ruby filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Execute ruby code. This filter accepts inline ruby code or a ruby file. +The two options are mutually exclusive and have slightly different ways of working, +which are described below. + +===== Inline ruby code + +To inline ruby in your filter, place all code in the `code` option. This code will be executed for every event the filter receives. You can also place ruby code in the `init` option - it will be executed only once during the plugin's register phase. + +For example, to cancel 90% of events, you can do this: +[source,ruby] + filter { + ruby { + # Cancel 90% of events + code => "event.cancel if rand <= 0.90" + } + } + +If you need to create additional events, you must use a specific syntax `new_event_block.call(event)` like in this example duplicating the input event +[source,ruby] +filter { + ruby { + code => "new_event_block.call(event.clone)" + } +} + +===== Using a Ruby script file + +As the inline code can become complex and hard to structure inside of a text string in `code`, it's then preferrable to place the Ruby code in a .rb file, using the `path` option. + +[source,ruby] + filter { + ruby { + # Cancel 90% of events + path => "/etc/logstash/drop_percentage.rb" + script_params => { "percentage" => 0.9 } + } + } + +The ruby script file should define the following methods: + + * `register(params)`: An optional register method that receives the key/value hash passed in the `script_params` configuration option + * `filter(event)`: A mandatory Ruby method that accepts a Logstash event and must return an array of events + +Below is an example implementation of the `drop_percentage.rb` ruby script that drops a configurable percentage of events: + +[source,ruby] +---- +# the value of `params` is the value of the hash passed to `script_params` +# in the logstash configuration +def register(params) + @drop_percentage = params["percentage"] +end + +# the filter method receives an event and must return a list of events. 
+# Dropping an event means not including it in the return array,
+# while creating new ones only requires you to add a new instance of
+# LogStash::Event to the returned array
+def filter(event)
+  if rand >= @drop_percentage
+    return [event]
+  else
+    return [] # return empty array to cancel event
+  end
+end
+----
+
+====== Testing the Ruby script
+
+To validate the behaviour of the `filter` method you implemented,
+the Ruby filter plugin provides an inline test framework where you
+can assert expectations.
+The tests you define will run when the pipeline is created and will
+prevent it from starting if a test fails.
+
+You can also verify that the tests pass using the Logstash `-t` flag.
+
+For the example above, you can add the following test at the bottom of
+the `drop_percentage.rb` script:
+
+[source,ruby]
+----
+def register(params)
+  # ..
+end
+
+def filter(event)
+  # ..
+end
+
+test "drop percentage 100%" do
+  parameters do
+    { "percentage" => 1 }
+  end
+
+  in_event { { "message" => "hello" } }
+
+  expect("drops the event") do |events|
+    events.size == 0
+  end
+end
+----
+
+We can now test that the Ruby script we're using is implemented correctly:
+
+[source]
+----
+% bin/logstash -e "filter { ruby { path => '/etc/logstash/drop_percentage.rb' script_params => { 'percentage' => 0.5 } } }" -t
+[2017-10-13T13:44:29,723][INFO ][logstash.filters.ruby.script] Test run complete {:script_path=>"/etc/logstash/drop_percentage.rb", :results=>{:passed=>1, :failed=>0, :errored=>0}}
+Configuration OK
+[2017-10-13T13:44:29,887][INFO ][logstash.runner          ] Using config.test_and_exit mode. Config Validation Result: OK. Exiting Logstash
+----
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Ruby Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-code>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-init>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-script_params>> |<>,{}|No
+| <<{version}-plugins-{type}s-{plugin}-tag_on_exception>> |<>,_rubyexception|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-code"]
+===== `code`
+
+  * Value type is <>
+  * There is no default value for this setting.
+  * This setting cannot be used together with `path`.
+
+The code to execute for every event.
+You will have an `event` variable available that is the event itself. See the <> for more information.
+
+[id="{version}-plugins-{type}s-{plugin}-init"]
+===== `init`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Any code to execute at Logstash startup time.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+  * Value type is <>
+  * There is no default value for this setting.
+  * This setting cannot be used together with `code`.
+
+The path of the Ruby script file that implements the `filter` method.
+ +[id="{version}-plugins-{type}s-{plugin}-script_params"] +===== `script_params` + + * Value type is <> + * Default value is `{}` + +A key/value hash with parameters that are passed to the register method +of your ruby script file defined in `path`. + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/ruby-v3.1.3.asciidoc b/docs/versioned-plugins/filters/ruby-v3.1.3.asciidoc new file mode 100644 index 000000000..2f2cfbd53 --- /dev/null +++ b/docs/versioned-plugins/filters/ruby-v3.1.3.asciidoc @@ -0,0 +1,201 @@ +:plugin: ruby +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.3 +:release_date: 2017-12-06 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-ruby/blob/v3.1.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Ruby filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Execute ruby code. This filter accepts inline ruby code or a ruby file. +The two options are mutually exclusive and have slightly different ways of working, +which are described below. + +===== Inline ruby code + +To inline ruby in your filter, place all code in the `code` option. This code will be executed for every event the filter receives. You can also place ruby code in the `init` option - it will be executed only once during the plugin's register phase. + +For example, to cancel 90% of events, you can do this: +[source,ruby] + filter { + ruby { + # Cancel 90% of events + code => "event.cancel if rand <= 0.90" + } + } + +If you need to create additional events, you must use a specific syntax `new_event_block.call(event)` like in this example duplicating the input event +[source,ruby] +filter { + ruby { + code => "new_event_block.call(event.clone)" + } +} + +===== Using a Ruby script file + +As the inline code can become complex and hard to structure inside of a text string in `code`, it's then preferrable to place the Ruby code in a .rb file, using the `path` option. + +[source,ruby] + filter { + ruby { + # Cancel 90% of events + path => "/etc/logstash/drop_percentage.rb" + script_params => { "percentage" => 0.9 } + } + } + +The ruby script file should define the following methods: + + * `register(params)`: An optional register method that receives the key/value hash passed in the `script_params` configuration option + * `filter(event)`: A mandatory Ruby method that accepts a Logstash event and must return an array of events + +Below is an example implementation of the `drop_percentage.rb` ruby script that drops a configurable percentage of events: + +[source,ruby] +---- +# the value of `params` is the value of the hash passed to `script_params` +# in the logstash configuration +def register(params) + @drop_percentage = params["percentage"] +end + +# the filter method receives an event and must return a list of events. 
+# Dropping an event means not including it in the return array,
+# while creating new ones only requires you to add a new instance of
+# LogStash::Event to the returned array
+def filter(event)
+  if rand >= @drop_percentage
+    return [event]
+  else
+    return [] # return empty array to cancel event
+  end
+end
+----
+
+===== Testing the Ruby script
+
+To validate the behaviour of the `filter` method you implemented,
+the Ruby filter plugin provides an inline test framework where you
+can assert expectations.
+The tests you define will run when the pipeline is created and will
+prevent it from starting if a test fails.
+
+You can also verify that the tests pass using the Logstash `-t` flag.
+
+For the example above, you can add the following test at the bottom of
+the `drop_percentage.rb` script:
+
+[source,ruby]
+----
+def register(params)
+  # ..
+end
+
+def filter(event)
+  # ..
+end
+
+test "drop percentage 100%" do
+  parameters do
+    { "percentage" => 1 }
+  end
+
+  in_event { { "message" => "hello" } }
+
+  expect("drops the event") do |events|
+    events.size == 0
+  end
+end
+----
+
+We can now test that the Ruby script we're using is implemented correctly:
+
+[source,shell]
+----
+% bin/logstash -e "filter { ruby { path => '/etc/logstash/drop_percentage.rb' script_params => { 'percentage' => 0.5 } } }" -t
+[2017-10-13T13:44:29,723][INFO ][logstash.filters.ruby.script] Test run complete {:script_path=>"/etc/logstash/drop_percentage.rb", :results=>{:passed=>1, :failed=>0, :errored=>0}}
+Configuration OK
+[2017-10-13T13:44:29,887][INFO ][logstash.runner          ] Using config.test_and_exit mode. Config Validation Result: OK. Exiting Logstash
+----
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Ruby Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-code>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-init>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-script_params>> |<>,{}|No
+| <<{version}-plugins-{type}s-{plugin}-tag_on_exception>> |<>,_rubyexception|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-code"]
+===== `code`
+
+  * Value type is <>
+  * There is no default value for this setting.
+  * This setting cannot be used together with `path`.
+
+The code to execute for every event.
+You will have an `event` variable available that is the event itself. See the <> for more information.
+
+[id="{version}-plugins-{type}s-{plugin}-init"]
+===== `init`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Any code to execute at Logstash startup time.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+  * Value type is <>
+  * There is no default value for this setting.
+  * This setting cannot be used together with `code`.
+
+The path of the Ruby script file that implements the `filter` method.
+ +[id="{version}-plugins-{type}s-{plugin}-script_params"] +===== `script_params` + + * Value type is <> + * Default value is `{}` + +A key/value hash with parameters that are passed to the register method +of your ruby script file defined in `path`. + +[id="{version}-plugins-{type}s-{plugin}-tag_on_exception"] +===== `tag_on_exception` + + * Value type is <> + * Default value is `_rubyexception` + +Tag to add to events in case the ruby code (either inline or file based) +causes an exception. + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/script-index.asciidoc b/docs/versioned-plugins/filters/script-index.asciidoc new file mode 100644 index 000000000..30723eb68 --- /dev/null +++ b/docs/versioned-plugins/filters/script-index.asciidoc @@ -0,0 +1,10 @@ +:plugin: script +:type: filter + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +|======================================================================= + + diff --git a/docs/versioned-plugins/filters/sleep-index.asciidoc b/docs/versioned-plugins/filters/sleep-index.asciidoc new file mode 100644 index 000000000..b5cf4bb8f --- /dev/null +++ b/docs/versioned-plugins/filters/sleep-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: sleep +:type: filter + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::sleep-v3.0.6.asciidoc[] +include::sleep-v3.0.5.asciidoc[] +include::sleep-v3.0.4.asciidoc[] + diff --git a/docs/versioned-plugins/filters/sleep-v3.0.4.asciidoc b/docs/versioned-plugins/filters/sleep-v3.0.4.asciidoc new file mode 100644 index 000000000..a23d30cbd --- /dev/null +++ b/docs/versioned-plugins/filters/sleep-v3.0.4.asciidoc @@ -0,0 +1,119 @@ +:plugin: sleep +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-sleep/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Sleep filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Sleep a given amount of time. This will cause logstash +to stall for the given amount of time. This is useful +for rate limiting, etc. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Sleep Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-every>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-replay>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-time>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-every"] +===== `every` + + * Value type is <> + * Default value is `1` + +Sleep on every N'th. This option is ignored in replay mode. + +Example: +[source,ruby] + filter { + sleep { + time => "1" # Sleep 1 second + every => 10 # on every 10th event + } + } + +[id="{version}-plugins-{type}s-{plugin}-replay"] +===== `replay` + + * Value type is <> + * Default value is `false` + +Enable replay mode. + +Replay mode tries to sleep based on timestamps in each event. + +The amount of time to sleep is computed by subtracting the +previous event's timestamp from the current event's timestamp. +This helps you replay events in the same timeline as original. + +If you specify a `time` setting as well, this filter will +use the `time` value as a speed modifier. For example, +a `time` value of 2 will replay at double speed, while a +value of 0.25 will replay at 1/4th speed. + +For example: +[source,ruby] + filter { + sleep { + time => 2 + replay => true + } + } + +The above will sleep in such a way that it will perform +replay 2-times faster than the original time speed. + +[id="{version}-plugins-{type}s-{plugin}-time"] +===== `time` + + * Value type is <> + * There is no default value for this setting. + +The length of time to sleep, in seconds, for every event. + +This can be a number (eg, 0.5), or a string (eg, `%{foo}`) +The second form (string with a field value) is useful if +you have an attribute of your event that you want to use +to indicate the amount of time to sleep. + +Example: +[source,ruby] + filter { + sleep { + # Sleep 1 second for every event. + time => "1" + } + } + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/sleep-v3.0.5.asciidoc b/docs/versioned-plugins/filters/sleep-v3.0.5.asciidoc new file mode 100644 index 000000000..00d28a121 --- /dev/null +++ b/docs/versioned-plugins/filters/sleep-v3.0.5.asciidoc @@ -0,0 +1,119 @@ +:plugin: sleep +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-sleep/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Sleep filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Sleep a given amount of time. This will cause logstash +to stall for the given amount of time. This is useful +for rate limiting, etc. 
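+
+For instance, a minimal sketch of the field-driven form of `time` (it assumes
+each event carries a numeric `sleep_time` field; the field name is
+illustrative):
+
+[source,ruby]
+----
+filter {
+  sleep {
+    # Sleep for the number of seconds given in each event's sleep_time field.
+    time => "%{sleep_time}"
+  }
+}
+----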
+ + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Sleep Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-every>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-replay>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-time>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-every"] +===== `every` + + * Value type is <> + * Default value is `1` + +Sleep on every N'th. This option is ignored in replay mode. + +Example: +[source,ruby] + filter { + sleep { + time => "1" # Sleep 1 second + every => 10 # on every 10th event + } + } + +[id="{version}-plugins-{type}s-{plugin}-replay"] +===== `replay` + + * Value type is <> + * Default value is `false` + +Enable replay mode. + +Replay mode tries to sleep based on timestamps in each event. + +The amount of time to sleep is computed by subtracting the +previous event's timestamp from the current event's timestamp. +This helps you replay events in the same timeline as original. + +If you specify a `time` setting as well, this filter will +use the `time` value as a speed modifier. For example, +a `time` value of 2 will replay at double speed, while a +value of 0.25 will replay at 1/4th speed. + +For example: +[source,ruby] + filter { + sleep { + time => 2 + replay => true + } + } + +The above will sleep in such a way that it will perform +replay 2-times faster than the original time speed. + +[id="{version}-plugins-{type}s-{plugin}-time"] +===== `time` + + * Value type is <> + * There is no default value for this setting. + +The length of time to sleep, in seconds, for every event. + +This can be a number (eg, 0.5), or a string (eg, `%{foo}`) +The second form (string with a field value) is useful if +you have an attribute of your event that you want to use +to indicate the amount of time to sleep. + +Example: +[source,ruby] + filter { + sleep { + # Sleep 1 second for every event. + time => "1" + } + } + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/sleep-v3.0.6.asciidoc b/docs/versioned-plugins/filters/sleep-v3.0.6.asciidoc new file mode 100644 index 000000000..398cdafd1 --- /dev/null +++ b/docs/versioned-plugins/filters/sleep-v3.0.6.asciidoc @@ -0,0 +1,119 @@ +:plugin: sleep +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.6 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-sleep/blob/v3.0.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Sleep filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Sleep a given amount of time. This will cause logstash +to stall for the given amount of time. 
This is useful
+for rate limiting, etc.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Sleep Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-every>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-replay>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-time>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-every"]
+===== `every`
+
+  * Value type is <>
+  * Default value is `1`
+
+Sleep on every Nth event. This option is ignored in replay mode.
+
+Example:
+[source,ruby]
+    filter {
+      sleep {
+        time => "1"   # Sleep 1 second
+        every => 10   # on every 10th event
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-replay"]
+===== `replay`
+
+  * Value type is <>
+  * Default value is `false`
+
+Enable replay mode.
+
+Replay mode tries to sleep based on timestamps in each event.
+
+The amount of time to sleep is computed by subtracting the
+previous event's timestamp from the current event's timestamp.
+This helps you replay events in the same timeline as the original.
+
+If you specify a `time` setting as well, this filter will
+use the `time` value as a speed modifier. For example,
+a `time` value of 2 will replay at double speed, while a
+value of 0.25 will replay at 1/4th speed.
+
+For example:
+[source,ruby]
+    filter {
+      sleep {
+        time => 2
+        replay => true
+      }
+    }
+
+The above will replay events at twice the speed of the
+original timeline.
+
+[id="{version}-plugins-{type}s-{plugin}-time"]
+===== `time`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The length of time to sleep, in seconds, for every event.
+
+This can be a number (e.g., 0.5) or a string (e.g., `%{foo}`).
+The second form (a string referencing a field value) is useful when
+you have an attribute of your event that you want to use
+to indicate the amount of time to sleep.
+
+Example:
+[source,ruby]
+    filter {
+      sleep {
+        # Sleep 1 second for every event.
+ time => "1" + } + } + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/split-index.asciidoc b/docs/versioned-plugins/filters/split-index.asciidoc new file mode 100644 index 000000000..c93ed6dd2 --- /dev/null +++ b/docs/versioned-plugins/filters/split-index.asciidoc @@ -0,0 +1,20 @@ +:plugin: split +:type: filter + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-12-11 +| <> | 2017-11-07 +| <> | 2017-08-15 +| <> | 2017-07-26 +| <> | 2017-06-23 +|======================================================================= + +include::split-v3.1.6.asciidoc[] +include::split-v3.1.5.asciidoc[] +include::split-v3.1.4.asciidoc[] +include::split-v3.1.3.asciidoc[] +include::split-v3.1.2.asciidoc[] + diff --git a/docs/versioned-plugins/filters/split-v3.1.2.asciidoc b/docs/versioned-plugins/filters/split-v3.1.2.asciidoc new file mode 100644 index 000000000..c7ca96658 --- /dev/null +++ b/docs/versioned-plugins/filters/split-v3.1.2.asciidoc @@ -0,0 +1,111 @@ +:plugin: split +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.2 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-split/blob/v3.1.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Split filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The split filter clones an event by splitting one of its fields and +placing each value resulting from the split into a clone of the original +event. The field being split can either be a string or an array. + +An example use case of this filter is for taking output from the +<> which emits one event for +the whole output of a command and splitting that output by newline - +making each line an event. + +Split filter can also be used to split array fields in events into individual events. +A very common pattern in JSON & XML is to make use of lists to group data together. + +For example, a json structure like this: + +[source,js] +---------------------------------- +{ field1: ..., + results: [ + { result ... }, + { result ... }, + { result ... }, + ... +] } +---------------------------------- + +The split filter can be used on the above data to create separate events for each value of `results` field + +[source,js] +---------------------------------- +filter { + split { + field => "results" + } +} +---------------------------------- + +The end result of each split is a complete copy of the event +with only the current split section of the given field changed. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Split Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-field>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-terminator>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-field"] +===== `field` + + * Value type is <> + * Default value is `"message"` + +The field which value is split by the terminator. +Can be a multiline message or the ID of an array. +Nested arrays are referenced like: "[object_id][array_id]" + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +The field within the new event which the value is split into. +If not set, the target field defaults to split field name. + +[id="{version}-plugins-{type}s-{plugin}-terminator"] +===== `terminator` + + * Value type is <> + * Default value is `"\n"` + +The string to split on. This is usually a line terminator, but can be any +string. If you are splitting a JSON array into multiple events, you can ignore this field. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/split-v3.1.3.asciidoc b/docs/versioned-plugins/filters/split-v3.1.3.asciidoc new file mode 100644 index 000000000..57b54043c --- /dev/null +++ b/docs/versioned-plugins/filters/split-v3.1.3.asciidoc @@ -0,0 +1,111 @@ +:plugin: split +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.3 +:release_date: 2017-07-26 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-split/blob/v3.1.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Split filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The split filter clones an event by splitting one of its fields and +placing each value resulting from the split into a clone of the original +event. The field being split can either be a string or an array. + +An example use case of this filter is for taking output from the +<> which emits one event for +the whole output of a command and splitting that output by newline - +making each line an event. + +Split filter can also be used to split array fields in events into individual events. +A very common pattern in JSON & XML is to make use of lists to group data together. + +For example, a json structure like this: + +[source,js] +---------------------------------- +{ field1: ..., + results: [ + { result ... }, + { result ... }, + { result ... }, + ... 
+] } +---------------------------------- + +The split filter can be used on the above data to create separate events for each value of `results` field + +[source,js] +---------------------------------- +filter { + split { + field => "results" + } +} +---------------------------------- + +The end result of each split is a complete copy of the event +with only the current split section of the given field changed. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Split Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-field>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-terminator>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-field"] +===== `field` + + * Value type is <> + * Default value is `"message"` + +The field which value is split by the terminator. +Can be a multiline message or the ID of an array. +Nested arrays are referenced like: "[object_id][array_id]" + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +The field within the new event which the value is split into. +If not set, the target field defaults to split field name. + +[id="{version}-plugins-{type}s-{plugin}-terminator"] +===== `terminator` + + * Value type is <> + * Default value is `"\n"` + +The string to split on. This is usually a line terminator, but can be any +string. If you are splitting a JSON array into multiple events, you can ignore this field. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/split-v3.1.4.asciidoc b/docs/versioned-plugins/filters/split-v3.1.4.asciidoc new file mode 100644 index 000000000..b64e1215b --- /dev/null +++ b/docs/versioned-plugins/filters/split-v3.1.4.asciidoc @@ -0,0 +1,111 @@ +:plugin: split +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.4 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-split/blob/v3.1.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Split filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The split filter clones an event by splitting one of its fields and +placing each value resulting from the split into a clone of the original +event. The field being split can either be a string or an array. + +An example use case of this filter is for taking output from the +<> which emits one event for +the whole output of a command and splitting that output by newline - +making each line an event. + +Split filter can also be used to split array fields in events into individual events. 
+A very common pattern in JSON & XML is to make use of lists to group data together. + +For example, a json structure like this: + +[source,js] +---------------------------------- +{ field1: ..., + results: [ + { result ... }, + { result ... }, + { result ... }, + ... +] } +---------------------------------- + +The split filter can be used on the above data to create separate events for each value of `results` field + +[source,js] +---------------------------------- +filter { + split { + field => "results" + } +} +---------------------------------- + +The end result of each split is a complete copy of the event +with only the current split section of the given field changed. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Split Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-field>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-terminator>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-field"] +===== `field` + + * Value type is <> + * Default value is `"message"` + +The field which value is split by the terminator. +Can be a multiline message or the ID of an array. +Nested arrays are referenced like: "[object_id][array_id]" + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +The field within the new event which the value is split into. +If not set, the target field defaults to split field name. + +[id="{version}-plugins-{type}s-{plugin}-terminator"] +===== `terminator` + + * Value type is <> + * Default value is `"\n"` + +The string to split on. This is usually a line terminator, but can be any +string. If you are splitting a JSON array into multiple events, you can ignore this field. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/split-v3.1.5.asciidoc b/docs/versioned-plugins/filters/split-v3.1.5.asciidoc new file mode 100644 index 000000000..e3ad88491 --- /dev/null +++ b/docs/versioned-plugins/filters/split-v3.1.5.asciidoc @@ -0,0 +1,111 @@ +:plugin: split +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.5 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-split/blob/v3.1.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Split filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The split filter clones an event by splitting one of its fields and +placing each value resulting from the split into a clone of the original +event. The field being split can either be a string or an array. 
+ +An example use case of this filter is for taking output from the +<> which emits one event for +the whole output of a command and splitting that output by newline - +making each line an event. + +Split filter can also be used to split array fields in events into individual events. +A very common pattern in JSON & XML is to make use of lists to group data together. + +For example, a json structure like this: + +[source,js] +---------------------------------- +{ field1: ..., + results: [ + { result ... }, + { result ... }, + { result ... }, + ... +] } +---------------------------------- + +The split filter can be used on the above data to create separate events for each value of `results` field + +[source,js] +---------------------------------- +filter { + split { + field => "results" + } +} +---------------------------------- + +The end result of each split is a complete copy of the event +with only the current split section of the given field changed. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Split Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-field>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-terminator>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-field"] +===== `field` + + * Value type is <> + * Default value is `"message"` + +The field which value is split by the terminator. +Can be a multiline message or the ID of an array. +Nested arrays are referenced like: "[object_id][array_id]" + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +The field within the new event which the value is split into. +If not set, the target field defaults to split field name. + +[id="{version}-plugins-{type}s-{plugin}-terminator"] +===== `terminator` + + * Value type is <> + * Default value is `"\n"` + +The string to split on. This is usually a line terminator, but can be any +string. If you are splitting a JSON array into multiple events, you can ignore this field. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/split-v3.1.6.asciidoc b/docs/versioned-plugins/filters/split-v3.1.6.asciidoc new file mode 100644 index 000000000..3cb704737 --- /dev/null +++ b/docs/versioned-plugins/filters/split-v3.1.6.asciidoc @@ -0,0 +1,111 @@ +:plugin: split +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.6 +:release_date: 2017-12-11 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-split/blob/v3.1.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Split filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The split filter clones an event by splitting one of its fields and
+placing each value resulting from the split into a clone of the original
+event. The field being split can either be a string or an array.
+
+An example use case of this filter is taking output from the
+<>, which emits one event for
+the whole output of a command, and splitting that output by newline,
+making each line an event.
+
+The split filter can also be used to split array fields in events into individual events.
+A very common pattern in JSON and XML is to use lists to group data together.
+
+For example, a JSON structure like this:
+
+[source,js]
+----------------------------------
+{ field1: ...,
+ results: [
+   { result ... },
+   { result ... },
+   { result ... },
+   ...
+] }
+----------------------------------
+
+The split filter can be used on the above data to create separate events for each value of the `results` field:
+
+[source,js]
+----------------------------------
+filter {
+  split {
+    field => "results"
+  }
+}
+----------------------------------
+
+The end result of each split is a complete copy of the event
+with only the current split section of the given field changed.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Split Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-field>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-terminator>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-field"]
+===== `field`
+
+  * Value type is <>
+  * Default value is `"message"`
+
+The field whose value is split by the terminator.
+Can be a multiline message or the ID of an array.
+Nested arrays are referenced like: "[object_id][array_id]"
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The field within the new event into which the value is split.
+If not set, the target field defaults to the split field name.
+
+[id="{version}-plugins-{type}s-{plugin}-terminator"]
+===== `terminator`
+
+  * Value type is <>
+  * Default value is `"\n"`
+
+The string to split on. This is usually a line terminator, but can be any
+string. If you are splitting a JSON array into multiple events, you can ignore this field.
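+
+For instance, a sketch that splits a comma-separated `message` field into one
+event per item (the field and terminator values are illustrative):
+
+[source,ruby]
+----
+filter {
+  split {
+    field => "message"
+    terminator => ","
+  }
+}
+----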
+ + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/syslog_pri-index.asciidoc b/docs/versioned-plugins/filters/syslog_pri-index.asciidoc new file mode 100644 index 000000000..cdb40ada2 --- /dev/null +++ b/docs/versioned-plugins/filters/syslog_pri-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: syslog_pri +:type: filter + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::syslog_pri-v3.0.5.asciidoc[] +include::syslog_pri-v3.0.4.asciidoc[] +include::syslog_pri-v3.0.3.asciidoc[] + diff --git a/docs/versioned-plugins/filters/syslog_pri-v3.0.3.asciidoc b/docs/versioned-plugins/filters/syslog_pri-v3.0.3.asciidoc new file mode 100644 index 000000000..b71b1e7b7 --- /dev/null +++ b/docs/versioned-plugins/filters/syslog_pri-v3.0.3.asciidoc @@ -0,0 +1,85 @@ +:plugin: syslog_pri +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-syslog_pri/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Syslog_pri filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Filter plugin for logstash to parse the `PRI` field from the front +of a Syslog (RFC3164) message. If no priority is set, it will +default to 13 (per RFC). + +This filter is based on the original `syslog.rb` code shipped +with logstash. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Syslog_pri Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-facility_labels>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-severity_labels>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-syslog_pri_field_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-use_labels>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-facility_labels"] +===== `facility_labels` + + * Value type is <> + * Default value is `["kernel", "user-level", "mail", "daemon", "security/authorization", "syslogd", "line printer", "network news", "uucp", "clock", "security/authorization", "ftp", "ntp", "log audit", "log alert", "clock", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7"]` + +Labels for facility levels. This comes from RFC3164. 
+ +[id="{version}-plugins-{type}s-{plugin}-severity_labels"] +===== `severity_labels` + + * Value type is <> + * Default value is `["emergency", "alert", "critical", "error", "warning", "notice", "informational", "debug"]` + +Labels for severity levels. This comes from RFC3164. + +[id="{version}-plugins-{type}s-{plugin}-syslog_pri_field_name"] +===== `syslog_pri_field_name` + + * Value type is <> + * Default value is `"syslog_pri"` + +Name of field which passes in the extracted PRI part of the syslog message + +[id="{version}-plugins-{type}s-{plugin}-use_labels"] +===== `use_labels` + + * Value type is <> + * Default value is `true` + +set the status to experimental/beta/stable +Add human-readable names after parsing severity and facility from PRI + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/syslog_pri-v3.0.4.asciidoc b/docs/versioned-plugins/filters/syslog_pri-v3.0.4.asciidoc new file mode 100644 index 000000000..5a41e66a2 --- /dev/null +++ b/docs/versioned-plugins/filters/syslog_pri-v3.0.4.asciidoc @@ -0,0 +1,85 @@ +:plugin: syslog_pri +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-syslog_pri/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Syslog_pri filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Filter plugin for logstash to parse the `PRI` field from the front +of a Syslog (RFC3164) message. If no priority is set, it will +default to 13 (per RFC). + +This filter is based on the original `syslog.rb` code shipped +with logstash. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Syslog_pri Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-facility_labels>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-severity_labels>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-syslog_pri_field_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-use_labels>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-facility_labels"] +===== `facility_labels` + + * Value type is <> + * Default value is `["kernel", "user-level", "mail", "daemon", "security/authorization", "syslogd", "line printer", "network news", "uucp", "clock", "security/authorization", "ftp", "ntp", "log audit", "log alert", "clock", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7"]` + +Labels for facility levels. This comes from RFC3164. 
+ +[id="{version}-plugins-{type}s-{plugin}-severity_labels"] +===== `severity_labels` + + * Value type is <> + * Default value is `["emergency", "alert", "critical", "error", "warning", "notice", "informational", "debug"]` + +Labels for severity levels. This comes from RFC3164. + +[id="{version}-plugins-{type}s-{plugin}-syslog_pri_field_name"] +===== `syslog_pri_field_name` + + * Value type is <> + * Default value is `"syslog_pri"` + +Name of field which passes in the extracted PRI part of the syslog message + +[id="{version}-plugins-{type}s-{plugin}-use_labels"] +===== `use_labels` + + * Value type is <> + * Default value is `true` + +set the status to experimental/beta/stable +Add human-readable names after parsing severity and facility from PRI + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/syslog_pri-v3.0.5.asciidoc b/docs/versioned-plugins/filters/syslog_pri-v3.0.5.asciidoc new file mode 100644 index 000000000..9a07a466c --- /dev/null +++ b/docs/versioned-plugins/filters/syslog_pri-v3.0.5.asciidoc @@ -0,0 +1,85 @@ +:plugin: syslog_pri +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-syslog_pri/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Syslog_pri filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Filter plugin for logstash to parse the `PRI` field from the front +of a Syslog (RFC3164) message. If no priority is set, it will +default to 13 (per RFC). + +This filter is based on the original `syslog.rb` code shipped +with logstash. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Syslog_pri Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-facility_labels>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-severity_labels>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-syslog_pri_field_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-use_labels>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-facility_labels"] +===== `facility_labels` + + * Value type is <> + * Default value is `["kernel", "user-level", "mail", "daemon", "security/authorization", "syslogd", "line printer", "network news", "uucp", "clock", "security/authorization", "ftp", "ntp", "log audit", "log alert", "clock", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7"]` + +Labels for facility levels. This comes from RFC3164. 
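+
+When `use_labels` is enabled (the default), the facility code parsed from the
+PRI indexes into this list. For example, a PRI of `134` yields facility
+`134 / 8 = 16` (`local0`) and severity `134 % 8 = 6` (`informational`).
+A minimal sketch, assuming the PRI value was already extracted upstream (for
+example by grok) into the default `syslog_pri` field:
+
+[source,ruby]
+----
+filter {
+  syslog_pri {
+    use_labels => true                     # default
+    syslog_pri_field_name => "syslog_pri"  # default
+  }
+}
+----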
+
+[id="{version}-plugins-{type}s-{plugin}-severity_labels"]
+===== `severity_labels`
+
+  * Value type is <>
+  * Default value is `["emergency", "alert", "critical", "error", "warning", "notice", "informational", "debug"]`
+
+Labels for severity levels. This comes from RFC3164.
+
+[id="{version}-plugins-{type}s-{plugin}-syslog_pri_field_name"]
+===== `syslog_pri_field_name`
+
+  * Value type is <>
+  * Default value is `"syslog_pri"`
+
+Name of the field that contains the extracted PRI part of the syslog message.
+
+[id="{version}-plugins-{type}s-{plugin}-use_labels"]
+===== `use_labels`
+
+  * Value type is <>
+  * Default value is `true`
+
+Add human-readable names after parsing severity and facility from PRI.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/throttle-index.asciidoc b/docs/versioned-plugins/filters/throttle-index.asciidoc
new file mode 100644
index 000000000..4c91c2d02
--- /dev/null
+++ b/docs/versioned-plugins/filters/throttle-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: throttle
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::throttle-v4.0.4.asciidoc[]
+include::throttle-v4.0.3.asciidoc[]
+include::throttle-v4.0.2.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/throttle-v4.0.2.asciidoc b/docs/versioned-plugins/filters/throttle-v4.0.2.asciidoc
new file mode 100644
index 000000000..bb982f483
--- /dev/null
+++ b/docs/versioned-plugins/filters/throttle-v4.0.2.asciidoc
@@ -0,0 +1,252 @@
+:plugin: throttle
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-throttle/blob/v4.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Throttle filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The throttle filter is for throttling the number of events. The filter is
+configured with a lower bound, the "before_count", an upper bound, the "after_count",
+and a period of time. All events passing through the filter will be counted based on
+their key and the event timestamp. As long as the count is less than the "before_count"
+or greater than the "after_count", the event will be "throttled", which means the filter
+will be considered successful and any tags or fields will be added (or removed).
+
+The plugin is thread-safe and properly tracks past events.
+
+For example, if you wanted to throttle events so you only receive an event after 2
+occurrences and you get no more than 3 in 10 minutes, you would use the configuration:
+[source,ruby]
+    period => 600
+    max_age => 1200
+    before_count => 3
+    after_count => 5
+
+Which would result in:
+==========================
+    event 1 - throttled (successful filter, period start)
+    event 2 - throttled (successful filter)
+    event 3 - not throttled
+    event 4 - not throttled
+    event 5 - not throttled
+    event 6 - throttled (successful filter)
+    event 7 - throttled (successful filter)
+    event x - throttled (successful filter)
+    period end
+    event 1 - throttled (successful filter, period start)
+    event 2 - throttled (successful filter)
+    event 3 - not throttled
+    event 4 - not throttled
+    event 5 - not throttled
+    event 6 - throttled (successful filter)
+    ...
+==========================
+Another example: if you wanted to throttle events so that you only
+receive 1 event per hour, you would use this configuration:
+[source,ruby]
+    period => 3600
+    max_age => 7200
+    before_count => -1
+    after_count => 1
+
+Which would result in:
+==========================
+    event 1 - not throttled (period start)
+    event 2 - throttled (successful filter)
+    event 3 - throttled (successful filter)
+    event 4 - throttled (successful filter)
+    event x - throttled (successful filter)
+    period end
+    event 1 - not throttled (period start)
+    event 2 - throttled (successful filter)
+    event 3 - throttled (successful filter)
+    event 4 - throttled (successful filter)
+    ...
+==========================
+A common use case would be to use the throttle filter to throttle events before 3 and
+after 5 while using multiple fields for the key, and then use the drop filter to remove
+throttled events. This configuration might appear as:
+[source,ruby]
+    filter {
+      throttle {
+        before_count => 3
+        after_count => 5
+        period => 3600
+        max_age => 7200
+        key => "%{host}%{message}"
+        add_tag => "throttled"
+      }
+      if "throttled" in [tags] {
+        drop { }
+      }
+    }
+
+Another case would be to store all events, but only email non-throttled events
+so the op's inbox isn't flooded with emails in the event of a system error.
+This configuration might appear as:
+[source,ruby]
+    filter {
+      throttle {
+        before_count => 3
+        after_count => 5
+        period => 3600
+        max_age => 7200
+        key => "%{message}"
+        add_tag => "throttled"
+      }
+    }
+    output {
+      if "throttled" not in [tags] {
+        email {
+          from => "logstash@mycompany.com"
+          subject => "Production System Alert"
+          to => "ops@mycompany.com"
+          via => "sendmail"
+          body => "Alert on %{host} from path %{path}:\n\n%{message}"
+          options => { "location" => "/usr/sbin/sendmail" }
+        }
+      }
+      elasticsearch_http {
+        host => "localhost"
+        port => "19200"
+      }
+    }
+
+When an event is received, the event key is stored in a key_cache. The key references
+a timeslot_cache. The event is allocated to a timeslot (created dynamically) based on
+the timestamp of the event. The timeslot counter is incremented. When the next event is
+received (same key), within the same "period", it is allocated to the same timeslot.
+The timeslot counter is incremented once again.
+
+The timeslot expires if the maximum age has been exceeded. The age is calculated
+based on the latest event timestamp and the max_age configuration option.
+
+The design is sketched below: each key in the key_cache points to a
+timeslot_cache that records when it was created, when it was last updated,
+and a counter per timeslot:
+
+....
+ +- [key_cache] -+      +-- [timeslot_cache] --+
+ |               |      | @created: 1439839636 |
+ |               |      | @latest:  1439839836 |
+ |  [a.b.c] =>   |      +----------------------+
+ |               |      | [1439839636] => 1    |
+ |               |      | [1439839736] => 3    |
+ |               |      | [1439839836] => 2    |
+ |               |      +----------------------+
+ |               |
+ |               |      +-- [timeslot_cache] --+
+ |               |      | @created: eeeeeeeeee |
+ |               |      | @latest:  llllllllll |
+ |  [x.y.z] =>   |      +----------------------+
+ |               |      | [0000000060] => x    |
+ |               |      | [0000000120] => y    |
+ |               |      | [..........] => N    |
+ +---------------+      +----------------------+
+....
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Throttle Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-after_count>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-before_count>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-max_age>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-max_counters>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-period>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-after_count"]
+===== `after_count`
+
+  * Value type is <>
+  * Default value is `-1`
+
+Events greater than this count will be throttled. Setting this value to -1, the
+default, will cause no events to be throttled based on the upper bound.
+
+[id="{version}-plugins-{type}s-{plugin}-before_count"]
+===== `before_count`
+
+  * Value type is <>
+  * Default value is `-1`
+
+Events less than this count will be throttled. Setting this value to -1, the
+default, will cause no events to be throttled based on the lower bound.
+
+[id="{version}-plugins-{type}s-{plugin}-key"]
+===== `key`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The key used to identify events. Events with the same key are grouped together.
+Field substitutions are allowed, so you can combine multiple fields.
+
+[id="{version}-plugins-{type}s-{plugin}-max_age"]
+===== `max_age`
+
+  * Value type is <>
+  * Default value is `3600`
+
+The maximum age of a timeslot. Higher values allow better tracking of an asynchronous
+flow of events, but require more memory. As a rule of thumb you should set this value
+to at least twice the period. Or set this value to period + maximum time offset
+between unordered events with the same key. Values below the specified period give
+unexpected results if unordered events are processed simultaneously.
+
+[id="{version}-plugins-{type}s-{plugin}-max_counters"]
+===== `max_counters`
+
+  * Value type is <>
+  * Default value is `100000`
+
+The maximum number of counters to store before decreasing the maximum age of a timeslot.
+Setting this value to -1 removes the upper bound, placing no constraint on the
+number of counters. This configuration value should only be used as a memory
+control mechanism and can cause early counter expiration if the value is reached.
+It is recommended to leave the default value and ensure that your key is selected
+such that it limits the number of counters required (i.e.
don't use UUID as the key). + +[id="{version}-plugins-{type}s-{plugin}-period"] +===== `period` + + * Value type is <> + * Default value is `"60"` + +The period in seconds after the first occurrence of an event until a new timeslot +is created. This period is tracked per unique key and per timeslot. +Field substitutions are allowed in this value. This allows you to specify that +certain kinds of events throttle for a specific period of time. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/throttle-v4.0.3.asciidoc b/docs/versioned-plugins/filters/throttle-v4.0.3.asciidoc new file mode 100644 index 000000000..3fae882d4 --- /dev/null +++ b/docs/versioned-plugins/filters/throttle-v4.0.3.asciidoc @@ -0,0 +1,252 @@ +:plugin: throttle +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.0.3 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-throttle/blob/v4.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Throttle filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The throttle filter is for throttling the number of events. The filter is +configured with a lower bound, the "before_count", and upper bound, the "after_count", +and a period of time. All events passing through the filter will be counted based on +their key and the event timestamp. As long as the count is less than the "before_count" +or greater than the "after_count", the event will be "throttled" which means the filter +will be considered successful and any tags or fields will be added (or removed). + +The plugin is thread-safe and properly tracks past events. + +For example, if you wanted to throttle events so you only receive an event after 2 +occurrences and you get no more than 3 in 10 minutes, you would use the configuration: +[source,ruby] + period => 600 + max_age => 1200 + before_count => 3 + after_count => 5 + +Which would result in: +========================== + event 1 - throttled (successful filter, period start) + event 2 - throttled (successful filter) + event 3 - not throttled + event 4 - not throttled + event 5 - not throttled + event 6 - throttled (successful filter) + event 7 - throttled (successful filter) + event x - throttled (successful filter) + period end + event 1 - throttled (successful filter, period start) + event 2 - throttled (successful filter) + event 3 - not throttled + event 4 - not throttled + event 5 - not throttled + event 6 - throttled (successful filter) + ... 
+========================== +Another example is if you wanted to throttle events so you only +receive 1 event per hour, you would use the configuration: +[source,ruby] + period => 3600 + max_age => 7200 + before_count => -1 + after_count => 1 + +Which would result in: +========================== + event 1 - not throttled (period start) + event 2 - throttled (successful filter) + event 3 - throttled (successful filter) + event 4 - throttled (successful filter) + event x - throttled (successful filter) + period end + event 1 - not throttled (period start) + event 2 - throttled (successful filter) + event 3 - throttled (successful filter) + event 4 - throttled (successful filter) + ... +========================== +A common use case would be to use the throttle filter to throttle events before 3 and +after 5 while using multiple fields for the key and then use the drop filter to remove +throttled events. This configuration might appear as: +[source,ruby] + filter { + throttle { + before_count => 3 + after_count => 5 + period => 3600 + max_age => 7200 + key => "%{host}%{message}" + add_tag => "throttled" + } + if "throttled" in [tags] { + drop { } + } + } + +Another case would be to store all events, but only email non-throttled events +so the op's inbox isn't flooded with emails in the event of a system error. +This configuration might appear as: +[source,ruby] + filter { + throttle { + before_count => 3 + after_count => 5 + period => 3600 + max_age => 7200 + key => "%{message}" + add_tag => "throttled" + } + } + output { + if "throttled" not in [tags] { + email { + from => "logstash@mycompany.com" + subject => "Production System Alert" + to => "ops@mycompany.com" + via => "sendmail" + body => "Alert on %{host} from path %{path}:\n\n%{message}" + options => { "location" => "/usr/sbin/sendmail" } + } + } + elasticsearch_http { + host => "localhost" + port => "19200" + } + } + +When an event is received, the event key is stored in a key_cache. The key references +a timeslot_cache. The event is allocated to a timeslot (created dynamically) based on +the timestamp of the event. The timeslot counter is incremented. When the next event is +received (same key), within the same "period", it is allocated to the same timeslot. +The timeslot counter is incremented once again. + +The timeslot expires if the maximum age has been exceeded. The age is calculated +based on the latest event timestamp and the max_age configuration option. + + ---[::.. DESIGN ..::]--- + ++- [key_cache] -+ +-- [timeslot_cache] --+ +| | | @created: 1439839636 | + | @latest: 1439839836 | + [a.b.c] => +----------------------+ + | [1439839636] => 1 | + | [1439839736] => 3 | + | [1439839836] => 2 | + +----------------------+ + + +-- [timeslot_cache] --+ + | @created: eeeeeeeeee | + | @latest: llllllllll | + [x.y.z] => +----------------------+ + | [0000000060] => x | + | [0000000120] => y | +| | | [..........] => N | ++---------------+ +----------------------+ + +Frank de Jong (@frapex) +Mike Pilone (@mikepilone) + +only update if greater than current + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Throttle Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-after_count>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-before_count>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-max_age>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_counters>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-period>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-after_count"] +===== `after_count` + + * Value type is <> + * Default value is `-1` + +Events greater than this count will be throttled. Setting this value to -1, the +default, will cause no events to be throttled based on the upper bound. + +[id="{version}-plugins-{type}s-{plugin}-before_count"] +===== `before_count` + + * Value type is <> + * Default value is `-1` + +Events less than this count will be throttled. Setting this value to -1, the +default, will cause no events to be throttled based on the lower bound. + +[id="{version}-plugins-{type}s-{plugin}-key"] +===== `key` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The key used to identify events. Events with the same key are grouped together. +Field substitutions are allowed, so you can combine multiple fields. + +[id="{version}-plugins-{type}s-{plugin}-max_age"] +===== `max_age` + + * Value type is <> + * Default value is `3600` + +The maximum age of a timeslot. Higher values allow better tracking of an asynchronous +flow of events, but require more memory. As a rule of thumb you should set this value +to at least twice the period. Or set this value to period + maximum time offset +between unordered events with the same key. Values below the specified period give +unexpected results if unordered events are processed simultaneously. + +[id="{version}-plugins-{type}s-{plugin}-max_counters"] +===== `max_counters` + + * Value type is <> + * Default value is `100000` + +The maximum number of counters to store before decreasing the maximum age of a timeslot. +Setting this value to -1 will prevent an upper bound with no constraint on the +number of counters. This configuration value should only be used as a memory +control mechanism and can cause early counter expiration if the value is reached. +It is recommended to leave the default value and ensure that your key is selected +such that it limits the number of counters required (i.e. don't use UUID as the key). + +[id="{version}-plugins-{type}s-{plugin}-period"] +===== `period` + + * Value type is <> + * Default value is `"60"` + +The period in seconds after the first occurrence of an event until a new timeslot +is created. This period is tracked per unique key and per timeslot. +Field substitutions are allowed in this value. This allows you to specify that +certain kinds of events throttle for a specific period of time. 
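+
+For instance, a sketch of field substitution in `period` (the
+`[throttle_period]` field is illustrative and would be set by an earlier
+filter), so that different kinds of events throttle over different windows:
+
+[source,ruby]
+----
+filter {
+  throttle {
+    key          => "%{host}%{type}"
+    period       => "%{throttle_period}"  # e.g. "60" for logs, "3600" for alerts
+    max_age      => 7200                  # at least twice the largest period
+    before_count => -1
+    after_count  => 2
+    add_tag      => "throttled"
+  }
+}
+----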
+ + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/throttle-v4.0.4.asciidoc b/docs/versioned-plugins/filters/throttle-v4.0.4.asciidoc new file mode 100644 index 000000000..381eaea87 --- /dev/null +++ b/docs/versioned-plugins/filters/throttle-v4.0.4.asciidoc @@ -0,0 +1,252 @@ +:plugin: throttle +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.0.4 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-throttle/blob/v4.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Throttle filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The throttle filter is for throttling the number of events. The filter is +configured with a lower bound, the "before_count", and upper bound, the "after_count", +and a period of time. All events passing through the filter will be counted based on +their key and the event timestamp. As long as the count is less than the "before_count" +or greater than the "after_count", the event will be "throttled" which means the filter +will be considered successful and any tags or fields will be added (or removed). + +The plugin is thread-safe and properly tracks past events. + +For example, if you wanted to throttle events so you only receive an event after 2 +occurrences and you get no more than 3 in 10 minutes, you would use the configuration: +[source,ruby] + period => 600 + max_age => 1200 + before_count => 3 + after_count => 5 + +Which would result in: +========================== + event 1 - throttled (successful filter, period start) + event 2 - throttled (successful filter) + event 3 - not throttled + event 4 - not throttled + event 5 - not throttled + event 6 - throttled (successful filter) + event 7 - throttled (successful filter) + event x - throttled (successful filter) + period end + event 1 - throttled (successful filter, period start) + event 2 - throttled (successful filter) + event 3 - not throttled + event 4 - not throttled + event 5 - not throttled + event 6 - throttled (successful filter) + ... +========================== +Another example is if you wanted to throttle events so you only +receive 1 event per hour, you would use the configuration: +[source,ruby] + period => 3600 + max_age => 7200 + before_count => -1 + after_count => 1 + +Which would result in: +========================== + event 1 - not throttled (period start) + event 2 - throttled (successful filter) + event 3 - throttled (successful filter) + event 4 - throttled (successful filter) + event x - throttled (successful filter) + period end + event 1 - not throttled (period start) + event 2 - throttled (successful filter) + event 3 - throttled (successful filter) + event 4 - throttled (successful filter) + ... +========================== +A common use case would be to use the throttle filter to throttle events before 3 and +after 5 while using multiple fields for the key and then use the drop filter to remove +throttled events. 
This configuration might appear as:
+[source,ruby]
+    filter {
+      throttle {
+        before_count => 3
+        after_count => 5
+        period => 3600
+        max_age => 7200
+        key => "%{host}%{message}"
+        add_tag => "throttled"
+      }
+      if "throttled" in [tags] {
+        drop { }
+      }
+    }
+
+Another case would be to store all events, but only email non-throttled events
+so the ops team's inbox isn't flooded with emails in the event of a system error.
+This configuration might appear as:
+[source,ruby]
+    filter {
+      throttle {
+        before_count => 3
+        after_count => 5
+        period => 3600
+        max_age => 7200
+        key => "%{message}"
+        add_tag => "throttled"
+      }
+    }
+    output {
+      if "throttled" not in [tags] {
+        email {
+          from => "logstash@mycompany.com"
+          subject => "Production System Alert"
+          to => "ops@mycompany.com"
+          via => "sendmail"
+          body => "Alert on %{host} from path %{path}:\n\n%{message}"
+          options => { "location" => "/usr/sbin/sendmail" }
+        }
+      }
+      elasticsearch_http {
+        host => "localhost"
+        port => "19200"
+      }
+    }
+
+When an event is received, the event key is stored in a key_cache. The key references
+a timeslot_cache. The event is allocated to a timeslot (created dynamically) based on
+the timestamp of the event. The timeslot counter is incremented. When the next event is
+received (same key), within the same "period", it is allocated to the same timeslot.
+The timeslot counter is incremented once again.
+
+The timeslot expires if the maximum age has been exceeded. The age is calculated
+based on the latest event timestamp and the max_age configuration option.
+
+        ---[::.. DESIGN ..::]---
+
+ +- [key_cache] -+    +-- [timeslot_cache] --+
+ |               |    | @created: 1439839636 |
+ |               |    | @latest:  1439839836 |
+ |   [a.b.c] ==> |    +----------------------+
+ |               |    | [1439839636] => 1    |
+ |               |    | [1439839736] => 3    |
+ |               |    | [1439839836] => 2    |
+ |               |    +----------------------+
+ |               |
+ |               |    +-- [timeslot_cache] --+
+ |               |    | @created: eeeeeeeeee |
+ |               |    | @latest:  llllllllll |
+ |   [x.y.z] ==> |    +----------------------+
+ |               |    | [0000000060] => x    |
+ |               |    | [0000000120] => y    |
+ |               |    | [..........] => N    |
+ +---------------+    +----------------------+
+
+Credits: Frank de Jong (@frapex) and Mike Pilone (@mikepilone).
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Throttle Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-after_count>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-before_count>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-max_age>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-max_counters>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-period>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-after_count"]
+===== `after_count`
+
+  * Value type is <>
+  * Default value is `-1`
+
+Events greater than this count will be throttled. Setting this value to -1, the
+default, will cause no events to be throttled based on the upper bound.
+
+[id="{version}-plugins-{type}s-{plugin}-before_count"]
+===== `before_count`
+
+  * Value type is <>
+  * Default value is `-1`
+
+Events less than this count will be throttled.
Setting this value to -1, the
+default, will cause no events to be throttled based on the lower bound.
+
+[id="{version}-plugins-{type}s-{plugin}-key"]
+===== `key`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The key used to identify events. Events with the same key are grouped together.
+Field substitutions are allowed, so you can combine multiple fields.
+
+[id="{version}-plugins-{type}s-{plugin}-max_age"]
+===== `max_age`
+
+  * Value type is <>
+  * Default value is `3600`
+
+The maximum age of a timeslot. Higher values allow better tracking of an asynchronous
+flow of events, but require more memory. As a rule of thumb, you should set this value
+to at least twice the period, or to the period plus the maximum time offset expected
+between unordered events with the same key. Values below the specified period give
+unexpected results if unordered events are processed simultaneously.
+
+[id="{version}-plugins-{type}s-{plugin}-max_counters"]
+===== `max_counters`
+
+  * Value type is <>
+  * Default value is `100000`
+
+The maximum number of counters to store before decreasing the maximum age of a timeslot.
+Setting this value to -1 disables the upper bound, placing no constraint on the
+number of counters. This configuration value should only be used as a memory
+control mechanism, and reaching it can cause counters to expire early.
+It is recommended to leave the default value and ensure that your key is selected
+such that it limits the number of counters required (i.e. don't use UUID as the key).
+
+[id="{version}-plugins-{type}s-{plugin}-period"]
+===== `period`
+
+  * Value type is <>
+  * Default value is `"60"`
+
+The period in seconds after the first occurrence of an event until a new timeslot
+is created. This period is tracked per unique key and per timeslot.
+Field substitutions are allowed in this value, so you can specify that
+certain kinds of events throttle for a specific period of time.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/tld-index.asciidoc b/docs/versioned-plugins/filters/tld-index.asciidoc
new file mode 100644
index 000000000..52f0574d3
--- /dev/null
+++ b/docs/versioned-plugins/filters/tld-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: tld
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::tld-v3.0.3.asciidoc[]
+include::tld-v3.0.2.asciidoc[]
+include::tld-v3.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/tld-v3.0.1.asciidoc b/docs/versioned-plugins/filters/tld-v3.0.1.asciidoc
new file mode 100644
index 000000000..6235caaf5
--- /dev/null
+++ b/docs/versioned-plugins/filters/tld-v3.0.1.asciidoc
@@ -0,0 +1,73 @@
+:plugin: tld
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-tld/blob/v3.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Tld filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The tld filter parses the hostname or URL in the `source` field, extracts
+the top-level domain information from it, and stores the result in the
+`target` field.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Tld Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+  * Value type is <>
+  * Default value is `"message"`
+
+The source field to parse.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+  * Value type is <>
+  * Default value is `"tld"`
+
+The target field in which to place all the parsed data.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/tld-v3.0.2.asciidoc b/docs/versioned-plugins/filters/tld-v3.0.2.asciidoc
new file mode 100644
index 000000000..98747f1e4
--- /dev/null
+++ b/docs/versioned-plugins/filters/tld-v3.0.2.asciidoc
@@ -0,0 +1,73 @@
+:plugin: tld
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.2
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-tld/blob/v3.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Tld filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The tld filter parses the hostname or URL in the `source` field, extracts
+the top-level domain information from it, and stores the result in the
+`target` field.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Tld Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+  * Value type is <>
+  * Default value is `"message"`
+
+The source field to parse.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+  * Value type is <>
+  * Default value is `"tld"`
+
+The target field in which to place all the parsed data.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/tld-v3.0.3.asciidoc b/docs/versioned-plugins/filters/tld-v3.0.3.asciidoc
new file mode 100644
index 000000000..a114a1b3c
--- /dev/null
+++ b/docs/versioned-plugins/filters/tld-v3.0.3.asciidoc
@@ -0,0 +1,73 @@
+:plugin: tld
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-tld/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Tld filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The tld filter parses the hostname or URL in the `source` field, extracts
+the top-level domain information from it, and stores the result in the
+`target` field.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Tld Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-source"]
+===== `source`
+
+  * Value type is <>
+  * Default value is `"message"`
+
+The source field to parse.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+  * Value type is <>
+  * Default value is `"tld"`
+
+The target field in which to place all the parsed data.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/translate-index.asciidoc b/docs/versioned-plugins/filters/translate-index.asciidoc
new file mode 100644
index 000000000..dd85f98c7
--- /dev/null
+++ b/docs/versioned-plugins/filters/translate-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: translate
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::translate-v3.0.4.asciidoc[]
+include::translate-v3.0.3.asciidoc[]
+include::translate-v3.0.2.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/translate-v3.0.2.asciidoc b/docs/versioned-plugins/filters/translate-v3.0.2.asciidoc
new file mode 100644
index 000000000..abf6c8101
--- /dev/null
+++ b/docs/versioned-plugins/filters/translate-v3.0.2.asciidoc
@@ -0,0 +1,211 @@
+:plugin: translate
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-translate/blob/v3.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Translate filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+A general search and replace tool that uses a configured hash
+and/or a file to determine replacement values. Currently supported are
+YAML, JSON, and CSV files.
+
+The dictionary entries can be specified in one of two ways: First,
+the `dictionary` configuration item may contain a hash representing
+the mapping. Second, an external file (readable by logstash) may be specified
+in the `dictionary_path` configuration item. These two methods may not be used
+together; doing so will produce an error.
+
+Operationally, if the event field specified in the `field` configuration
+matches the EXACT contents of a dictionary entry key (or matches a regex if
+the `regex` configuration item has been enabled), the field's value will be substituted
+with the matched key's value from the dictionary.
+
+By default, the translate filter will replace the contents of the
+matching event field (in-place). However, by using the `destination`
+configuration item, you may also specify a target event field to
+populate with the new translated value.
+
+Alternatively, for simple string search and replacements for just a few values
+you might consider using the gsub function of the mutate filter.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Translate Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-dictionary>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-dictionary_path>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-exact>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-fallback>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-field>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-override>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-refresh_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-regex>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-destination"]
+===== `destination`
+
+  * Value type is <>
+  * Default value is `"translation"`
+
+The destination field you wish to populate with the translated code. The default
+is a field named `translation`. Set this to the same value as the source `field` if
+you want to do an in-place substitution; in this case the filter will always succeed
+and will clobber the old value of the source field!
+
+[id="{version}-plugins-{type}s-{plugin}-dictionary"]
+===== `dictionary`
+
+  * Value type is <>
+  * Default value is `{}`
+
+The dictionary to use for translation, when specified in the logstash filter
+configuration item (i.e. when not using the `dictionary_path` file).
+
+Example:
+[source,ruby]
+    filter {
+      translate {
+        dictionary => [ "100", "Continue",
+                        "101", "Switching Protocols",
+                        "merci", "thank you",
+                        "old version", "new version" ]
+      }
+    }
+
+NOTE: It is an error to specify both `dictionary` and `dictionary_path`.
+
+[id="{version}-plugins-{type}s-{plugin}-dictionary_path"]
+===== `dictionary_path`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The full path of the external dictionary file. The format of the table
+should be standard YAML, JSON, or CSV. Make sure you specify any integer-based keys
+in quotes. For example, the YAML file should look something like this:
+[source,ruby]
+    "100": Continue
+    "101": Switching Protocols
+    merci: gracias
+    old version: new version
+
+NOTE: It is an error to specify both `dictionary` and `dictionary_path`.
+
+The currently supported formats are YAML, JSON, and CSV. Format selection is
+based on the file extension: `json` for JSON, `yaml` or `yml` for YAML, and
+`csv` for CSV. The JSON format only supports simple key/value, unnested
+objects. The CSV format expects exactly two columns, with the first serving
+as the original text and the second column as the replacement.
+
+[id="{version}-plugins-{type}s-{plugin}-exact"]
+===== `exact`
+
+  * Value type is <>
+  * Default value is `true`
+
+When `exact => true`, the translate filter will populate the destination field
+with the exact contents of the dictionary value. When `exact => false`, the
+filter will populate the destination field with the result of any existing
+destination field's data, with the translated value substituted in-place.
+
+For example, consider this simple translation.yml, configured to check the `data` field:
+[source,ruby]
+    foo: bar
+
+If logstash receives an event with the `data` field set to `foo`, and `exact => true`,
+the destination field will be populated with the string `bar`.
+If `exact => false`, and logstash receives the same event, the destination field
+will also be set to `bar`. However, if logstash receives an event with the `data` field
+set to `foofing`, the destination field will be set to `barfing`.
+
+Set both `exact => true` AND `regex => true` if you would like to match using dictionary
+keys as regular expressions. A large dictionary could be expensive to match in this case.
+
+[id="{version}-plugins-{type}s-{plugin}-fallback"]
+===== `fallback`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+In case no translation occurs in the event (no matches), this will add a default
+translation string, which will always populate the destination field if the match failed.
+
+For example, if we have configured `fallback => "no match"`, using this dictionary:
+[source,ruby]
+    foo: bar
+
+Then, if logstash received an event with the source field set to `foo`, the destination
+field would be set to `bar`. However, if logstash received an event with the source field
+set to `nope`, then the destination field would still be populated, but with the value of
+`no match`. This configuration can be dynamic and include parts of the event using the
+`%{field}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-field"]
+===== `field`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The name of the logstash event field containing the value to be compared for a
+match by the translate filter (e.g. `message`, `host`, `response_code`).
+
+If this field is an array, only the first value will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-override"]
+===== `override`
+
+  * Value type is <>
+  * Default value is `false`
+
+If the destination (or target) field already exists, this configuration item specifies
+whether the filter should skip translation (default) or overwrite the target field
+value with the new translation value.
+
+[id="{version}-plugins-{type}s-{plugin}-refresh_interval"]
+===== `refresh_interval`
+
+  * Value type is <>
+  * Default value is `300`
+
+When using a dictionary file, this setting will indicate how frequently
+(in seconds) logstash will check the dictionary file for updates.
+
+[id="{version}-plugins-{type}s-{plugin}-regex"]
+===== `regex`
+
+  * Value type is <>
+  * Default value is `false`
+
+If you'd like to treat dictionary keys as regular expressions, set `regex => true`.
+Note: this is activated only when `exact => true`.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/translate-v3.0.3.asciidoc b/docs/versioned-plugins/filters/translate-v3.0.3.asciidoc
new file mode 100644
index 000000000..51a4d43b5
--- /dev/null
+++ b/docs/versioned-plugins/filters/translate-v3.0.3.asciidoc
@@ -0,0 +1,211 @@
+:plugin: translate
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-translate/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Translate filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+A general search and replace tool that uses a configured hash
+and/or a file to determine replacement values. Currently supported are
+YAML, JSON, and CSV files.
+
+The dictionary entries can be specified in one of two ways: First,
+the `dictionary` configuration item may contain a hash representing
+the mapping. Second, an external file (readable by logstash) may be specified
+in the `dictionary_path` configuration item. These two methods may not be used
+together; doing so will produce an error.
+
+Operationally, if the event field specified in the `field` configuration
+matches the EXACT contents of a dictionary entry key (or matches a regex if
+the `regex` configuration item has been enabled), the field's value will be substituted
+with the matched key's value from the dictionary.
+
+By default, the translate filter will replace the contents of the
+matching event field (in-place). However, by using the `destination`
+configuration item, you may also specify a target event field to
+populate with the new translated value.
+
+Alternatively, for simple string search and replacements for just a few values
+you might consider using the gsub function of the mutate filter.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Translate Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-dictionary>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-dictionary_path>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-exact>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-fallback>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-field>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-override>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-refresh_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-regex>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-destination"]
+===== `destination`
+
+  * Value type is <>
+  * Default value is `"translation"`
+
+The destination field you wish to populate with the translated code. The default
+is a field named `translation`. Set this to the same value as the source `field` if
+you want to do an in-place substitution; in this case the filter will always succeed
+and will clobber the old value of the source field!
+
+[id="{version}-plugins-{type}s-{plugin}-dictionary"]
+===== `dictionary`
+
+  * Value type is <>
+  * Default value is `{}`
+
+The dictionary to use for translation, when specified in the logstash filter
+configuration item (i.e. when not using the `dictionary_path` file).
+
+Example:
+[source,ruby]
+    filter {
+      translate {
+        dictionary => [ "100", "Continue",
+                        "101", "Switching Protocols",
+                        "merci", "thank you",
+                        "old version", "new version" ]
+      }
+    }
+
+NOTE: It is an error to specify both `dictionary` and `dictionary_path`.
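+Since the value type is a hash (default `{}`), the same mapping can also be
+written with the Logstash hash literal syntax; a sketch of the example above
+in that form (assuming hash literals are accepted by this plugin version):
+[source,ruby]
+    filter {
+      translate {
+        dictionary => {
+          "100"         => "Continue"
+          "101"         => "Switching Protocols"
+          "merci"       => "thank you"
+          "old version" => "new version"
+        }
+      }
+    }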
+
+[id="{version}-plugins-{type}s-{plugin}-dictionary_path"]
+===== `dictionary_path`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The full path of the external dictionary file. The format of the table
+should be standard YAML, JSON, or CSV. Make sure you specify any integer-based keys
+in quotes. For example, the YAML file should look something like this:
+[source,ruby]
+    "100": Continue
+    "101": Switching Protocols
+    merci: gracias
+    old version: new version
+
+NOTE: It is an error to specify both `dictionary` and `dictionary_path`.
+
+The currently supported formats are YAML, JSON, and CSV. Format selection is
+based on the file extension: `json` for JSON, `yaml` or `yml` for YAML, and
+`csv` for CSV. The JSON format only supports simple key/value, unnested
+objects. The CSV format expects exactly two columns, with the first serving
+as the original text and the second column as the replacement.
+
+[id="{version}-plugins-{type}s-{plugin}-exact"]
+===== `exact`
+
+  * Value type is <>
+  * Default value is `true`
+
+When `exact => true`, the translate filter will populate the destination field
+with the exact contents of the dictionary value. When `exact => false`, the
+filter will populate the destination field with the result of any existing
+destination field's data, with the translated value substituted in-place.
+
+For example, consider this simple translation.yml, configured to check the `data` field:
+[source,ruby]
+    foo: bar
+
+If logstash receives an event with the `data` field set to `foo`, and `exact => true`,
+the destination field will be populated with the string `bar`.
+If `exact => false`, and logstash receives the same event, the destination field
+will also be set to `bar`. However, if logstash receives an event with the `data` field
+set to `foofing`, the destination field will be set to `barfing`.
+
+Set both `exact => true` AND `regex => true` if you would like to match using dictionary
+keys as regular expressions. A large dictionary could be expensive to match in this case.
+
+[id="{version}-plugins-{type}s-{plugin}-fallback"]
+===== `fallback`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+In case no translation occurs in the event (no matches), this will add a default
+translation string, which will always populate the destination field if the match failed.
+
+For example, if we have configured `fallback => "no match"`, using this dictionary:
+[source,ruby]
+    foo: bar
+
+Then, if logstash received an event with the source field set to `foo`, the destination
+field would be set to `bar`. However, if logstash received an event with the source field
+set to `nope`, then the destination field would still be populated, but with the value of
+`no match`. This configuration can be dynamic and include parts of the event using the
+`%{field}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-field"]
+===== `field`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The name of the logstash event field containing the value to be compared for a
+match by the translate filter (e.g. `message`, `host`, `response_code`).
+
+If this field is an array, only the first value will be used.
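+Putting these options together, a minimal end-to-end sketch (the field names and
+the dictionary file path here are illustrative, not prescribed by the plugin):
+[source,ruby]
+    filter {
+      translate {
+        field           => "response_code"
+        destination     => "http_status_text"
+        dictionary_path => "/etc/logstash/http_status.yaml"
+        fallback        => "unknown code %{response_code}"
+      }
+    }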
+
+[id="{version}-plugins-{type}s-{plugin}-override"]
+===== `override`
+
+  * Value type is <>
+  * Default value is `false`
+
+If the destination (or target) field already exists, this configuration item specifies
+whether the filter should skip translation (default) or overwrite the target field
+value with the new translation value.
+
+[id="{version}-plugins-{type}s-{plugin}-refresh_interval"]
+===== `refresh_interval`
+
+  * Value type is <>
+  * Default value is `300`
+
+When using a dictionary file, this setting will indicate how frequently
+(in seconds) logstash will check the dictionary file for updates.
+
+[id="{version}-plugins-{type}s-{plugin}-regex"]
+===== `regex`
+
+  * Value type is <>
+  * Default value is `false`
+
+If you'd like to treat dictionary keys as regular expressions, set `regex => true`.
+Note: this is activated only when `exact => true`.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/translate-v3.0.4.asciidoc b/docs/versioned-plugins/filters/translate-v3.0.4.asciidoc
new file mode 100644
index 000000000..d44551ccf
--- /dev/null
+++ b/docs/versioned-plugins/filters/translate-v3.0.4.asciidoc
@@ -0,0 +1,211 @@
+:plugin: translate
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-translate/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Translate filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+A general search and replace tool that uses a configured hash
+and/or a file to determine replacement values. Currently supported are
+YAML, JSON, and CSV files.
+
+The dictionary entries can be specified in one of two ways: First,
+the `dictionary` configuration item may contain a hash representing
+the mapping. Second, an external file (readable by logstash) may be specified
+in the `dictionary_path` configuration item. These two methods may not be used
+together; doing so will produce an error.
+
+Operationally, if the event field specified in the `field` configuration
+matches the EXACT contents of a dictionary entry key (or matches a regex if
+the `regex` configuration item has been enabled), the field's value will be substituted
+with the matched key's value from the dictionary.
+
+By default, the translate filter will replace the contents of the
+matching event field (in-place). However, by using the `destination`
+configuration item, you may also specify a target event field to
+populate with the new translated value.
+
+Alternatively, for simple string search and replacements for just a few values
+you might consider using the gsub function of the mutate filter.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Translate Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-dictionary>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-dictionary_path>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-exact>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-fallback>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-field>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-override>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-refresh_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-regex>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-destination"]
+===== `destination`
+
+  * Value type is <>
+  * Default value is `"translation"`
+
+The destination field you wish to populate with the translated code. The default
+is a field named `translation`. Set this to the same value as the source `field` if
+you want to do an in-place substitution; in this case the filter will always succeed
+and will clobber the old value of the source field!
+
+[id="{version}-plugins-{type}s-{plugin}-dictionary"]
+===== `dictionary`
+
+  * Value type is <>
+  * Default value is `{}`
+
+The dictionary to use for translation, when specified in the logstash filter
+configuration item (i.e. when not using the `dictionary_path` file).
+
+Example:
+[source,ruby]
+    filter {
+      translate {
+        dictionary => [ "100", "Continue",
+                        "101", "Switching Protocols",
+                        "merci", "thank you",
+                        "old version", "new version" ]
+      }
+    }
+
+NOTE: It is an error to specify both `dictionary` and `dictionary_path`.
+
+[id="{version}-plugins-{type}s-{plugin}-dictionary_path"]
+===== `dictionary_path`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The full path of the external dictionary file. The format of the table
+should be standard YAML, JSON, or CSV. Make sure you specify any integer-based keys
+in quotes. For example, the YAML file should look something like this:
+[source,ruby]
+    "100": Continue
+    "101": Switching Protocols
+    merci: gracias
+    old version: new version
+
+NOTE: It is an error to specify both `dictionary` and `dictionary_path`.
+
+The currently supported formats are YAML, JSON, and CSV. Format selection is
+based on the file extension: `json` for JSON, `yaml` or `yml` for YAML, and
+`csv` for CSV. The JSON format only supports simple key/value, unnested
+objects. The CSV format expects exactly two columns, with the first serving
+as the original text and the second column as the replacement.
+
+[id="{version}-plugins-{type}s-{plugin}-exact"]
+===== `exact`
+
+  * Value type is <>
+  * Default value is `true`
+
+When `exact => true`, the translate filter will populate the destination field
+with the exact contents of the dictionary value. When `exact => false`, the
+filter will populate the destination field with the result of any existing
+destination field's data, with the translated value substituted in-place.
+
+For example, consider this simple translation.yml, configured to check the `data` field:
+[source,ruby]
+    foo: bar
+
+If logstash receives an event with the `data` field set to `foo`, and `exact => true`,
+the destination field will be populated with the string `bar`.
+If `exact => false`, and logstash receives the same event, the destination field
+will also be set to `bar`. However, if logstash receives an event with the `data` field
+set to `foofing`, the destination field will be set to `barfing`.
+
+Set both `exact => true` AND `regex => true` if you would like to match using dictionary
+keys as regular expressions. A large dictionary could be expensive to match in this case.
+
+[id="{version}-plugins-{type}s-{plugin}-fallback"]
+===== `fallback`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+In case no translation occurs in the event (no matches), this will add a default
+translation string, which will always populate the destination field if the match failed.
+
+For example, if we have configured `fallback => "no match"`, using this dictionary:
+[source,ruby]
+    foo: bar
+
+Then, if logstash received an event with the source field set to `foo`, the destination
+field would be set to `bar`. However, if logstash received an event with the source field
+set to `nope`, then the destination field would still be populated, but with the value of
+`no match`. This configuration can be dynamic and include parts of the event using the
+`%{field}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-field"]
+===== `field`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The name of the logstash event field containing the value to be compared for a
+match by the translate filter (e.g. `message`, `host`, `response_code`).
+
+If this field is an array, only the first value will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-override"]
+===== `override`
+
+  * Value type is <>
+  * Default value is `false`
+
+If the destination (or target) field already exists, this configuration item specifies
+whether the filter should skip translation (default) or overwrite the target field
+value with the new translation value.
+
+[id="{version}-plugins-{type}s-{plugin}-refresh_interval"]
+===== `refresh_interval`
+
+  * Value type is <>
+  * Default value is `300`
+
+When using a dictionary file, this setting will indicate how frequently
+(in seconds) logstash will check the dictionary file for updates.
+
+[id="{version}-plugins-{type}s-{plugin}-regex"]
+===== `regex`
+
+  * Value type is <>
+  * Default value is `false`
+
+If you'd like to treat dictionary keys as regular expressions, set `regex => true`.
+Note: this is activated only when `exact => true`.
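+As an illustration (the dictionary keys below are hypothetical, not taken from
+the plugin itself), enabling both flags lets the keys act as regular expressions:
+[source,ruby]
+    filter {
+      translate {
+        field => "message"
+        exact => true
+        regex => true
+        dictionary => {
+          "^ERR-\d+"  => "application error"
+          "^WARN-\d+" => "application warning"
+        }
+      }
+    }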
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/truncate-index.asciidoc b/docs/versioned-plugins/filters/truncate-index.asciidoc
new file mode 100644
index 000000000..40cfb3ddd
--- /dev/null
+++ b/docs/versioned-plugins/filters/truncate-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: truncate
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::truncate-v1.0.4.asciidoc[]
+include::truncate-v1.0.3.asciidoc[]
+include::truncate-v1.0.2.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/truncate-v1.0.2.asciidoc b/docs/versioned-plugins/filters/truncate-v1.0.2.asciidoc
new file mode 100644
index 000000000..d6be312a3
--- /dev/null
+++ b/docs/versioned-plugins/filters/truncate-v1.0.2.asciidoc
@@ -0,0 +1,84 @@
+:plugin: truncate
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v1.0.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-truncate/blob/v1.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Truncate filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Allows you to truncate fields longer than a given length.
+
+This truncates on bytes values, not character count. In practice, this
+should mean that the truncated length is somewhere between `length_bytes` and
+`length_bytes - 6` (UTF-8 supports up to 6-byte characters).
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Truncate Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-length_bytes>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-fields"]
+===== `fields`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+A list of fieldrefs to truncate if they are too long.
+
+If not specified, the default behavior will be to attempt truncation on all
+strings in the event. This default behavior could be computationally
+expensive, so if you know exactly which fields you wish to truncate, it is
+advised that you be specific and configure the fields you want truncated.
+
+Special behaviors for non-string fields:
+
+* Numbers: No action
+* Array: this plugin will attempt truncation on all elements of that array.
+* Hash: truncate will try all values of the hash (recursively, if this hash
+contains other hashes).
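+For instance, a minimal sketch that truncates only two known fields (the field
+names are illustrative) using the `length_bytes` setting described next:
+[source,ruby]
+    filter {
+      truncate {
+        fields => ["message", "user_agent"]
+        length_bytes => 1024
+      }
+    }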
+ +[id="{version}-plugins-{type}s-{plugin}-length_bytes"] +===== `length_bytes` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Fields over this length will be truncated to this length. + +Truncation happens from the end of the text (the start will be kept). + +As an example, if you set `length_bytes => 10` and a field contains "hello +world, how are you?", then this field will be truncated and have this value: +"hello worl" + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/truncate-v1.0.3.asciidoc b/docs/versioned-plugins/filters/truncate-v1.0.3.asciidoc new file mode 100644 index 000000000..ff4796cda --- /dev/null +++ b/docs/versioned-plugins/filters/truncate-v1.0.3.asciidoc @@ -0,0 +1,84 @@ +:plugin: truncate +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v1.0.3 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-truncate/blob/v1.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Truncate filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Allows you to truncate fields longer than a given length. + +This truncates on bytes values, not character count. In practice, this +should mean that the truncated length is somewhere between `length_bytes` and +`length_bytes - 6` (UTF-8 supports up to 6-byte characters). + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Truncate Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-length_bytes>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-fields"] +===== `fields` + + * Value type is <> + * There is no default value for this setting. + +A list of fieldrefs to truncate if they are too long. + +If not specified, the default behavior will be to attempt truncation on all +strings in the event. This default behavior could be computationally +expensive, so if you know exactly which fields you wish to truncate, it is +advised that you be specific and configure the fields you want truncated. + +Special behaviors for non-string fields: + +* Numbers: No action +* Array: this plugin will attempt truncation on all elements of that array. +* Hash: truncate will try all values of the hash (recursively, if this hash +contains other hashes). + +[id="{version}-plugins-{type}s-{plugin}-length_bytes"] +===== `length_bytes` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Fields over this length will be truncated to this length. 
+ +Truncation happens from the end of the text (the start will be kept). + +As an example, if you set `length_bytes => 10` and a field contains "hello +world, how are you?", then this field will be truncated and have this value: +"hello worl" + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/truncate-v1.0.4.asciidoc b/docs/versioned-plugins/filters/truncate-v1.0.4.asciidoc new file mode 100644 index 000000000..d13593c76 --- /dev/null +++ b/docs/versioned-plugins/filters/truncate-v1.0.4.asciidoc @@ -0,0 +1,84 @@ +:plugin: truncate +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v1.0.4 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-truncate/blob/v1.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Truncate filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Allows you to truncate fields longer than a given length. + +This truncates on bytes values, not character count. In practice, this +should mean that the truncated length is somewhere between `length_bytes` and +`length_bytes - 6` (UTF-8 supports up to 6-byte characters). + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Truncate Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-length_bytes>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-fields"] +===== `fields` + + * Value type is <> + * There is no default value for this setting. + +A list of fieldrefs to truncate if they are too long. + +If not specified, the default behavior will be to attempt truncation on all +strings in the event. This default behavior could be computationally +expensive, so if you know exactly which fields you wish to truncate, it is +advised that you be specific and configure the fields you want truncated. + +Special behaviors for non-string fields: + +* Numbers: No action +* Array: this plugin will attempt truncation on all elements of that array. +* Hash: truncate will try all values of the hash (recursively, if this hash +contains other hashes). + +[id="{version}-plugins-{type}s-{plugin}-length_bytes"] +===== `length_bytes` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Fields over this length will be truncated to this length. + +Truncation happens from the end of the text (the start will be kept). 
+
+As an example, if you set `length_bytes => 10` and a field contains "hello
+world, how are you?", then this field will be truncated and have this value:
+"hello worl"
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/unique-index.asciidoc b/docs/versioned-plugins/filters/unique-index.asciidoc
new file mode 100644
index 000000000..e36df6726
--- /dev/null
+++ b/docs/versioned-plugins/filters/unique-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: unique
+:type: filter
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-12-13
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::unique-v3.0.0.asciidoc[]
+include::unique-v2.0.6.asciidoc[]
+include::unique-v2.0.5.asciidoc[]
+
diff --git a/docs/versioned-plugins/filters/unique-v2.0.5.asciidoc b/docs/versioned-plugins/filters/unique-v2.0.5.asciidoc
new file mode 100644
index 000000000..9585a48dc
--- /dev/null
+++ b/docs/versioned-plugins/filters/unique-v2.0.5.asciidoc
@@ -0,0 +1,53 @@
+:plugin: unique
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.5
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-unique/blob/v2.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Unique filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Unique Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-fields"]
+===== `fields`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The fields on which to run the unique filter.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/unique-v2.0.6.asciidoc b/docs/versioned-plugins/filters/unique-v2.0.6.asciidoc
new file mode 100644
index 000000000..87b6252b0
--- /dev/null
+++ b/docs/versioned-plugins/filters/unique-v2.0.6.asciidoc
@@ -0,0 +1,53 @@
+:plugin: unique
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+/////////////////////////////////////////// +:version: v2.0.6 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-unique/blob/v2.0.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Unique filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Unique Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-fields"] +===== `fields` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The fields on which to run the unique filter. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/unique-v3.0.0.asciidoc b/docs/versioned-plugins/filters/unique-v3.0.0.asciidoc new file mode 100644 index 000000000..23802949d --- /dev/null +++ b/docs/versioned-plugins/filters/unique-v3.0.0.asciidoc @@ -0,0 +1,53 @@ +:plugin: unique +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.0 +:release_date: 2017-12-13 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-unique/blob/v3.0.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Unique filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Unique Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-fields"] +===== `fields` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The fields on which to run the unique filter. 
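+For example, a minimal sketch that runs the unique filter on a single field
+(the field name is illustrative, and the array form of `fields` is assumed):
+[source,ruby]
+    filter {
+      unique {
+        fields => ["message"]
+      }
+    }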
+ + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/urldecode-index.asciidoc b/docs/versioned-plugins/filters/urldecode-index.asciidoc new file mode 100644 index 000000000..5c842a410 --- /dev/null +++ b/docs/versioned-plugins/filters/urldecode-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: urldecode +:type: filter + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::urldecode-v3.0.6.asciidoc[] +include::urldecode-v3.0.5.asciidoc[] +include::urldecode-v3.0.4.asciidoc[] + diff --git a/docs/versioned-plugins/filters/urldecode-v3.0.4.asciidoc b/docs/versioned-plugins/filters/urldecode-v3.0.4.asciidoc new file mode 100644 index 000000000..d45825ba1 --- /dev/null +++ b/docs/versioned-plugins/filters/urldecode-v3.0.4.asciidoc @@ -0,0 +1,83 @@ +:plugin: urldecode +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-urldecode/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Urldecode filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The urldecode filter is for decoding fields that are urlencoded. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Urldecode Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-all_fields>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +| <<{version}-plugins-{type}s-{plugin}-field>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. 
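+ +As a quick illustration (the `referrer` field name below is an assumption for the example, not part of the original docs), decoding a single urlencoded field might look like: + +[source,ruby] + filter { + urldecode { + field => "referrer" + } + } +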
+ +&nbsp; + +[id="{version}-plugins-{type}s-{plugin}-all_fields"] +===== `all_fields` + + * Value type is <> + * Default value is `false` + +Urldecode all fields + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` + * Default value is `"UTF-8"` + +The character encoding used in this filter. Examples include `UTF-8` +and `cp1252`. + +This setting is useful if your urlencoded strings are in `Latin-1` (aka `cp1252`) +or in a character set other than `UTF-8`. + +[id="{version}-plugins-{type}s-{plugin}-field"] +===== `field` + + * Value type is <> + * Default value is `"message"` + +The field whose value is urldecoded + +[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] +===== `tag_on_failure` + + * Value type is <> + * Default value is `["_urldecodefailure"]` + +Append values to the `tags` field when an exception is thrown + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/urldecode-v3.0.5.asciidoc b/docs/versioned-plugins/filters/urldecode-v3.0.5.asciidoc new file mode 100644 index 000000000..3827e8217 --- /dev/null +++ b/docs/versioned-plugins/filters/urldecode-v3.0.5.asciidoc @@ -0,0 +1,83 @@ +:plugin: urldecode +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT!
+/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-urldecode/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Urldecode filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The urldecode filter is for decoding fields that are urlencoded. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Urldecode Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-all_fields>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +| <<{version}-plugins-{type}s-{plugin}-field>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. 
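+ +For example, a minimal sketch (illustrative only, not from the original docs) that decodes every field on the event rather than just one: + +[source,ruby] + filter { + urldecode { + all_fields => true + } + } +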
+ +&nbsp; + +[id="{version}-plugins-{type}s-{plugin}-all_fields"] +===== `all_fields` + + * Value type is <> + * Default value is `false` + +Urldecode all fields + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` + * Default value is `"UTF-8"` + +The character encoding used in this filter. Examples include `UTF-8` +and `cp1252`. + +This setting is useful if your urlencoded strings are in `Latin-1` (aka `cp1252`) +or in a character set other than `UTF-8`. + +[id="{version}-plugins-{type}s-{plugin}-field"] +===== `field` + + * Value type is <> + * Default value is `"message"` + +The field whose value is urldecoded + +[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] +===== `tag_on_failure` + + * Value type is <> + * Default value is `["_urldecodefailure"]` + +Append values to the `tags` field when an exception is thrown + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/urldecode-v3.0.6.asciidoc b/docs/versioned-plugins/filters/urldecode-v3.0.6.asciidoc new file mode 100644 index 000000000..4085edc58 --- /dev/null +++ b/docs/versioned-plugins/filters/urldecode-v3.0.6.asciidoc @@ -0,0 +1,83 @@ +:plugin: urldecode +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT!
+/////////////////////////////////////////// +:version: v3.0.6 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-urldecode/blob/v3.0.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Urldecode filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The urldecode filter is for decoding fields that are urlencoded. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Urldecode Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-all_fields>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +| <<{version}-plugins-{type}s-{plugin}-field>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. 
+ +&nbsp; + +[id="{version}-plugins-{type}s-{plugin}-all_fields"] +===== `all_fields` + + * Value type is <> + * Default value is `false` + +Urldecode all fields + +[id="{version}-plugins-{type}s-{plugin}-charset"] +===== `charset` + + * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` + * Default value is `"UTF-8"` + +The character encoding used in this filter. Examples include `UTF-8` +and `cp1252`. + +This setting is useful if your urlencoded strings are in `Latin-1` (aka `cp1252`) +or in a character set other than `UTF-8`.
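+ +For instance, a hedged sketch of decoding input that arrives as `cp1252` rather than `UTF-8` (the `referrer` field name is illustrative, not part of the original docs): + +[source,ruby] + filter { + urldecode { + field => "referrer" + charset => "CP1252" + } + } +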
+ +[id="{version}-plugins-{type}s-{plugin}-field"] +===== `field` + + * Value type is <> + * Default value is `"message"` + +The field whose value is urldecoded + +[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] +===== `tag_on_failure` + + * Value type is <> + * Default value is `["_urldecodefailure"]` + +Append values to the `tags` field when an exception is thrown + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/useragent-index.asciidoc b/docs/versioned-plugins/filters/useragent-index.asciidoc new file mode 100644 index 000000000..0f51eeb6f --- /dev/null +++ b/docs/versioned-plugins/filters/useragent-index.asciidoc @@ -0,0 +1,22 @@ +:plugin: useragent +:type: filter + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-15 +| <> | 2017-07-10 +| <> | 2017-07-06 +| <> | 2017-05-15 +| <> | 2017-05-10 +|======================================================================= + +include::useragent-v3.2.2.asciidoc[] +include::useragent-v3.2.1.asciidoc[] +include::useragent-v3.2.0.asciidoc[] +include::useragent-v3.1.3.asciidoc[] +include::useragent-v3.1.1.asciidoc[] +include::useragent-v3.1.0.asciidoc[] + diff --git a/docs/versioned-plugins/filters/useragent-v3.1.0.asciidoc b/docs/versioned-plugins/filters/useragent-v3.1.0.asciidoc new file mode 100644 index 000000000..dd3e6f450 --- /dev/null +++ b/docs/versioned-plugins/filters/useragent-v3.1.0.asciidoc @@ -0,0 +1,117 @@ +:plugin: useragent +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.0 +:release_date: 2017-05-10 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-useragent/blob/v3.1.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Useragent filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Parse user agent strings into structured data based on BrowserScope data + +UserAgent filter, adds information about user agent like family, operating +system, version, and device + +Logstash releases ship with the regexes.yaml database made available from +ua-parser with an Apache 2.0 license. For more details on ua-parser, see +. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Useragent Filter Configuration Options + +This plugin supports the following configuration options plus the <> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-lru_cache_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-regexes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +|======================================================================= + +Also see <> for a list of options supported by all +filter plugins.
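+ +As an illustrative sketch (the `agent` source field and `user_agent` target are assumptions for the example, not part of the original docs), a typical configuration might be: + +[source,ruby] + filter { + useragent { + source => "agent" + target => "user_agent" + } + } +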
+ +&nbsp; + +[id="{version}-plugins-{type}s-{plugin}-lru_cache_size"] +===== `lru_cache_size` + + * Value type is <> + * Default value is `1000` + +UA parsing is surprisingly expensive. This filter uses an LRU cache to take advantage of the fact that +user agents are often found adjacent to one another in log files and rarely have a random distribution. +The higher you set this the more likely an item is to be in the cache and the faster this filter will run. +However, if you set this too high you can use more memory than desired. + +Experiment with different values for this option to find the best performance for your dataset. + +This MUST be set to a value > 0. There is really no reason to not want this behavior, the overhead is minimal +and the speed gains are large. + +It is important to note that this config value is global. That is to say all instances of the user agent filter +share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit +to having multiple caches for different instances at different points in the pipeline, that would just increase the +number of cache misses and waste memory. + +[id="{version}-plugins-{type}s-{plugin}-prefix"] +===== `prefix` + + * Value type is <> + * Default value is `""` + +A string to prepend to all of the extracted keys + +[id="{version}-plugins-{type}s-{plugin}-regexes"] +===== `regexes` + + * Value type is <> + * There is no default value for this setting. + +`regexes.yaml` file to use + +If not specified, this will default to the `regexes.yaml` that ships +with logstash. + +You can find the latest version of this here: + + +[id="{version}-plugins-{type}s-{plugin}-source"] +===== `source` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The field containing the user agent string. If this field is an +array, only the first value will be used. + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +The name of the field to assign user agent data into. + +If not specified user agent data will be stored in the root of the event. + + + +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/useragent-v3.1.1.asciidoc b/docs/versioned-plugins/filters/useragent-v3.1.1.asciidoc new file mode 100644 index 000000000..55fa6aa1c --- /dev/null +++ b/docs/versioned-plugins/filters/useragent-v3.1.1.asciidoc @@ -0,0 +1,117 @@ +:plugin: useragent +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.1 +:release_date: 2017-05-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-useragent/blob/v3.1.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Useragent filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Parse user agent strings into structured data based on BrowserScope data + +UserAgent filter, adds information about user agent like family, operating +system, version, and device + +Logstash releases ship with the regexes.yaml database made available from +ua-parser with an Apache 2.0 license. For more details on ua-parser, see +.
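+ +For illustration (field name and values below are assumptions, not from the original docs), a sketch that prefixes the extracted keys and enlarges the shared cache might look like: + +[source,ruby] + filter { + useragent { + source => "agent" + prefix => "ua_" + lru_cache_size => 10000 + } + } +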
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Useragent Filter Configuration Options + +This plugin supports the following configuration options plus the <> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-lru_cache_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-regexes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +|======================================================================= + +Also see <> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-lru_cache_size"] +===== `lru_cache_size` + + * Value type is <> + * Default value is `1000` + +UA parsing is surprisingly expensive. This filter uses an LRU cache to take advantage of the fact that +user agents are often found adjacent to one another in log files and rarely have a random distribution. +The higher you set this the more likely an item is to be in the cache and the faster this filter will run. +However, if you set this too high you can use more memory than desired. + +Experiment with different values for this option to find the best performance for your dataset. + +This MUST be set to a value > 0. There is really no reason to not want this behavior, the overhead is minimal +and the speed gains are large. + +It is important to note that this config value is global. That is to say all instances of the user agent filter +share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit +to having multiple caches for different instances at different points in the pipeline, that would just increase the +number of cache misses and waste memory. + +[id="{version}-plugins-{type}s-{plugin}-prefix"] +===== `prefix` + + * Value type is <> + * Default value is `""` + +A string to prepend to all of the extracted keys + +[id="{version}-plugins-{type}s-{plugin}-regexes"] +===== `regexes` + + * Value type is <> + * There is no default value for this setting. + +`regexes.yaml` file to use + +If not specified, this will default to the `regexes.yaml` that ships +with logstash. + +You can find the latest version of this here: + + +[id="{version}-plugins-{type}s-{plugin}-source"] +===== `source` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The field containing the user agent string. If this field is an +array, only the first value will be used. + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +The name of the field to assign user agent data into. + +If not specified user agent data will be stored in the root of the event. + + + +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/useragent-v3.1.3.asciidoc b/docs/versioned-plugins/filters/useragent-v3.1.3.asciidoc new file mode 100644 index 000000000..85c7340ad --- /dev/null +++ b/docs/versioned-plugins/filters/useragent-v3.1.3.asciidoc @@ -0,0 +1,118 @@ +:plugin: useragent +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// +:version: v3.1.3 +:release_date: 2017-07-06 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-useragent/blob/v3.1.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Useragent filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Parse user agent strings into structured data based on BrowserScope data + +UserAgent filter, adds information about user agent like family, operating +system, version, and device + +Logstash releases ship with the regexes.yaml database made available from +ua-parser with an Apache 2.0 license. For more details on ua-parser, see +. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Useragent Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-lru_cache_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-regexes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-lru_cache_size"] +===== `lru_cache_size` + + * Value type is <> + * Default value is `1000` + +UA parsing is surprisingly expensive. This filter uses an LRU cache to take advantage of the fact that +user agents are often found adjacent to one another in log files and rarely have a random distribution. +The higher you set this the more likely an item is to be in the cache and the faster this filter will run. +However, if you set this too high you can use more memory than desired. + +Experiment with different values for this option to find the best performance for your dataset. + +This MUST be set to a value > 0. There is really no reason to not want this behavior, the overhead is minimal +and the speed gains are large. + +It is important to note that this config value is global. That is to say all instances of the user agent filter +share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit +to having multiple caches for different instances at different points in the pipeline, that would just increase the +number of cache misses and waste memory. + +[id="{version}-plugins-{type}s-{plugin}-prefix"] +===== `prefix` + + * Value type is <> + * Default value is `""` + +A string to prepend to all of the extracted keys + +[id="{version}-plugins-{type}s-{plugin}-regexes"] +===== `regexes` + + * Value type is <> + * There is no default value for this setting. + +`regexes.yaml` file to use + +If not specified, this will default to the `regexes.yaml` that ships +with logstash. + +You can find the latest version of this here: + + +[id="{version}-plugins-{type}s-{plugin}-source"] +===== `source` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. 
+ +The field containing the user agent string. If this field is an +array, only the first value will be used. + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +The name of the field to assign user agent data into. + +If not specified user agent data will be stored in the root of the event. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/useragent-v3.2.0.asciidoc b/docs/versioned-plugins/filters/useragent-v3.2.0.asciidoc new file mode 100644 index 000000000..ad2b8f5f3 --- /dev/null +++ b/docs/versioned-plugins/filters/useragent-v3.2.0.asciidoc @@ -0,0 +1,118 @@ +:plugin: useragent +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.2.0 +:release_date: 2017-07-10 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-useragent/blob/v3.2.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Useragent filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Parse user agent strings into structured data based on BrowserScope data + +UserAgent filter, adds information about user agent like family, operating +system, version, and device + +Logstash releases ship with the regexes.yaml database made available from +ua-parser with an Apache 2.0 license. For more details on ua-parser, see +. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Useragent Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-lru_cache_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-regexes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-lru_cache_size"] +===== `lru_cache_size` + + * Value type is <> + * Default value is `1000` + +UA parsing is surprisingly expensive. This filter uses an LRU cache to take advantage of the fact that +user agents are often found adjacent to one another in log files and rarely have a random distribution. +The higher you set this the more likely an item is to be in the cache and the faster this filter will run. +However, if you set this too high you can use more memory than desired. + +Experiment with different values for this option to find the best performance for your dataset. + +This MUST be set to a value > 0. There is really no reason to not want this behavior, the overhead is minimal +and the speed gains are large. + +It is important to note that this config value is global. 
That is to say all instances of the user agent filter +share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit +to having multiple caches for different instances at different points in the pipeline, that would just increase the +number of cache misses and waste memory. + +[id="{version}-plugins-{type}s-{plugin}-prefix"] +===== `prefix` + + * Value type is <> + * Default value is `""` + +A string to prepend to all of the extracted keys + +[id="{version}-plugins-{type}s-{plugin}-regexes"] +===== `regexes` + + * Value type is <> + * There is no default value for this setting. + +`regexes.yaml` file to use + +If not specified, this will default to the `regexes.yaml` that ships +with logstash. + +You can find the latest version of this here: + + +[id="{version}-plugins-{type}s-{plugin}-source"] +===== `source` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The field containing the user agent string. If this field is an +array, only the first value will be used. + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +The name of the field to assign user agent data into. + +If not specified user agent data will be stored in the root of the event. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/useragent-v3.2.1.asciidoc b/docs/versioned-plugins/filters/useragent-v3.2.1.asciidoc new file mode 100644 index 000000000..27ab74ef8 --- /dev/null +++ b/docs/versioned-plugins/filters/useragent-v3.2.1.asciidoc @@ -0,0 +1,118 @@ +:plugin: useragent +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.2.1 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-useragent/blob/v3.2.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Useragent filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Parse user agent strings into structured data based on BrowserScope data + +UserAgent filter, adds information about user agent like family, operating +system, version, and device + +Logstash releases ship with the regexes.yaml database made available from +ua-parser with an Apache 2.0 license. For more details on ua-parser, see +. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Useragent Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-lru_cache_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-regexes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-lru_cache_size"] +===== `lru_cache_size` + + * Value type is <> + * Default value is `1000` + +UA parsing is surprisingly expensive. This filter uses an LRU cache to take advantage of the fact that +user agents are often found adjacent to one another in log files and rarely have a random distribution. +The higher you set this the more likely an item is to be in the cache and the faster this filter will run. +However, if you set this too high you can use more memory than desired. + +Experiment with different values for this option to find the best performance for your dataset. + +This MUST be set to a value > 0. There is really no reason to not want this behavior, the overhead is minimal +and the speed gains are large. + +It is important to note that this config value is global. That is to say all instances of the user agent filter +share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit +to having multiple caches for different instances at different points in the pipeline, that would just increase the +number of cache misses and waste memory. + +[id="{version}-plugins-{type}s-{plugin}-prefix"] +===== `prefix` + + * Value type is <> + * Default value is `""` + +A string to prepend to all of the extracted keys + +[id="{version}-plugins-{type}s-{plugin}-regexes"] +===== `regexes` + + * Value type is <> + * There is no default value for this setting. + +`regexes.yaml` file to use + +If not specified, this will default to the `regexes.yaml` that ships +with logstash. + +You can find the latest version of this here: + + +[id="{version}-plugins-{type}s-{plugin}-source"] +===== `source` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The field containing the user agent string. If this field is an +array, only the first value will be used. + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +The name of the field to assign user agent data into. + +If not specified user agent data will be stored in the root of the event. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/useragent-v3.2.2.asciidoc b/docs/versioned-plugins/filters/useragent-v3.2.2.asciidoc new file mode 100644 index 000000000..d8f42111b --- /dev/null +++ b/docs/versioned-plugins/filters/useragent-v3.2.2.asciidoc @@ -0,0 +1,118 @@ +:plugin: useragent +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// +:version: v3.2.2 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-useragent/blob/v3.2.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Useragent filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Parse user agent strings into structured data based on BrowserScope data + +UserAgent filter, adds information about user agent like family, operating +system, version, and device + +Logstash releases ship with the regexes.yaml database made available from +ua-parser with an Apache 2.0 license. For more details on ua-parser, see +. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Useragent Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-lru_cache_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-regexes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-lru_cache_size"] +===== `lru_cache_size` + + * Value type is <> + * Default value is `1000` + +UA parsing is surprisingly expensive. This filter uses an LRU cache to take advantage of the fact that +user agents are often found adjacent to one another in log files and rarely have a random distribution. +The higher you set this the more likely an item is to be in the cache and the faster this filter will run. +However, if you set this too high you can use more memory than desired. + +Experiment with different values for this option to find the best performance for your dataset. + +This MUST be set to a value > 0. There is really no reason to not want this behavior, the overhead is minimal +and the speed gains are large. + +It is important to note that this config value is global. That is to say all instances of the user agent filter +share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit +to having multiple caches for different instances at different points in the pipeline, that would just increase the +number of cache misses and waste memory. + +[id="{version}-plugins-{type}s-{plugin}-prefix"] +===== `prefix` + + * Value type is <> + * Default value is `""` + +A string to prepend to all of the extracted keys + +[id="{version}-plugins-{type}s-{plugin}-regexes"] +===== `regexes` + + * Value type is <> + * There is no default value for this setting. + +`regexes.yaml` file to use + +If not specified, this will default to the `regexes.yaml` that ships +with logstash. + +You can find the latest version of this here: + + +[id="{version}-plugins-{type}s-{plugin}-source"] +===== `source` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. 
+ +The field containing the user agent string. If this field is an +array, only the first value will be used. + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +The name of the field to assign user agent data into. + +If not specified user agent data will be stored in the root of the event. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/uuid-index.asciidoc b/docs/versioned-plugins/filters/uuid-index.asciidoc new file mode 100644 index 000000000..0895fd830 --- /dev/null +++ b/docs/versioned-plugins/filters/uuid-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: uuid +:type: filter + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::uuid-v3.0.5.asciidoc[] +include::uuid-v3.0.4.asciidoc[] +include::uuid-v3.0.3.asciidoc[] + diff --git a/docs/versioned-plugins/filters/uuid-v3.0.3.asciidoc b/docs/versioned-plugins/filters/uuid-v3.0.3.asciidoc new file mode 100644 index 000000000..6f3565034 --- /dev/null +++ b/docs/versioned-plugins/filters/uuid-v3.0.3.asciidoc @@ -0,0 +1,95 @@ +:plugin: uuid +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-uuid/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Uuid filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The uuid filter allows you to generate a +https://en.wikipedia.org/wiki/Universally_unique_identifier[UUID] +and add it as a field to each processed event. + +This is useful if you need to generate a string that's unique for every +event, even if the same input is processed multiple times. If you want +to generate strings that are identical each time an event with a given +content is processed (i.e. a hash) you should use the +<> instead. + +The generated UUIDs follow the version 4 definition in +https://tools.ietf.org/html/rfc4122[RFC 4122] and will be +represented in the standard hexadecimal string format, e.g. +"e08806fe-02af-406c-bbde-8a5ae4475e57". + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Uuid Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-overwrite>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins.
+ +&nbsp; + +[id="{version}-plugins-{type}s-{plugin}-overwrite"] +===== `overwrite` + + * Value type is <> + * Default value is `false` + +Whether the value currently in the field (if any) should be overwritten +by the generated UUID. Defaults to `false` (i.e. if the field is +present, with ANY value, it won't be overwritten) + +Example: +[source,ruby] + filter { + uuid { + target => "uuid" + overwrite => true + } + } + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Select the name of the field where the generated UUID should be +stored. + +Example: +[source,ruby] + filter { + uuid { + target => "uuid" + } + } + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/uuid-v3.0.4.asciidoc b/docs/versioned-plugins/filters/uuid-v3.0.4.asciidoc new file mode 100644 index 000000000..7f737a590 --- /dev/null +++ b/docs/versioned-plugins/filters/uuid-v3.0.4.asciidoc @@ -0,0 +1,95 @@ +:plugin: uuid +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-uuid/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Uuid filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The uuid filter allows you to generate a +https://en.wikipedia.org/wiki/Universally_unique_identifier[UUID] +and add it as a field to each processed event. + +This is useful if you need to generate a string that's unique for every +event, even if the same input is processed multiple times. If you want +to generate strings that are identical each time an event with a given +content is processed (i.e. a hash) you should use the +<> instead. + +The generated UUIDs follow the version 4 definition in +https://tools.ietf.org/html/rfc4122[RFC 4122] and will be +represented in the standard hexadecimal string format, e.g. +"e08806fe-02af-406c-bbde-8a5ae4475e57". + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Uuid Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-overwrite>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +&nbsp; + +[id="{version}-plugins-{type}s-{plugin}-overwrite"] +===== `overwrite` + + * Value type is <> + * Default value is `false` + +Whether the value currently in the field (if any) should be overwritten +by the generated UUID. Defaults to `false` (i.e.
if the field is +present, with ANY value, it won't be overwritten) + +Example: +[source,ruby] + filter { + uuid { + target => "uuid" + overwrite => true + } + } + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Select the name of the field where the generated UUID should be +stored. + +Example: +[source,ruby] + filter { + uuid { + target => "uuid" + } + } + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/uuid-v3.0.5.asciidoc b/docs/versioned-plugins/filters/uuid-v3.0.5.asciidoc new file mode 100644 index 000000000..586a1aba4 --- /dev/null +++ b/docs/versioned-plugins/filters/uuid-v3.0.5.asciidoc @@ -0,0 +1,95 @@ +:plugin: uuid +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-uuid/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Uuid filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The uuid filter allows you to generate a +https://en.wikipedia.org/wiki/Universally_unique_identifier[UUID] +and add it as a field to each processed event. + +This is useful if you need to generate a string that's unique for every +event, even if the same input is processed multiple times. If you want +to generate strings that are identical each time an event with a given +content is processed (i.e. a hash) you should use the +<> instead. + +The generated UUIDs follow the version 4 definition in +https://tools.ietf.org/html/rfc4122[RFC 4122] and will be +represented in the standard hexadecimal string format, e.g. +"e08806fe-02af-406c-bbde-8a5ae4475e57". + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Uuid Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-overwrite>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +&nbsp; + +[id="{version}-plugins-{type}s-{plugin}-overwrite"] +===== `overwrite` + + * Value type is <> + * Default value is `false` + +Whether the value currently in the field (if any) should be overwritten +by the generated UUID. Defaults to `false` (i.e. if the field is +present, with ANY value, it won't be overwritten) + +Example: +[source,ruby] + filter { + uuid { + target => "uuid" + overwrite => true + } + } + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Select the name of the field where the generated UUID should be +stored.
+ +Example: +[source,ruby] + filter { + uuid { + target => "uuid" + } + } + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/xml-index.asciidoc b/docs/versioned-plugins/filters/xml-index.asciidoc new file mode 100644 index 000000000..3e29bb025 --- /dev/null +++ b/docs/versioned-plugins/filters/xml-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: xml +:type: filter + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::xml-v4.0.5.asciidoc[] +include::xml-v4.0.4.asciidoc[] +include::xml-v4.0.3.asciidoc[] + diff --git a/docs/versioned-plugins/filters/xml-v4.0.3.asciidoc b/docs/versioned-plugins/filters/xml-v4.0.3.asciidoc new file mode 100644 index 000000000..4964fb159 --- /dev/null +++ b/docs/versioned-plugins/filters/xml-v4.0.3.asciidoc @@ -0,0 +1,187 @@ +:plugin: xml +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.0.3 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-xml/blob/v4.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Xml filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +XML filter. Takes a field that contains XML and expands it into +an actual datastructure. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Xml Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-force_array>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-force_content>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-namespaces>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-remove_namespaces>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-store_xml>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-suppress_empty>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-xpath>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-force_array"] +===== `force_array` + + * Value type is <> + * Default value is `true` + +By default the filter will force single elements to be arrays. Setting this to +false will prevent storing single elements in arrays. + +[id="{version}-plugins-{type}s-{plugin}-force_content"] +===== `force_content` + + * Value type is <> + * Default value is `false` + +By default the filter will expand attributes differently from content inside +of tags. This option allows you to force text content and attributes to always +parse to a hash value. 
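+
+For illustration, a minimal sketch combining these two options (the `source`
+field name here is an assumption, not part of the original docs):
+
+[source,ruby]
+    filter {
+      xml {
+        source        => "message"
+        force_array   => false  # single elements stay scalars instead of one-element arrays
+        force_content => true   # text content and attributes both parse to hash values
+      }
+    }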
+ +[id="{version}-plugins-{type}s-{plugin}-namespaces"] +===== `namespaces` + + * Value type is <> + * Default value is `{}` + +By default only namespaces declarations on the root element are considered. +This allows to configure all namespace declarations to parse the XML document. + +Example: + +[source,ruby] +filter { + xml { + namespaces => { + "xsl" => "http://www.w3.org/1999/XSL/Transform" + "xhtml" => http://www.w3.org/1999/xhtml" + } + } +} + + +[id="{version}-plugins-{type}s-{plugin}-remove_namespaces"] +===== `remove_namespaces` + + * Value type is <> + * Default value is `false` + +Remove all namespaces from all nodes in the document. +Of course, if the document had nodes with the same names but different namespaces, they will now be ambiguous. + +[id="{version}-plugins-{type}s-{plugin}-source"] +===== `source` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Config for xml to hash is: +[source,ruby] + source => source_field + +For example, if you have the whole XML document in your `message` field: +[source,ruby] + filter { + xml { + source => "message" + } + } + +The above would parse the XML from the `message` field. + +[id="{version}-plugins-{type}s-{plugin}-store_xml"] +===== `store_xml` + + * Value type is <> + * Default value is `true` + +By default the filter will store the whole parsed XML in the destination +field as described above. Setting this to false will prevent that. + +[id="{version}-plugins-{type}s-{plugin}-suppress_empty"] +===== `suppress_empty` + + * Value type is <> + * Default value is `true` + +By default, output nothing if the element is empty. +If set to `false`, empty element will result in an empty hash object. + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +Define target for placing the data + +For example if you want the data to be put in the `doc` field: +[source,ruby] + filter { + xml { + target => "doc" + } + } + +XML in the value of the source field will be expanded into a +datastructure in the `target` field. +Note: if the `target` field already exists, it will be overridden. +Required if `store_xml` is true (which is the default). + +[id="{version}-plugins-{type}s-{plugin}-xpath"] +===== `xpath` + + * Value type is <> + * Default value is `{}` + +xpath will additionally select string values (non-strings will be +converted to strings with Ruby's `to_s` function) from parsed XML +(using each source field defined using the method above) and place +those values in the destination fields. Configuration: +[source,ruby] +xpath => [ "xpath-syntax", "destination-field" ] + +Values returned by XPath parsing from `xpath-syntax` will be put in the +destination field. Multiple values returned will be pushed onto the +destination field as an array. As such, multiple matches across +multiple source fields will produce duplicate entries in the field. 
+ +More on XPath: http://www.w3schools.com/xml/xml_xpath.asp + +The XPath functions are particularly powerful: +http://www.w3schools.com/xsl/xsl_functions.asp + + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/xml-v4.0.4.asciidoc b/docs/versioned-plugins/filters/xml-v4.0.4.asciidoc new file mode 100644 index 000000000..085ffde90 --- /dev/null +++ b/docs/versioned-plugins/filters/xml-v4.0.4.asciidoc @@ -0,0 +1,187 @@ +:plugin: xml +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.0.4 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-xml/blob/v4.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Xml filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +XML filter. Takes a field that contains XML and expands it into +an actual datastructure. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Xml Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-force_array>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-force_content>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-namespaces>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-remove_namespaces>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-store_xml>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-suppress_empty>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-xpath>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-force_array"] +===== `force_array` + + * Value type is <> + * Default value is `true` + +By default the filter will force single elements to be arrays. Setting this to +false will prevent storing single elements in arrays. + +[id="{version}-plugins-{type}s-{plugin}-force_content"] +===== `force_content` + + * Value type is <> + * Default value is `false` + +By default the filter will expand attributes differently from content inside +of tags. This option allows you to force text content and attributes to always +parse to a hash value. + +[id="{version}-plugins-{type}s-{plugin}-namespaces"] +===== `namespaces` + + * Value type is <> + * Default value is `{}` + +By default only namespaces declarations on the root element are considered. +This allows to configure all namespace declarations to parse the XML document. 
+ +Example: + +[source,ruby] +filter { + xml { + namespaces => { + "xsl" => "http://www.w3.org/1999/XSL/Transform" + "xhtml" => "http://www.w3.org/1999/xhtml" + } + } +} + + +[id="{version}-plugins-{type}s-{plugin}-remove_namespaces"] +===== `remove_namespaces` + + * Value type is <> + * Default value is `false` + +Remove all namespaces from all nodes in the document. +Of course, if the document had nodes with the same names but different namespaces, they will now be ambiguous. + +[id="{version}-plugins-{type}s-{plugin}-source"] +===== `source` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Config for xml to hash is: +[source,ruby] + source => source_field + +For example, if you have the whole XML document in your `message` field: +[source,ruby] + filter { + xml { + source => "message" + } + } + +The above would parse the XML from the `message` field. + +[id="{version}-plugins-{type}s-{plugin}-store_xml"] +===== `store_xml` + + * Value type is <> + * Default value is `true` + +By default the filter will store the whole parsed XML in the destination +field as described above. Setting this to false will prevent that. + +[id="{version}-plugins-{type}s-{plugin}-suppress_empty"] +===== `suppress_empty` + + * Value type is <> + * Default value is `true` + +By default, output nothing if the element is empty. +If set to `false`, empty element will result in an empty hash object. + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +Define target for placing the data + +For example if you want the data to be put in the `doc` field: +[source,ruby] + filter { + xml { + target => "doc" + } + } + +XML in the value of the source field will be expanded into a +datastructure in the `target` field. +Note: if the `target` field already exists, it will be overridden. +Required if `store_xml` is true (which is the default). + +[id="{version}-plugins-{type}s-{plugin}-xpath"] +===== `xpath` + + * Value type is <> + * Default value is `{}` + +xpath will additionally select string values (non-strings will be +converted to strings with Ruby's `to_s` function) from parsed XML +(using each source field defined using the method above) and place +those values in the destination fields. Configuration: +[source,ruby] +xpath => [ "xpath-syntax", "destination-field" ] + +Values returned by XPath parsing from `xpath-syntax` will be put in the +destination field. Multiple values returned will be pushed onto the +destination field as an array. As such, multiple matches across +multiple source fields will produce duplicate entries in the field. + +More on XPath: http://www.w3schools.com/xml/xml_xpath.asp + +The XPath functions are particularly powerful: +http://www.w3schools.com/xsl/xsl_functions.asp + + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/xml-v4.0.5.asciidoc b/docs/versioned-plugins/filters/xml-v4.0.5.asciidoc new file mode 100644 index 000000000..d3908bd2d --- /dev/null +++ b/docs/versioned-plugins/filters/xml-v4.0.5.asciidoc @@ -0,0 +1,187 @@ +:plugin: xml +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// +:version: v4.0.5 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-xml/blob/v4.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Xml filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +XML filter. Takes a field that contains XML and expands it into +an actual datastructure. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Xml Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-force_array>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-force_content>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-namespaces>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-remove_namespaces>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-store_xml>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-suppress_empty>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-xpath>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-force_array"] +===== `force_array` + + * Value type is <> + * Default value is `true` + +By default the filter will force single elements to be arrays. Setting this to +false will prevent storing single elements in arrays. + +[id="{version}-plugins-{type}s-{plugin}-force_content"] +===== `force_content` + + * Value type is <> + * Default value is `false` + +By default the filter will expand attributes differently from content inside +of tags. This option allows you to force text content and attributes to always +parse to a hash value. + +[id="{version}-plugins-{type}s-{plugin}-namespaces"] +===== `namespaces` + + * Value type is <> + * Default value is `{}` + +By default only namespaces declarations on the root element are considered. +This allows to configure all namespace declarations to parse the XML document. + +Example: + +[source,ruby] +filter { + xml { + namespaces => { + "xsl" => "http://www.w3.org/1999/XSL/Transform" + "xhtml" => "http://www.w3.org/1999/xhtml" + } + } +} + + +[id="{version}-plugins-{type}s-{plugin}-remove_namespaces"] +===== `remove_namespaces` + + * Value type is <> + * Default value is `false` + +Remove all namespaces from all nodes in the document. +Of course, if the document had nodes with the same names but different namespaces, they will now be ambiguous. + +[id="{version}-plugins-{type}s-{plugin}-source"] +===== `source` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Config for xml to hash is: +[source,ruby] + source => source_field + +For example, if you have the whole XML document in your `message` field: +[source,ruby] + filter { + xml { + source => "message" + } + } + +The above would parse the XML from the `message` field. 
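+
+To make the expansion concrete, assume (hypothetically) that `message`
+contains `<person><name>Jane</name></person>` and that a `target` is set
+(see the `target` option below):
+
+[source,ruby]
+    filter {
+      xml {
+        source => "message"
+        target => "parsed"  # with default force_array, yields [parsed][name] => ["Jane"]
+      }
+    }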
+ +[id="{version}-plugins-{type}s-{plugin}-store_xml"] +===== `store_xml` + + * Value type is <> + * Default value is `true` + +By default the filter will store the whole parsed XML in the destination +field as described above. Setting this to false will prevent that. + +[id="{version}-plugins-{type}s-{plugin}-suppress_empty"] +===== `suppress_empty` + + * Value type is <> + * Default value is `true` + +By default, output nothing if the element is empty. +If set to `false`, empty element will result in an empty hash object. + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +Define target for placing the data + +For example if you want the data to be put in the `doc` field: +[source,ruby] + filter { + xml { + target => "doc" + } + } + +XML in the value of the source field will be expanded into a +datastructure in the `target` field. +Note: if the `target` field already exists, it will be overridden. +Required if `store_xml` is true (which is the default). + +[id="{version}-plugins-{type}s-{plugin}-xpath"] +===== `xpath` + + * Value type is <> + * Default value is `{}` + +xpath will additionally select string values (non-strings will be +converted to strings with Ruby's `to_s` function) from parsed XML +(using each source field defined using the method above) and place +those values in the destination fields. Configuration: +[source,ruby] +xpath => [ "xpath-syntax", "destination-field" ] + +Values returned by XPath parsing from `xpath-syntax` will be put in the +destination field. Multiple values returned will be pushed onto the +destination field as an array. As such, multiple matches across +multiple source fields will produce duplicate entries in the field. + +More on XPath: http://www.w3schools.com/xml/xml_xpath.asp + +The XPath functions are particularly powerful: +http://www.w3schools.com/xsl/xsl_functions.asp + + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/yaml-index.asciidoc b/docs/versioned-plugins/filters/yaml-index.asciidoc new file mode 100644 index 000000000..90b1f60a1 --- /dev/null +++ b/docs/versioned-plugins/filters/yaml-index.asciidoc @@ -0,0 +1,12 @@ +:plugin: yaml +:type: filter + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-06-23 +|======================================================================= + +include::yaml-v0.1.1.asciidoc[] + diff --git a/docs/versioned-plugins/filters/yaml-v0.1.1.asciidoc b/docs/versioned-plugins/filters/yaml-v0.1.1.asciidoc new file mode 100644 index 000000000..668533145 --- /dev/null +++ b/docs/versioned-plugins/filters/yaml-v0.1.1.asciidoc @@ -0,0 +1,103 @@ +:plugin: yaml +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v0.1.1 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-filter-yaml/blob/v0.1.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Yaml filter plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This is a YAML parsing filter. 
It takes an existing field which contains YAML and +expands it into an actual data structure within the Logstash event. + +By default it will place the parsed YAML in the root (top level) of the Logstash event, but this +filter can be configured to place the YAML into any arbitrary event field, using the +`target` configuration. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Yaml Filter Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-exclude_tags"] +===== `exclude_tags` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `[]` + +Only handle events without any of these tags. +Optional. + +[id="{version}-plugins-{type}s-{plugin}-source"] +===== `source` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The configuration for the YAML filter: +[source,ruby] + source => source_field + +For example, if you have YAML data in the @message field: +[source,ruby] + filter { + yaml { + source => "message" + } + } + +The above would parse the yaml from the @message field + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +Define the target field for placing the parsed data. If this setting is +omitted, the YAML data will be stored at the root (top level) of the event. + +For example, if you want the data to be put in the `doc` field: +[source,ruby] + filter { + yaml { + target => "doc" + } + } + +YAML in the value of the `source` field will be expanded into a +data structure in the `target` field. + +NOTE: if the `target` field already exists, it will be overwritten! + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/zeromq-index.asciidoc b/docs/versioned-plugins/filters/zeromq-index.asciidoc new file mode 100644 index 000000000..f02877fa0 --- /dev/null +++ b/docs/versioned-plugins/filters/zeromq-index.asciidoc @@ -0,0 +1,14 @@ +:plugin: zeromq +:type: filter + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::zeromq-v3.0.2.asciidoc[] +include::zeromq-v3.0.1.asciidoc[] + diff --git a/docs/versioned-plugins/filters/zeromq-v3.0.1.asciidoc b/docs/versioned-plugins/filters/zeromq-v3.0.1.asciidoc new file mode 100644 index 000000000..c8ade864b --- /dev/null +++ b/docs/versioned-plugins/filters/zeromq-v3.0.1.asciidoc @@ -0,0 +1,148 @@ +:plugin: zeromq +:type: filter + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+:version: v3.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-zeromq/blob/v3.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="plugins-{type}-{plugin}"]
+
+=== Zeromq filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+ZeroMQ filter. This is the best way to send an event externally for filtering.
+It works much like an exec filter would, by sending the event "offsite"
+for processing and waiting for a response.
+
+The protocol here is:
+ * REQ sent with JSON-serialized logstash event
+ * REP read expected to be the full JSON 'filtered' event
+ * if the reply read is an empty string, the event will be cancelled
+
+Note that this is a limited subset of the zeromq functionality in
+inputs and outputs. The only topology that makes sense here is
+REQ/REP.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Zeromq Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-add_tag_on_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-address>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-field>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No
+| <<{version}-plugins-{type}s-{plugin}-retries>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sentinel>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sockopt>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+filter plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-add_tag_on_timeout"]
+===== `add_tag_on_timeout` 
+
+  * Value type is <>
+  * Default value is `"zeromqtimeout"`
+
+Tag to add if the zeromq timeout expires before getting back an answer.
+If set to `""`, no tag will be added.
+
+[id="{version}-plugins-{type}s-{plugin}-address"]
+===== `address` 
+
+  * Value type is <>
+  * Default value is `"tcp://127.0.0.1:2121"`
+
+0mq socket address to connect or bind to.
+Please note that `inproc://` will not work with Logstash,
+as we use a context per thread.
+By default, filters connect.
+
+[id="{version}-plugins-{type}s-{plugin}-field"]
+===== `field` 
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The field to send off-site for processing.
+If this is unset, the whole event will be sent.
+
+[id="{version}-plugins-{type}s-{plugin}-mode"]
+===== `mode` 
+
+  * Value can be any of: `server`, `client`
+  * Default value is `"client"`
+
+0mq mode:
+server mode binds/listens,
+client mode connects.
+
+[id="{version}-plugins-{type}s-{plugin}-retries"]
+===== `retries` 
+
+  * Value type is <>
+  * Default value is `3`
+
+Number of retries, used for both sending and receiving messages.
+For sending, retries should return instantly.
+For receiving, the total blocking time is up to retries X timeout,
+which by default is 3 X 500 = 1500ms.
+
+[id="{version}-plugins-{type}s-{plugin}-sentinel"]
+===== `sentinel` 
+
+  * Value type is <>
+  * Default value is `""`
+
+A sentinel value to signal the filter to cancel the event.
+If the peer returns the sentinel value, the event will be cancelled.
+
+[id="{version}-plugins-{type}s-{plugin}-sockopt"]
+===== `sockopt` 
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+0mq socket options.
+This exposes zmq_setsockopt for advanced tuning;
+see http://api.zeromq.org/2-1:zmq-setsockopt for details.
+
+This is where you would set values like:
+ZMQ::HWM - high water mark
+ZMQ::IDENTITY - named queues
+ZMQ::SWAP_SIZE - space for disk overflow
+ZMQ::SUBSCRIBE - topic filters for pubsub
+
+example: sockopt => ["ZMQ::HWM", 50, "ZMQ::IDENTITY", "my_named_queue"]
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout` 
+
+  * Value type is <>
+  * Default value is `500`
+
+Timeout in milliseconds to wait for a reply.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/zeromq-v3.0.2.asciidoc b/docs/versioned-plugins/filters/zeromq-v3.0.2.asciidoc
new file mode 100644
index 000000000..dc184ff9c
--- /dev/null
+++ b/docs/versioned-plugins/filters/zeromq-v3.0.2.asciidoc
@@ -0,0 +1,148 @@
+:plugin: zeromq
+:type: filter
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.2
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-filter-zeromq/blob/v3.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Zeromq filter plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+ZeroMQ filter. This is the best way to send an event externally for filtering.
+It works much like an exec filter would, by sending the event "offsite"
+for processing and waiting for a response.
+
+The protocol here is:
+ * REQ sent with JSON-serialized logstash event
+ * REP read expected to be the full JSON 'filtered' event
+ * if the reply read is an empty string, the event will be cancelled
+
+Note that this is a limited subset of the zeromq functionality in
+inputs and outputs. The only topology that makes sense here is
+REQ/REP.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Zeromq Filter Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-add_tag_on_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-address>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-field>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sentinel>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sockopt>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +filter plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-add_tag_on_timeout"] +===== `add_tag_on_timeout` + + * Value type is <> + * Default value is `"zeromqtimeout"` + +tag to add if zeromq timeout expires before getting back an answer. +If set to "" then no tag will be added. + +[id="{version}-plugins-{type}s-{plugin}-address"] +===== `address` + + * Value type is <> + * Default value is `"tcp://127.0.0.1:2121"` + +0mq socket address to connect or bind +Please note that inproc:// will not work with logstash +as we use a context per thread +By default, filters connect + +[id="{version}-plugins-{type}s-{plugin}-field"] +===== `field` + + * Value type is <> + * There is no default value for this setting. + +The field to send off-site for processing +If this is unset, the whole event will be sent + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `server`, `client` + * Default value is `"client"` + +0mq mode +server mode binds/listens +client mode connects + +[id="{version}-plugins-{type}s-{plugin}-retries"] +===== `retries` + + * Value type is <> + * Default value is `3` + +number of retries, used for both sending and receiving messages. +for sending, retries should return instantly. +for receiving, the total blocking time is up to retries X timeout, +which by default is 3 X 500 = 1500ms + +[id="{version}-plugins-{type}s-{plugin}-sentinel"] +===== `sentinel` + + * Value type is <> + * Default value is `""` + +A sentinel value to signal the filter to cancel the event +If the peer returns the sentinel value, the event will be cancelled + +[id="{version}-plugins-{type}s-{plugin}-sockopt"] +===== `sockopt` + + * Value type is <> + * There is no default value for this setting. + +0mq socket options +This exposes zmq_setsockopt +for advanced tuning +see http://api.zeromq.org/2-1:zmq-setsockopt for details + +This is where you would set values like: +ZMQ::HWM - high water mark +ZMQ::IDENTITY - named queues +ZMQ::SWAP_SIZE - space for disk overflow +ZMQ::SUBSCRIBE - topic filters for pubsub + +example: sockopt => ["ZMQ::HWM", 50, "ZMQ::IDENTITY", "my_named_queue"] + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `500` + +timeout in milliseconds on which to wait for a reply. 
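+
+Putting the options together, a minimal sketch of an off-site filtering
+setup (the address and tuning values below are illustrative, matching the
+documented defaults):
+
+[source,ruby]
+    filter {
+      zeromq {
+        address            => "tcp://127.0.0.1:2121"  # where the REQ socket connects
+        mode               => "client"                # connect rather than bind
+        timeout            => 500                     # ms to wait for each REP
+        retries            => 3                       # worst case ~1500ms of blocking
+        add_tag_on_timeout => "zeromqtimeout"         # tag events the peer never answered
+      }
+    }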
+ + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs-index.asciidoc b/docs/versioned-plugins/inputs-index.asciidoc new file mode 100644 index 000000000..2cdec3326 --- /dev/null +++ b/docs/versioned-plugins/inputs-index.asciidoc @@ -0,0 +1,74 @@ +:type: input +:type_uc: Input + +include::include/plugin-intro.asciidoc[] + +include::inputs/beats-index.asciidoc[] +include::inputs/cloudwatch-index.asciidoc[] +include::inputs/cloudwatch_logs-index.asciidoc[] +include::inputs/couchdb_changes-index.asciidoc[] +include::inputs/dead_letter_queue-index.asciidoc[] +include::inputs/drupal_dblog-index.asciidoc[] +include::inputs/dynamodb-index.asciidoc[] +include::inputs/elasticsearch-index.asciidoc[] +include::inputs/eventlog-index.asciidoc[] +include::inputs/example-index.asciidoc[] +include::inputs/exec-index.asciidoc[] +include::inputs/file-index.asciidoc[] +include::inputs/fluentd-index.asciidoc[] +include::inputs/ganglia-index.asciidoc[] +include::inputs/gelf-index.asciidoc[] +include::inputs/gemfire-index.asciidoc[] +include::inputs/generator-index.asciidoc[] +include::inputs/github-index.asciidoc[] +include::inputs/google_pubsub-index.asciidoc[] +include::inputs/googleanalytics-index.asciidoc[] +include::inputs/graphite-index.asciidoc[] +include::inputs/heartbeat-index.asciidoc[] +include::inputs/heroku-index.asciidoc[] +include::inputs/http-index.asciidoc[] +include::inputs/http_poller-index.asciidoc[] +include::inputs/imap-index.asciidoc[] +include::inputs/irc-index.asciidoc[] +include::inputs/jdbc-index.asciidoc[] +include::inputs/jms-index.asciidoc[] +include::inputs/jmx-index.asciidoc[] +include::inputs/jmx-pipe-index.asciidoc[] +include::inputs/journald-index.asciidoc[] +include::inputs/kafka-index.asciidoc[] +include::inputs/kinesis-index.asciidoc[] +include::inputs/log4j-index.asciidoc[] +include::inputs/log4j2-index.asciidoc[] +include::inputs/lumberjack-index.asciidoc[] +include::inputs/meetup-index.asciidoc[] +include::inputs/mongodb-index.asciidoc[] +include::inputs/neo4j-index.asciidoc[] +include::inputs/netflow-index.asciidoc[] +include::inputs/perfmon-index.asciidoc[] +include::inputs/pipe-index.asciidoc[] +include::inputs/puppet_facter-index.asciidoc[] +include::inputs/rabbitmq-index.asciidoc[] +include::inputs/rackspace-index.asciidoc[] +include::inputs/redis-index.asciidoc[] +include::inputs/relp-index.asciidoc[] +include::inputs/rss-index.asciidoc[] +include::inputs/s3-index.asciidoc[] +include::inputs/s3sqs-index.asciidoc[] +include::inputs/salesforce-index.asciidoc[] +include::inputs/snmptrap-index.asciidoc[] +include::inputs/sqlite-index.asciidoc[] +include::inputs/sqs-index.asciidoc[] +include::inputs/stdin-index.asciidoc[] +include::inputs/stomp-index.asciidoc[] +include::inputs/syslog-index.asciidoc[] +include::inputs/tcp-index.asciidoc[] +include::inputs/twitter-index.asciidoc[] +include::inputs/udp-index.asciidoc[] +include::inputs/unix-index.asciidoc[] +include::inputs/varnishlog-index.asciidoc[] +include::inputs/websocket-index.asciidoc[] +include::inputs/wmi-index.asciidoc[] +include::inputs/xmpp-index.asciidoc[] +include::inputs/zenoss-index.asciidoc[] +include::inputs/zeromq-index.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/beats-index.asciidoc b/docs/versioned-plugins/inputs/beats-index.asciidoc new file mode 100644 index 000000000..7f4c37ff0 --- /dev/null +++ b/docs/versioned-plugins/inputs/beats-index.asciidoc @@ -0,0 +1,34 @@ 
+:plugin: beats +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2018-01-05 +| <> | 2017-12-19 +| <> | 2017-12-12 +| <> | 2017-11-07 +| <> | 2017-10-06 +| <> | 2017-08-15 +| <> | 2017-07-28 +| <> | 2017-07-12 +| <> | 2017-06-23 +| <> | 2017-06-22 +| <> | 2017-06-07 +| <> | 2017-06-03 +|======================================================================= + +include::beats-v5.0.6.asciidoc[] +include::beats-v5.0.5.asciidoc[] +include::beats-v5.0.4.asciidoc[] +include::beats-v5.0.3.asciidoc[] +include::beats-v5.0.2.asciidoc[] +include::beats-v5.0.1.asciidoc[] +include::beats-v5.0.0.asciidoc[] +include::beats-v4.0.5.asciidoc[] +include::beats-v4.0.4.asciidoc[] +include::beats-v4.0.3.asciidoc[] +include::beats-v4.0.2.asciidoc[] +include::beats-v4.0.1.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/beats-v4.0.1.asciidoc b/docs/versioned-plugins/inputs/beats-v4.0.1.asciidoc new file mode 100644 index 000000000..c2ebe427b --- /dev/null +++ b/docs/versioned-plugins/inputs/beats-v4.0.1.asciidoc @@ -0,0 +1,240 @@ +:plugin: beats +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.0.1 +:release_date: 2017-06-03 +:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v4.0.1/CHANGELOG.md +:include_path: ../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Beats + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This input plugin enables Logstash to receive events from the +https://www.elastic.co/products/beats[Elastic Beats] framework. + +The following example shows how to configure Logstash to listen on port +5044 for incoming Beats connections and to index into Elasticsearch: + +[source,ruby] +------------------------------------------------------------------------------ +input { + beats { + port => 5044 + } +} + +output { + elasticsearch { + hosts => "localhost:9200" + manage_template => false + index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}" + document_type => "%{[@metadata][type]}" + } +} +------------------------------------------------------------------------------ + +NOTE: The Beats shipper automatically sets the `type` field on the event. +You cannot override this setting in the Logstash config. If you specify +a setting for the <> config option in +Logstash, it is ignored. + +IMPORTANT: If you are shipping events that span multiple lines, you need to +use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events +before sending the event data to Logstash. You cannot use the +<> codec to handle multiline events. Doing so will +result in the failure to start Logstash. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Beats Input Configuration Options + +This plugin supports the following configuration options plus the <> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |<>, one of `["none", "peer", "force_peer"]`|No +| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |<>|No +|======================================================================= + +Also see <> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-cipher_suites"] +===== `cipher_suites` + + * Value type is <> + * Default value is `java.lang.String[TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256]@459cfcca` + +The list of ciphers suite to use, listed by priorities. + +[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"] +===== `client_inactivity_timeout` + + * Value type is <> + * Default value is `60` + +Close Idle clients after X seconds of inactivity. + +[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"] +===== `congestion_threshold` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `5` + +The number of seconds before we raise a timeout. +This option is useful to control how much time to wait if something is blocking the pipeline. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +The IP address to listen on. + +[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"] +===== `include_codec_tag` + + * Value type is <> + * Default value is `true` + + + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The port to listen on. + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * Default value is `false` + +Events are by default sent in plain text. You can +enable encryption by setting `ssl` to true and configuring +the `ssl_certificate` and `ssl_key` options. + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"] +===== `ssl_certificate` + + * Value type is <> + * There is no default value for this setting. + +SSL certificate to use. + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"] +===== `ssl_certificate_authorities` + + * Value type is <> + * Default value is `[]` + +Validate client certificates against these authorities. 
+You can define multiple files or paths. All the certificates will +be read and added to the trust store. You need to configure the `ssl_verify_mode` +to `peer` or `force_peer` to enable the verification. + + +[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"] +===== `ssl_handshake_timeout` + + * Value type is <> + * Default value is `10000` + +Time in milliseconds for an incomplete ssl handshake to timeout + +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + + * Value type is <> + * There is no default value for this setting. + +SSL key to use. +NOTE: This key need to be in the PKCS8 format, you can convert it with https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL] +for more information. + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] +===== `ssl_key_passphrase` + + * Value type is <> + * There is no default value for this setting. + +SSL key passphrase to use. + +[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"] +===== `ssl_verify_mode` + + * Value can be any of: `none`, `peer`, `force_peer` + * Default value is `"none"` + +By default the server doesn't do any client verification. + +`peer` will make the server ask the client to provide a certificate. +If the client provides a certificate, it will be validated. + +`force_peer` will make the server ask the client to provide a certificate. +If the client doesn't provide a certificate, the connection will be closed. + +This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs. + +[id="{version}-plugins-{type}s-{plugin}-target_field_for_codec"] +===== `target_field_for_codec` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `"message"` + +This is the default field to which the specified codec will be applied. + +[id="{version}-plugins-{type}s-{plugin}-tls_max_version"] +===== `tls_max_version` + + * Value type is <> + * Default value is `1.2` + +The maximum TLS version allowed for the encrypted connections. The value must be the one of the following: +1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2 + +[id="{version}-plugins-{type}s-{plugin}-tls_min_version"] +===== `tls_min_version` + + * Value type is <> + * Default value is `1` + +The minimum TLS version allowed for the encrypted connections. The value must be one of the following: +1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2 + + + +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/beats-v4.0.2.asciidoc b/docs/versioned-plugins/inputs/beats-v4.0.2.asciidoc new file mode 100644 index 000000000..0de19813c --- /dev/null +++ b/docs/versioned-plugins/inputs/beats-v4.0.2.asciidoc @@ -0,0 +1,240 @@ +:plugin: beats +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.0.2 +:release_date: 2017-06-07 +:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v4.0.2/CHANGELOG.md +:include_path: ../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Beats + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This input plugin enables Logstash to receive events from the +https://www.elastic.co/products/beats[Elastic Beats] framework. 
+ +The following example shows how to configure Logstash to listen on port +5044 for incoming Beats connections and to index into Elasticsearch: + +[source,ruby] +------------------------------------------------------------------------------ +input { + beats { + port => 5044 + } +} + +output { + elasticsearch { + hosts => "localhost:9200" + manage_template => false + index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}" + document_type => "%{[@metadata][type]}" + } +} +------------------------------------------------------------------------------ + +NOTE: The Beats shipper automatically sets the `type` field on the event. +You cannot override this setting in the Logstash config. If you specify +a setting for the <> config option in +Logstash, it is ignored. + +IMPORTANT: If you are shipping events that span multiple lines, you need to +use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events +before sending the event data to Logstash. You cannot use the +<> codec to handle multiline events. Doing so will +result in the failure to start Logstash. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Beats Input Configuration Options + +This plugin supports the following configuration options plus the <> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |<>, one of `["none", "peer", "force_peer"]`|No +| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |<>|No +|======================================================================= + +Also see <> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-cipher_suites"] +===== `cipher_suites` + + * Value type is <> + * Default value is `java.lang.String[TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256]@459cfcca` + +The list of ciphers suite to use, listed by priorities. + +[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"] +===== `client_inactivity_timeout` + + * Value type is <> + * Default value is `60` + +Close Idle clients after X seconds of inactivity. + +[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"] +===== `congestion_threshold` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. 
+ * Value type is <> + * Default value is `5` + +The number of seconds before we raise a timeout. +This option is useful to control how much time to wait if something is blocking the pipeline. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +The IP address to listen on. + +[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"] +===== `include_codec_tag` + + * Value type is <> + * Default value is `true` + + + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The port to listen on. + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * Default value is `false` + +Events are by default sent in plain text. You can +enable encryption by setting `ssl` to true and configuring +the `ssl_certificate` and `ssl_key` options. + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"] +===== `ssl_certificate` + + * Value type is <> + * There is no default value for this setting. + +SSL certificate to use. + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"] +===== `ssl_certificate_authorities` + + * Value type is <> + * Default value is `[]` + +Validate client certificates against these authorities. +You can define multiple files or paths. All the certificates will +be read and added to the trust store. You need to configure the `ssl_verify_mode` +to `peer` or `force_peer` to enable the verification. + + +[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"] +===== `ssl_handshake_timeout` + + * Value type is <> + * Default value is `10000` + +Time in milliseconds for an incomplete ssl handshake to timeout + +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + + * Value type is <> + * There is no default value for this setting. + +SSL key to use. +NOTE: This key need to be in the PKCS8 format, you can convert it with https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL] +for more information. + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] +===== `ssl_key_passphrase` + + * Value type is <> + * There is no default value for this setting. + +SSL key passphrase to use. + +[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"] +===== `ssl_verify_mode` + + * Value can be any of: `none`, `peer`, `force_peer` + * Default value is `"none"` + +By default the server doesn't do any client verification. + +`peer` will make the server ask the client to provide a certificate. +If the client provides a certificate, it will be validated. + +`force_peer` will make the server ask the client to provide a certificate. +If the client doesn't provide a certificate, the connection will be closed. + +This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs. + +[id="{version}-plugins-{type}s-{plugin}-target_field_for_codec"] +===== `target_field_for_codec` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `"message"` + +This is the default field to which the specified codec will be applied. + +[id="{version}-plugins-{type}s-{plugin}-tls_max_version"] +===== `tls_max_version` + + * Value type is <> + * Default value is `1.2` + +The maximum TLS version allowed for the encrypted connections. 
The value must be the one of the following: +1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2 + +[id="{version}-plugins-{type}s-{plugin}-tls_min_version"] +===== `tls_min_version` + + * Value type is <> + * Default value is `1` + +The minimum TLS version allowed for the encrypted connections. The value must be one of the following: +1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2 + + + +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/beats-v4.0.3.asciidoc b/docs/versioned-plugins/inputs/beats-v4.0.3.asciidoc new file mode 100644 index 000000000..f33b7235e --- /dev/null +++ b/docs/versioned-plugins/inputs/beats-v4.0.3.asciidoc @@ -0,0 +1,240 @@ +:plugin: beats +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.0.3 +:release_date: 2017-06-22 +:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v4.0.3/CHANGELOG.md +:include_path: ../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Beats input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This input plugin enables Logstash to receive events from the +https://www.elastic.co/products/beats[Elastic Beats] framework. + +The following example shows how to configure Logstash to listen on port +5044 for incoming Beats connections and to index into Elasticsearch: + +[source,ruby] +------------------------------------------------------------------------------ +input { + beats { + port => 5044 + } +} + +output { + elasticsearch { + hosts => "localhost:9200" + manage_template => false + index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}" + document_type => "%{[@metadata][type]}" + } +} +------------------------------------------------------------------------------ + +NOTE: The Beats shipper automatically sets the `type` field on the event. +You cannot override this setting in the Logstash config. If you specify +a setting for the <> config option in +Logstash, it is ignored. + +IMPORTANT: If you are shipping events that span multiple lines, you need to +use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events +before sending the event data to Logstash. You cannot use the +<> codec to handle multiline events. Doing so will +result in the failure to start Logstash. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Beats Input Configuration Options + +This plugin supports the following configuration options plus the <> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |<>, one of `["none", "peer", "force_peer"]`|No +| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |<>|No +|======================================================================= + +Also see <> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-cipher_suites"] +===== `cipher_suites` + + * Value type is <> + * Default value is `java.lang.String[TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256]@459cfcca` + +The list of ciphers suite to use, listed by priorities. + +[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"] +===== `client_inactivity_timeout` + + * Value type is <> + * Default value is `60` + +Close Idle clients after X seconds of inactivity. + +[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"] +===== `congestion_threshold` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `5` + +The number of seconds before we raise a timeout. +This option is useful to control how much time to wait if something is blocking the pipeline. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +The IP address to listen on. + +[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"] +===== `include_codec_tag` + + * Value type is <> + * Default value is `true` + + + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The port to listen on. + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * Default value is `false` + +Events are by default sent in plain text. You can +enable encryption by setting `ssl` to true and configuring +the `ssl_certificate` and `ssl_key` options. + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"] +===== `ssl_certificate` + + * Value type is <> + * There is no default value for this setting. + +SSL certificate to use. + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"] +===== `ssl_certificate_authorities` + + * Value type is <> + * Default value is `[]` + +Validate client certificates against these authorities. 
+You can define multiple files or paths. All the certificates will
+be read and added to the trust store. You need to configure the `ssl_verify_mode`
+to `peer` or `force_peer` to enable the verification.
+
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"]
+===== `ssl_handshake_timeout`
+
+ * Value type is <>
+ * Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL key to use.
+NOTE: This key needs to be in PKCS8 format. You can convert it with the
+https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL pkcs8] command.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL key passphrase to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"]
+===== `ssl_verify_mode`
+
+ * Value can be any of: `none`, `peer`, `force_peer`
+ * Default value is `"none"`
+
+By default the server doesn't do any client verification.
+
+`peer` will make the server ask the client to provide a certificate.
+If the client provides a certificate, it will be validated.
+
+`force_peer` will make the server ask the client to provide a certificate.
+If the client doesn't provide a certificate, the connection will be closed.
+
+This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs.
+
+[id="{version}-plugins-{type}s-{plugin}-target_field_for_codec"]
+===== `target_field_for_codec` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * Default value is `"message"`
+
+This is the default field to which the specified codec will be applied.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_max_version"]
+===== `tls_max_version`
+
+ * Value type is <>
+ * Default value is `1.2`
+
+The maximum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+[id="{version}-plugins-{type}s-{plugin}-tls_min_version"]
+===== `tls_min_version`
+
+ * Value type is <>
+ * Default value is `1`
+
+The minimum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/beats-v4.0.4.asciidoc b/docs/versioned-plugins/inputs/beats-v4.0.4.asciidoc
new file mode 100644
index 000000000..5e946b384
--- /dev/null
+++ b/docs/versioned-plugins/inputs/beats-v4.0.4.asciidoc
@@ -0,0 +1,241 @@
+:plugin: beats
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.4
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v4.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Beats input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input plugin enables Logstash to receive events from the
+https://www.elastic.co/products/beats[Elastic Beats] framework.
+
+The following example shows how to configure Logstash to listen on port
+5044 for incoming Beats connections and to index into Elasticsearch:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+  }
+}
+
+output {
+  elasticsearch {
+    hosts => "localhost:9200"
+    manage_template => false
+    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
+    document_type => "%{[@metadata][type]}"
+  }
+}
+------------------------------------------------------------------------------
+
+NOTE: The Beats shipper automatically sets the `type` field on the event.
+You cannot override this setting in the Logstash config. If you specify
+a setting for the <> config option in
+Logstash, it is ignored.
+
+IMPORTANT: If you are shipping events that span multiple lines, you need to
+use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events
+before sending the event data to Logstash. You cannot use the
+<> codec to handle multiline events. Doing so will
+result in the failure to start Logstash.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Beats Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |<>, one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-cipher_suites"]
+===== `cipher_suites`
+
+ * Value type is <>
+ * Default value is `['TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256']`
+
+The list of cipher suites to use, listed by priority.
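+
+For instance, here is a minimal sketch that narrows the listener to two of the GCM suites from the default list above; the certificate and key paths are placeholders, not values shipped with the plugin:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+    ssl => true
+    ssl_certificate => "/etc/pki/tls/certs/logstash-beats.crt"
+    ssl_key => "/etc/pki/tls/private/logstash-beats.p8"
+    # Offer only AES-GCM suites; clients that cannot negotiate one are rejected.
+    cipher_suites => ["TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"]
+  }
+}
+------------------------------------------------------------------------------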
+
+[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"]
+===== `client_inactivity_timeout`
+
+ * Value type is <>
+ * Default value is `60`
+
+Close idle clients after the specified number of seconds of inactivity.
+
+[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"]
+===== `congestion_threshold` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * Default value is `5`
+
+The number of seconds before we raise a timeout.
+This option is useful to control how long to wait if something is blocking the pipeline.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"]
+===== `include_codec_tag`
+
+ * Value type is <>
+ * Default value is `true`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The port to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <>
+ * Default value is `false`
+
+Events are by default sent in plain text. You can
+enable encryption by setting `ssl` to true and configuring
+the `ssl_certificate` and `ssl_key` options.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
+===== `ssl_certificate`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL certificate to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"]
+===== `ssl_certificate_authorities`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Validate client certificates against these authorities.
+You can define multiple files or paths. All the certificates will
+be read and added to the trust store. You need to configure the `ssl_verify_mode`
+to `peer` or `force_peer` to enable the verification.
+
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"]
+===== `ssl_handshake_timeout`
+
+ * Value type is <>
+ * Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL key to use.
+NOTE: This key needs to be in PKCS8 format. You can convert it with the
+https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL pkcs8] command.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL key passphrase to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"]
+===== `ssl_verify_mode`
+
+ * Value can be any of: `none`, `peer`, `force_peer`
+ * Default value is `"none"`
+
+By default the server doesn't do any client verification.
+
+`peer` will make the server ask the client to provide a certificate.
+If the client provides a certificate, it will be validated.
+
+`force_peer` will make the server ask the client to provide a certificate.
+If the client doesn't provide a certificate, the connection will be closed.
+
+This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs.
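+
+As an illustration, a sketch of a listener that requires and verifies client certificates; the certificate, key, and CA paths are placeholders for your own files:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+    ssl => true
+    ssl_certificate => "/etc/pki/tls/certs/logstash-beats.crt"
+    ssl_key => "/etc/pki/tls/private/logstash-beats.p8"
+    # Disconnect clients that do not present a certificate signed by this CA.
+    ssl_verify_mode => "force_peer"
+    ssl_certificate_authorities => ["/etc/pki/tls/certs/beats-ca.crt"]
+  }
+}
+------------------------------------------------------------------------------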
+
+[id="{version}-plugins-{type}s-{plugin}-target_field_for_codec"]
+===== `target_field_for_codec` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * Default value is `"message"`
+
+This is the default field to which the specified codec will be applied.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_max_version"]
+===== `tls_max_version`
+
+ * Value type is <>
+ * Default value is `1.2`
+
+The maximum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+[id="{version}-plugins-{type}s-{plugin}-tls_min_version"]
+===== `tls_min_version`
+
+ * Value type is <>
+ * Default value is `1`
+
+The minimum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/beats-v4.0.5.asciidoc b/docs/versioned-plugins/inputs/beats-v4.0.5.asciidoc
new file mode 100644
index 000000000..da902bf99
--- /dev/null
+++ b/docs/versioned-plugins/inputs/beats-v4.0.5.asciidoc
@@ -0,0 +1,241 @@
+:plugin: beats
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.5
+:release_date: 2017-07-12
+:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v4.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Beats input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input plugin enables Logstash to receive events from the
+https://www.elastic.co/products/beats[Elastic Beats] framework.
+
+The following example shows how to configure Logstash to listen on port
+5044 for incoming Beats connections and to index into Elasticsearch:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+  }
+}
+
+output {
+  elasticsearch {
+    hosts => "localhost:9200"
+    manage_template => false
+    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
+    document_type => "%{[@metadata][type]}"
+  }
+}
+------------------------------------------------------------------------------
+
+NOTE: The Beats shipper automatically sets the `type` field on the event.
+You cannot override this setting in the Logstash config. If you specify
+a setting for the <> config option in
+Logstash, it is ignored.
+
+IMPORTANT: If you are shipping events that span multiple lines, you need to
+use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events
+before sending the event data to Logstash. You cannot use the
+<> codec to handle multiline events. Doing so will
+result in the failure to start Logstash.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Beats Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |<>, one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-cipher_suites"]
+===== `cipher_suites`
+
+ * Value type is <>
+ * Default value is `['TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256']`
+
+The list of cipher suites to use, listed by priority.
+
+[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"]
+===== `client_inactivity_timeout`
+
+ * Value type is <>
+ * Default value is `60`
+
+Close idle clients after the specified number of seconds of inactivity.
+
+[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"]
+===== `congestion_threshold` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * Default value is `5`
+
+The number of seconds before we raise a timeout.
+This option is useful to control how long to wait if something is blocking the pipeline.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"]
+===== `include_codec_tag`
+
+ * Value type is <>
+ * Default value is `true`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The port to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <>
+ * Default value is `false`
+
+Events are by default sent in plain text. You can
+enable encryption by setting `ssl` to true and configuring
+the `ssl_certificate` and `ssl_key` options.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
+===== `ssl_certificate`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL certificate to use.
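+
+Taken together, a minimal TLS-enabled listener might look like the following sketch; the paths are placeholders, and the key is assumed to already be in PKCS8 format, as the `ssl_key` section below notes:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+    # Enable encryption; without these three settings events arrive in plain text.
+    ssl => true
+    ssl_certificate => "/etc/pki/tls/certs/logstash-beats.crt"
+    ssl_key => "/etc/pki/tls/private/logstash-beats.p8"
+  }
+}
+------------------------------------------------------------------------------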
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"]
+===== `ssl_certificate_authorities`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Validate client certificates against these authorities.
+You can define multiple files or paths. All the certificates will
+be read and added to the trust store. You need to configure the `ssl_verify_mode`
+to `peer` or `force_peer` to enable the verification.
+
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"]
+===== `ssl_handshake_timeout`
+
+ * Value type is <>
+ * Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL key to use.
+NOTE: This key needs to be in PKCS8 format. You can convert it with the
+https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL pkcs8] command.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL key passphrase to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"]
+===== `ssl_verify_mode`
+
+ * Value can be any of: `none`, `peer`, `force_peer`
+ * Default value is `"none"`
+
+By default the server doesn't do any client verification.
+
+`peer` will make the server ask the client to provide a certificate.
+If the client provides a certificate, it will be validated.
+
+`force_peer` will make the server ask the client to provide a certificate.
+If the client doesn't provide a certificate, the connection will be closed.
+
+This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs.
+
+[id="{version}-plugins-{type}s-{plugin}-target_field_for_codec"]
+===== `target_field_for_codec` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * Default value is `"message"`
+
+This is the default field to which the specified codec will be applied.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_max_version"]
+===== `tls_max_version`
+
+ * Value type is <>
+ * Default value is `1.2`
+
+The maximum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+[id="{version}-plugins-{type}s-{plugin}-tls_min_version"]
+===== `tls_min_version`
+
+ * Value type is <>
+ * Default value is `1`
+
+The minimum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/beats-v5.0.0.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.0.asciidoc
new file mode 100644
index 000000000..463455ad3
--- /dev/null
+++ b/docs/versioned-plugins/inputs/beats-v5.0.0.asciidoc
@@ -0,0 +1,222 @@
+:plugin: beats
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.0
+:release_date: 2017-07-28
+:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v5.0.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Beats input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input plugin enables Logstash to receive events from the
+https://www.elastic.co/products/beats[Elastic Beats] framework.
+
+The following example shows how to configure Logstash to listen on port
+5044 for incoming Beats connections and to index into Elasticsearch:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+  }
+}
+
+output {
+  elasticsearch {
+    hosts => "localhost:9200"
+    manage_template => false
+    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
+    document_type => "%{[@metadata][type]}"
+  }
+}
+------------------------------------------------------------------------------
+
+NOTE: The Beats shipper automatically sets the `type` field on the event.
+You cannot override this setting in the Logstash config. If you specify
+a setting for the <> config option in
+Logstash, it is ignored.
+
+IMPORTANT: If you are shipping events that span multiple lines, you need to
+use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events
+before sending the event data to Logstash. You cannot use the
+<> codec to handle multiline events. Doing so will
+result in the failure to start Logstash.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Beats Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |<>, one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-cipher_suites"]
+===== `cipher_suites`
+
+ * Value type is <>
+ * Default value is `['TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256']`
+
+The list of cipher suites to use, listed by priority.
+
+[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"]
+===== `client_inactivity_timeout`
+
+ * Value type is <>
+ * Default value is `60`
+
+Close idle clients after the specified number of seconds of inactivity.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"]
+===== `include_codec_tag`
+
+ * Value type is <>
+ * Default value is `true`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The port to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <>
+ * Default value is `false`
+
+Events are by default sent in plain text. You can
+enable encryption by setting `ssl` to true and configuring
+the `ssl_certificate` and `ssl_key` options.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
+===== `ssl_certificate`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL certificate to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"]
+===== `ssl_certificate_authorities`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Validate client certificates against these authorities.
+You can define multiple files or paths. All the certificates will
+be read and added to the trust store. You need to configure the `ssl_verify_mode`
+to `peer` or `force_peer` to enable the verification.
+
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"]
+===== `ssl_handshake_timeout`
+
+ * Value type is <>
+ * Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL key to use.
+NOTE: This key needs to be in PKCS8 format. You can convert it with the
+https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL pkcs8] command.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL key passphrase to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"]
+===== `ssl_verify_mode`
+
+ * Value can be any of: `none`, `peer`, `force_peer`
+ * Default value is `"none"`
+
+By default the server doesn't do any client verification.
+
+`peer` will make the server ask the client to provide a certificate.
+If the client provides a certificate, it will be validated.
+
+`force_peer` will make the server ask the client to provide a certificate.
+If the client doesn't provide a certificate, the connection will be closed.
+
+This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_max_version"]
+===== `tls_max_version`
+
+ * Value type is <>
+ * Default value is `1.2`
+
+The maximum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+[id="{version}-plugins-{type}s-{plugin}-tls_min_version"]
+===== `tls_min_version`
+
+ * Value type is <>
+ * Default value is `1`
+
+The minimum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/beats-v5.0.1.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.1.asciidoc
new file mode 100644
index 000000000..c4ee38ffb
--- /dev/null
+++ b/docs/versioned-plugins/inputs/beats-v5.0.1.asciidoc
@@ -0,0 +1,222 @@
+:plugin: beats
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.1
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v5.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Beats input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input plugin enables Logstash to receive events from the
+https://www.elastic.co/products/beats[Elastic Beats] framework.
+
+The following example shows how to configure Logstash to listen on port
+5044 for incoming Beats connections and to index into Elasticsearch:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+  }
+}
+
+output {
+  elasticsearch {
+    hosts => "localhost:9200"
+    manage_template => false
+    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
+    document_type => "%{[@metadata][type]}"
+  }
+}
+------------------------------------------------------------------------------
+
+NOTE: The Beats shipper automatically sets the `type` field on the event.
+You cannot override this setting in the Logstash config. If you specify
+a setting for the <> config option in
+Logstash, it is ignored.
+
+IMPORTANT: If you are shipping events that span multiple lines, you need to
+use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events
+before sending the event data to Logstash. You cannot use the
+<> codec to handle multiline events. Doing so will
+result in the failure to start Logstash.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Beats Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |<>, one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-cipher_suites"]
+===== `cipher_suites`
+
+ * Value type is <>
+ * Default value is `['TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256']`
+
+The list of cipher suites to use, listed by priority.
+
+[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"]
+===== `client_inactivity_timeout`
+
+ * Value type is <>
+ * Default value is `60`
+
+Close idle clients after the specified number of seconds of inactivity.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"]
+===== `include_codec_tag`
+
+ * Value type is <>
+ * Default value is `true`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The port to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <>
+ * Default value is `false`
+
+Events are by default sent in plain text. You can
+enable encryption by setting `ssl` to true and configuring
+the `ssl_certificate` and `ssl_key` options.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
+===== `ssl_certificate`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL certificate to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"]
+===== `ssl_certificate_authorities`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Validate client certificates against these authorities.
+You can define multiple files or paths. All the certificates will
+be read and added to the trust store. You need to configure the `ssl_verify_mode`
+to `peer` or `force_peer` to enable the verification.
+
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"]
+===== `ssl_handshake_timeout`
+
+ * Value type is <>
+ * Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL key to use.
+NOTE: This key needs to be in PKCS8 format. You can convert it with the
+https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL pkcs8] command.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL key passphrase to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"]
+===== `ssl_verify_mode`
+
+ * Value can be any of: `none`, `peer`, `force_peer`
+ * Default value is `"none"`
+
+By default the server doesn't do any client verification.
+
+`peer` will make the server ask the client to provide a certificate.
+If the client provides a certificate, it will be validated.
+
+`force_peer` will make the server ask the client to provide a certificate.
+If the client doesn't provide a certificate, the connection will be closed.
+
+This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_max_version"]
+===== `tls_max_version`
+
+ * Value type is <>
+ * Default value is `1.2`
+
+The maximum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+[id="{version}-plugins-{type}s-{plugin}-tls_min_version"]
+===== `tls_min_version`
+
+ * Value type is <>
+ * Default value is `1`
+
+The minimum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/beats-v5.0.2.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.2.asciidoc
new file mode 100644
index 000000000..5d5fb7c11
--- /dev/null
+++ b/docs/versioned-plugins/inputs/beats-v5.0.2.asciidoc
@@ -0,0 +1,222 @@
+:plugin: beats
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.2
+:release_date: 2017-10-06
+:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v5.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Beats input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input plugin enables Logstash to receive events from the
+https://www.elastic.co/products/beats[Elastic Beats] framework.
+
+The following example shows how to configure Logstash to listen on port
+5044 for incoming Beats connections and to index into Elasticsearch:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+  }
+}
+
+output {
+  elasticsearch {
+    hosts => "localhost:9200"
+    manage_template => false
+    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
+    document_type => "%{[@metadata][type]}"
+  }
+}
+------------------------------------------------------------------------------
+
+NOTE: The Beats shipper automatically sets the `type` field on the event.
+You cannot override this setting in the Logstash config. If you specify
+a setting for the <> config option in
+Logstash, it is ignored.
+
+IMPORTANT: If you are shipping events that span multiple lines, you need to
+use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events
+before sending the event data to Logstash. You cannot use the
+<> codec to handle multiline events. Doing so will
+result in the failure to start Logstash.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Beats Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |<>, one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-cipher_suites"]
+===== `cipher_suites`
+
+ * Value type is <>
+ * Default value is `['TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256']`
+
+The list of cipher suites to use, listed by priority.
+
+[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"]
+===== `client_inactivity_timeout`
+
+ * Value type is <>
+ * Default value is `60`
+
+Close idle clients after the specified number of seconds of inactivity.
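+
+For example, a sketch that keeps slow or bursty Beats connections open for five minutes instead of the default 60 seconds:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+    # Close a client connection only after 300 seconds without activity.
+    client_inactivity_timeout => 300
+  }
+}
+------------------------------------------------------------------------------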
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"]
+===== `include_codec_tag`
+
+ * Value type is <>
+ * Default value is `true`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The port to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <>
+ * Default value is `false`
+
+Events are by default sent in plain text. You can
+enable encryption by setting `ssl` to true and configuring
+the `ssl_certificate` and `ssl_key` options.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
+===== `ssl_certificate`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL certificate to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"]
+===== `ssl_certificate_authorities`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Validate client certificates against these authorities.
+You can define multiple files or paths. All the certificates will
+be read and added to the trust store. You need to configure the `ssl_verify_mode`
+to `peer` or `force_peer` to enable the verification.
+
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"]
+===== `ssl_handshake_timeout`
+
+ * Value type is <>
+ * Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL key to use.
+NOTE: This key needs to be in PKCS8 format. You can convert it with the
+https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL pkcs8] command.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL key passphrase to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"]
+===== `ssl_verify_mode`
+
+ * Value can be any of: `none`, `peer`, `force_peer`
+ * Default value is `"none"`
+
+By default the server doesn't do any client verification.
+
+`peer` will make the server ask the client to provide a certificate.
+If the client provides a certificate, it will be validated.
+
+`force_peer` will make the server ask the client to provide a certificate.
+If the client doesn't provide a certificate, the connection will be closed.
+
+This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_max_version"]
+===== `tls_max_version`
+
+ * Value type is <>
+ * Default value is `1.2`
+
+The maximum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+[id="{version}-plugins-{type}s-{plugin}-tls_min_version"]
+===== `tls_min_version`
+
+ * Value type is <>
+ * Default value is `1`
+
+The minimum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/beats-v5.0.3.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.3.asciidoc
new file mode 100644
index 000000000..31e16f685
--- /dev/null
+++ b/docs/versioned-plugins/inputs/beats-v5.0.3.asciidoc
@@ -0,0 +1,222 @@
+:plugin: beats
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.3
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v5.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Beats input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input plugin enables Logstash to receive events from the
+https://www.elastic.co/products/beats[Elastic Beats] framework.
+
+The following example shows how to configure Logstash to listen on port
+5044 for incoming Beats connections and to index into Elasticsearch:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+  }
+}
+
+output {
+  elasticsearch {
+    hosts => "localhost:9200"
+    manage_template => false
+    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
+    document_type => "%{[@metadata][type]}"
+  }
+}
+------------------------------------------------------------------------------
+
+NOTE: The Beats shipper automatically sets the `type` field on the event.
+You cannot override this setting in the Logstash config. If you specify
+a setting for the <> config option in
+Logstash, it is ignored.
+
+IMPORTANT: If you are shipping events that span multiple lines, you need to
+use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events
+before sending the event data to Logstash. You cannot use the
+<> codec to handle multiline events. Doing so will
+result in the failure to start Logstash.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Beats Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |<>, one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-cipher_suites"]
+===== `cipher_suites`
+
+ * Value type is <>
+ * Default value is `['TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256']`
+
+The list of cipher suites to use, listed by priority.
+
+[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"]
+===== `client_inactivity_timeout`
+
+ * Value type is <>
+ * Default value is `60`
+
+Close idle clients after the specified number of seconds of inactivity.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"]
+===== `include_codec_tag`
+
+ * Value type is <>
+ * Default value is `true`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The port to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <>
+ * Default value is `false`
+
+Events are by default sent in plain text. You can
+enable encryption by setting `ssl` to true and configuring
+the `ssl_certificate` and `ssl_key` options.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
+===== `ssl_certificate`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL certificate to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"]
+===== `ssl_certificate_authorities`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Validate client certificates against these authorities.
+You can define multiple files or paths. All the certificates will
+be read and added to the trust store. You need to configure the `ssl_verify_mode`
+to `peer` or `force_peer` to enable the verification.
+
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"]
+===== `ssl_handshake_timeout`
+
+ * Value type is <>
+ * Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL key to use.
+NOTE: This key needs to be in PKCS8 format. You can convert it with the
+https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL pkcs8] command.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL key passphrase to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"]
+===== `ssl_verify_mode`
+
+ * Value can be any of: `none`, `peer`, `force_peer`
+ * Default value is `"none"`
+
+By default the server doesn't do any client verification.
+
+`peer` will make the server ask the client to provide a certificate.
+If the client provides a certificate, it will be validated.
+
+`force_peer` will make the server ask the client to provide a certificate.
+If the client doesn't provide a certificate, the connection will be closed.
+
+This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_max_version"]
+===== `tls_max_version`
+
+ * Value type is <>
+ * Default value is `1.2`
+
+The maximum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+[id="{version}-plugins-{type}s-{plugin}-tls_min_version"]
+===== `tls_min_version`
+
+ * Value type is <>
+ * Default value is `1`
+
+The minimum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/beats-v5.0.4.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.4.asciidoc
new file mode 100644
index 000000000..cd6366e9f
--- /dev/null
+++ b/docs/versioned-plugins/inputs/beats-v5.0.4.asciidoc
@@ -0,0 +1,222 @@
+:plugin: beats
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.4
+:release_date: 2017-12-12
+:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v5.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Beats input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input plugin enables Logstash to receive events from the
+https://www.elastic.co/products/beats[Elastic Beats] framework.
+
+The following example shows how to configure Logstash to listen on port
+5044 for incoming Beats connections and to index into Elasticsearch:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+  }
+}
+
+output {
+  elasticsearch {
+    hosts => "localhost:9200"
+    manage_template => false
+    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
+    document_type => "%{[@metadata][type]}"
+  }
+}
+------------------------------------------------------------------------------
+
+NOTE: The Beats shipper automatically sets the `type` field on the event.
+You cannot override this setting in the Logstash config. If you specify
+a setting for the <> config option in
+Logstash, it is ignored.
+
+IMPORTANT: If you are shipping events that span multiple lines, you need to
+use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events
+before sending the event data to Logstash. You cannot use the
+<> codec to handle multiline events. Doing so will
+result in the failure to start Logstash.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Beats Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |<>, one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-cipher_suites"]
+===== `cipher_suites`
+
+ * Value type is <>
+ * Default value is `['TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384', 'TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384', 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256', 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256']`
+
+The list of cipher suites to use, listed by priority.
+
+[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"]
+===== `client_inactivity_timeout`
+
+ * Value type is <>
+ * Default value is `60`
+
+Close idle clients after the specified number of seconds of inactivity.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"]
+===== `include_codec_tag`
+
+ * Value type is <>
+ * Default value is `true`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The port to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <>
+ * Default value is `false`
+
+Events are by default sent in plain text. You can
+enable encryption by setting `ssl` to true and configuring
+the `ssl_certificate` and `ssl_key` options.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
+===== `ssl_certificate`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL certificate to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"]
+===== `ssl_certificate_authorities`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Validate client certificates against these authorities.
+You can define multiple files or paths. All the certificates will
+be read and added to the trust store. You need to configure the `ssl_verify_mode`
+to `peer` or `force_peer` to enable the verification.
+
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"]
+===== `ssl_handshake_timeout`
+
+ * Value type is <>
+ * Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL key to use.
+NOTE: This key needs to be in PKCS8 format. You can convert it with the
+https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL pkcs8] command.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL key passphrase to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"]
+===== `ssl_verify_mode`
+
+ * Value can be any of: `none`, `peer`, `force_peer`
+ * Default value is `"none"`
+
+By default the server doesn't do any client verification.
+
+`peer` will make the server ask the client to provide a certificate.
+If the client provides a certificate, it will be validated.
+
+`force_peer` will make the server ask the client to provide a certificate.
+If the client doesn't provide a certificate, the connection will be closed.
+
+This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_max_version"]
+===== `tls_max_version`
+
+ * Value type is <>
+ * Default value is `1.2`
+
+The maximum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+[id="{version}-plugins-{type}s-{plugin}-tls_min_version"]
+===== `tls_min_version`
+
+ * Value type is <>
+ * Default value is `1`
+
+The minimum TLS version allowed for the encrypted connections. The value must be one of the following:
The value must be one of the following: +1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2 + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/beats-v5.0.5.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.5.asciidoc new file mode 100644 index 000000000..37bbc2759 --- /dev/null +++ b/docs/versioned-plugins/inputs/beats-v5.0.5.asciidoc @@ -0,0 +1,222 @@ +:plugin: beats +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v5.0.5 +:release_date: 2017-12-19 +:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v5.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Beats input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This input plugin enables Logstash to receive events from the +https://www.elastic.co/products/beats[Elastic Beats] framework. + +The following example shows how to configure Logstash to listen on port +5044 for incoming Beats connections and to index into Elasticsearch: + +[source,ruby] +------------------------------------------------------------------------------ +input { + beats { + port => 5044 + } +} + +output { + elasticsearch { + hosts => "localhost:9200" + manage_template => false + index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}" + document_type => "%{[@metadata][type]}" + } +} +------------------------------------------------------------------------------ + +NOTE: The Beats shipper automatically sets the `type` field on the event. +You cannot override this setting in the Logstash config. If you specify +a setting for the <> config option in +Logstash, it is ignored. + +IMPORTANT: If you are shipping events that span multiple lines, you need to +use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events +before sending the event data to Logstash. You cannot use the +<> codec to handle multiline events. Doing so will +result in the failure to start Logstash. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Beats Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |<<string,string>>, one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |<<number,number>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-cipher_suites"]
+===== `cipher_suites`
+
+ * Value type is <<array,array>>
+ * Default value is `["TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"]`
+
+The list of cipher suites to use, listed by priority.
+
+[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"]
+===== `client_inactivity_timeout`
+
+ * Value type is <<number,number>>
+ * Default value is `60`
+
+Close idle clients after the configured number of seconds of inactivity.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <<string,string>>
+ * Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"]
+===== `include_codec_tag`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * This is a required setting.
+ * Value type is <<number,number>>
+ * There is no default value for this setting.
+
+The port to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Events are sent in plain text by default. You can
+enable encryption by setting `ssl` to true and configuring
+the `ssl_certificate` and `ssl_key` options.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
+===== `ssl_certificate`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+SSL certificate to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"]
+===== `ssl_certificate_authorities`
+
+ * Value type is <<array,array>>
+ * Default value is `[]`
+
+Validate client certificates against these authorities.
+You can define multiple files or paths. All the certificates will
+be read and added to the trust store. You need to set `ssl_verify_mode`
+to `peer` or `force_peer` to enable verification.
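+As a sketch of how these options fit together, the following configuration
+enables TLS and requires clients to present a certificate signed by a known
+CA. The file paths are placeholders you would replace with your own:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+    ssl => true
+    # Server identity (paths are placeholders)
+    ssl_certificate => "/etc/logstash/logstash.crt"
+    ssl_key => "/etc/logstash/logstash.pkcs8.key"
+    # Require and validate client certificates against this CA
+    ssl_certificate_authorities => ["/etc/logstash/ca.crt"]
+    ssl_verify_mode => "force_peer"
+  }
+}
+------------------------------------------------------------------------------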
+
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"]
+===== `ssl_handshake_timeout`
+
+ * Value type is <<number,number>>
+ * Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+SSL key to use.
+NOTE: This key needs to be in PKCS8 format. You can convert it with the
+https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL `pkcs8`] command.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+SSL key passphrase to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"]
+===== `ssl_verify_mode`
+
+ * Value can be any of: `none`, `peer`, `force_peer`
+ * Default value is `"none"`
+
+By default the server doesn't do any client verification.
+
+`peer` will make the server ask the client to provide a certificate.
+If the client provides a certificate, it will be validated.
+
+`force_peer` will make the server ask the client to provide a certificate.
+If the client doesn't provide a certificate, the connection will be closed.
+
+This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_max_version"]
+===== `tls_max_version`
+
+ * Value type is <<number,number>>
+ * Default value is `1.2`
+
+The maximum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+[id="{version}-plugins-{type}s-{plugin}-tls_min_version"]
+===== `tls_min_version`
+
+ * Value type is <<number,number>>
+ * Default value is `1`
+
+The minimum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/beats-v5.0.6.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.6.asciidoc
new file mode 100644
index 000000000..abb26230b
--- /dev/null
+++ b/docs/versioned-plugins/inputs/beats-v5.0.6.asciidoc
@@ -0,0 +1,222 @@
+:plugin: beats
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.6
+:release_date: 2018-01-05
+:changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v5.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Beats input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input plugin enables Logstash to receive events from the
+https://www.elastic.co/products/beats[Elastic Beats] framework.
+
+The following example shows how to configure Logstash to listen on port
+5044 for incoming Beats connections and to index into Elasticsearch:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+  }
+}
+
+output {
+  elasticsearch {
+    hosts => "localhost:9200"
+    manage_template => false
+    index => "%{[@metadata][beat]}-%{+YYYY.MM.dd}"
+    document_type => "%{[@metadata][type]}"
+  }
+}
+------------------------------------------------------------------------------
+
+NOTE: The Beats shipper automatically sets the `type` field on the event.
+You cannot override this setting in the Logstash config. If you specify
+a setting for the <<{version}-plugins-{type}s-{plugin}-type,`type`>> config option in
+Logstash, it is ignored.
+
+IMPORTANT: If you are shipping events that span multiple lines, you need to
+use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events
+before sending the event data to Logstash. You cannot use the
+<<plugins-codecs-multiline>> codec to handle multiline events. Doing so will
+prevent Logstash from starting.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Beats Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |<<string,string>>, one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |<<number,number>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-cipher_suites"]
+===== `cipher_suites`
+
+ * Value type is <<array,array>>
+ * Default value is `["TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256"]`
+
+The list of cipher suites to use, listed by priority.
+
+[id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"]
+===== `client_inactivity_timeout`
+
+ * Value type is <<number,number>>
+ * Default value is `60`
+
+Close idle clients after the configured number of seconds of inactivity.
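+As a sketch of tuning the two options just described (the values are
+illustrative, not recommendations), the following restricts the server to the
+AES-GCM suites from the default list and shortens the idle timeout:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  beats {
+    port => 5044
+    # Accept only the AES-GCM suites (a subset of the defaults listed above)
+    cipher_suites => [
+      "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
+      "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"
+    ]
+    # Drop clients that have been idle for 30 seconds
+    client_inactivity_timeout => 30
+  }
+}
+------------------------------------------------------------------------------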
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <<string,string>>
+ * Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-include_codec_tag"]
+===== `include_codec_tag`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * This is a required setting.
+ * Value type is <<number,number>>
+ * There is no default value for this setting.
+
+The port to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Events are sent in plain text by default. You can
+enable encryption by setting `ssl` to true and configuring
+the `ssl_certificate` and `ssl_key` options.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
+===== `ssl_certificate`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+SSL certificate to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"]
+===== `ssl_certificate_authorities`
+
+ * Value type is <<array,array>>
+ * Default value is `[]`
+
+Validate client certificates against these authorities.
+You can define multiple files or paths. All the certificates will
+be read and added to the trust store. You need to set `ssl_verify_mode`
+to `peer` or `force_peer` to enable verification.
+
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"]
+===== `ssl_handshake_timeout`
+
+ * Value type is <<number,number>>
+ * Default value is `10000`
+
+Time in milliseconds for an incomplete SSL handshake to time out.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+SSL key to use.
+NOTE: This key needs to be in PKCS8 format. You can convert it with the
+https://www.openssl.org/docs/man1.1.0/apps/pkcs8.html[OpenSSL `pkcs8`] command.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+SSL key passphrase to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify_mode"]
+===== `ssl_verify_mode`
+
+ * Value can be any of: `none`, `peer`, `force_peer`
+ * Default value is `"none"`
+
+By default the server doesn't do any client verification.
+
+`peer` will make the server ask the client to provide a certificate.
+If the client provides a certificate, it will be validated.
+
+`force_peer` will make the server ask the client to provide a certificate.
+If the client doesn't provide a certificate, the connection will be closed.
+
+This option needs to be used with `ssl_certificate_authorities` and a defined list of CAs.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_max_version"]
+===== `tls_max_version`
+
+ * Value type is <<number,number>>
+ * Default value is `1.2`
+
+The maximum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+[id="{version}-plugins-{type}s-{plugin}-tls_min_version"]
+===== `tls_min_version`
+
+ * Value type is <<number,number>>
+ * Default value is `1`
+
+The minimum TLS version allowed for the encrypted connections. The value must be one of the following:
+1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/cloudwatch-index.asciidoc b/docs/versioned-plugins/inputs/cloudwatch-index.asciidoc
new file mode 100644
index 000000000..6bb5d8072
--- /dev/null
+++ b/docs/versioned-plugins/inputs/cloudwatch-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: cloudwatch
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <<v2.0.3-plugins-inputs-cloudwatch,v2.0.3>> | 2017-11-07
+| <<v2.0.2-plugins-inputs-cloudwatch,v2.0.2>> | 2017-08-15
+| <<v2.0.1-plugins-inputs-cloudwatch,v2.0.1>> | 2017-06-23
+|=======================================================================
+
+include::cloudwatch-v2.0.3.asciidoc[]
+include::cloudwatch-v2.0.2.asciidoc[]
+include::cloudwatch-v2.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/cloudwatch-v2.0.1.asciidoc b/docs/versioned-plugins/inputs/cloudwatch-v2.0.1.asciidoc
new file mode 100644
index 000000000..26633ef13
--- /dev/null
+++ b/docs/versioned-plugins/inputs/cloudwatch-v2.0.1.asciidoc
@@ -0,0 +1,266 @@
+:plugin: cloudwatch
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-cloudwatch/blob/v2.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Cloudwatch input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Pull events from the Amazon Web Services CloudWatch API.
+
+To use this plugin, you *must* have an AWS account and an IAM policy
+that grants the permissions shown below.
+
+Typically, you should set up an IAM policy, create a user, and apply the
+policy to that user. A sample policy for EC2 metrics is as follows:
+[source,json]
+    {
+      "Version": "2012-10-17",
+      "Statement": [
+        {
+          "Sid": "Stmt1444715676000",
+          "Effect": "Allow",
+          "Action": [
+            "cloudwatch:GetMetricStatistics",
+            "cloudwatch:ListMetrics"
+          ],
+          "Resource": "*"
+        },
+        {
+          "Sid": "Stmt1444716576170",
+          "Effect": "Allow",
+          "Action": [
+            "ec2:DescribeInstances"
+          ],
+          "Resource": "*"
+        }
+      ]
+    }
+
+See http://aws.amazon.com/iam/ for more details on setting up AWS identities.
+
+==== Configuration Example
+[source,ruby]
+    input {
+      cloudwatch {
+        namespace => "AWS/EC2"
+        metrics => [ "CPUUtilization" ]
+        filters => { "tag:Group" => "API-Production" }
+        region => "us-east-1"
+      }
+    }
+
+    input {
+      cloudwatch {
+        namespace => "AWS/EBS"
+        metrics => ["VolumeQueueLength"]
+        filters => { "tag:Monitoring" => "Yes" }
+        region => "us-east-1"
+      }
+    }
+
+    input {
+      cloudwatch {
+        namespace => "AWS/RDS"
+        metrics => ["CPUUtilization", "CPUCreditUsage"]
+        filters => { "EngineName" => "mysql" } # Only supports EngineName, DatabaseClass and DBInstanceIdentifier
+        region => "us-east-1"
+      }
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Cloudwatch Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-combined>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-filters>> |<<array,array>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-interval>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-metrics>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-namespace>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-period>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-region>> |<<string,string>>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No
+| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-session_token>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-statistics>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-use_ssl>> |<<boolean,boolean>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-access_key_id"]
+===== `access_key_id`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
+
+1. Static configuration, using `access_key_id` and `secret_access_key` params in the Logstash plugin config
+2. External credentials file specified by `aws_credentials_file`
+3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
+4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
+5. IAM Instance Profile (available when running inside EC2)
+
+[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"]
+===== `aws_credentials_file`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Path to a YAML file containing a hash of AWS credentials.
+This file will only be loaded if `access_key_id` and
+`secret_access_key` aren't set. The contents of the
+file should look like this:
+
+[source,ruby]
+----------------------------------
+    :access_key_id: "12345"
+    :secret_access_key: "54321"
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-combined"]
+===== `combined`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Use this for namespaces that need to combine the dimensions, such as S3 and SNS.
+
+[id="{version}-plugins-{type}s-{plugin}-filters"]
+===== `filters`
+
+ * This is a required setting.
+ * Value type is <<array,array>>
+ * There is no default value for this setting.
+
+Specify the filters to apply when fetching resources. These need to follow
+the AWS convention for specifying filters, for example:
+
+* Instances: `{ 'instance-id' => 'i-12344321' }`
+* Tags: `{ "tag:Environment" => "Production" }`
+* Volumes: `{ 'attachment.status' => 'attached' }`
+
+Each namespace supports its own set of dimensions, so consult the
+documentation to ensure you're using valid filters.
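+For instance, a sketch of an EBS setup using the volume filter convention
+shown above (metric names and filter values are illustrative) could look like
+this:
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  cloudwatch {
+    namespace => "AWS/EBS"
+    metrics   => ["VolumeReadOps", "VolumeWriteOps"]
+    # Volume filters follow the AWS convention described above
+    filters   => { "attachment.status" => "attached" }
+    region    => "us-east-1"
+  }
+}
+------------------------------------------------------------------------------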
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+ * Value type is <<number,number>>
+ * Default value is `900`
+
+Set how frequently CloudWatch should be queried.
+
+The default, `900`, means check every 15 minutes. Setting this value too low
+(generally less than 300) results in no metrics being returned from CloudWatch.
+
+[id="{version}-plugins-{type}s-{plugin}-metrics"]
+===== `metrics`
+
+ * Value type is <<array,array>>
+ * Default value is `["CPUUtilization", "DiskReadOps", "DiskWriteOps", "NetworkIn", "NetworkOut"]`
+
+Specify the metrics to fetch for the namespace. The defaults are AWS/EC2-specific. See http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/aws-namespaces.html
+for the available metrics for other namespaces.
+
+[id="{version}-plugins-{type}s-{plugin}-namespace"]
+===== `namespace`
+
+ * Value type is <<string,string>>
+ * Default value is `"AWS/EC2"`
+
+The service namespace of the metrics to fetch.
+
+The default is for the EC2 service. See http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/aws-namespaces.html
+for valid values.
+
+[id="{version}-plugins-{type}s-{plugin}-period"]
+===== `period`
+
+ * Value type is <<number,number>>
+ * Default value is `300`
+
+Set the granularity of the returned datapoints.
+
+Must be at least 60 seconds and a multiple of 60.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_uri"]
+===== `proxy_uri`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+URI of the proxy server, if required.
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+ * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
+ * Default value is `"us-east-1"`
+
+The AWS region.
+
+[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
+===== `secret_access_key`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The AWS Secret Access Key.
+
+[id="{version}-plugins-{type}s-{plugin}-session_token"]
+===== `session_token`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The AWS session token for temporary credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-statistics"]
+===== `statistics`
+
+ * Value type is <<array,array>>
+ * Default value is `["SampleCount", "Average", "Minimum", "Maximum", "Sum"]`
+
+Specify the statistics to fetch for each namespace.
+
+[id="{version}-plugins-{type}s-{plugin}-use_ssl"]
+===== `use_ssl`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Whether to use SSL (`true`) or not (`false`) when communicating with the AWS API.
+The AWS SDK for Ruby defaults to SSL, so this option preserves that default.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/cloudwatch-v2.0.2.asciidoc b/docs/versioned-plugins/inputs/cloudwatch-v2.0.2.asciidoc
new file mode 100644
index 000000000..bc8c01c8e
--- /dev/null
+++ b/docs/versioned-plugins/inputs/cloudwatch-v2.0.2.asciidoc
@@ -0,0 +1,266 @@
+:plugin: cloudwatch
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.2
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-input-cloudwatch/blob/v2.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Cloudwatch input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Pull events from the Amazon Web Services CloudWatch API.
+
+To use this plugin, you *must* have an AWS account and an IAM policy
+that grants the permissions shown below.
+
+Typically, you should set up an IAM policy, create a user, and apply the
+policy to that user. A sample policy for EC2 metrics is as follows:
+[source,json]
+    {
+      "Version": "2012-10-17",
+      "Statement": [
+        {
+          "Sid": "Stmt1444715676000",
+          "Effect": "Allow",
+          "Action": [
+            "cloudwatch:GetMetricStatistics",
+            "cloudwatch:ListMetrics"
+          ],
+          "Resource": "*"
+        },
+        {
+          "Sid": "Stmt1444716576170",
+          "Effect": "Allow",
+          "Action": [
+            "ec2:DescribeInstances"
+          ],
+          "Resource": "*"
+        }
+      ]
+    }
+
+See http://aws.amazon.com/iam/ for more details on setting up AWS identities.
+
+==== Configuration Example
+[source,ruby]
+    input {
+      cloudwatch {
+        namespace => "AWS/EC2"
+        metrics => [ "CPUUtilization" ]
+        filters => { "tag:Group" => "API-Production" }
+        region => "us-east-1"
+      }
+    }
+
+    input {
+      cloudwatch {
+        namespace => "AWS/EBS"
+        metrics => ["VolumeQueueLength"]
+        filters => { "tag:Monitoring" => "Yes" }
+        region => "us-east-1"
+      }
+    }
+
+    input {
+      cloudwatch {
+        namespace => "AWS/RDS"
+        metrics => ["CPUUtilization", "CPUCreditUsage"]
+        filters => { "EngineName" => "mysql" } # Only supports EngineName, DatabaseClass and DBInstanceIdentifier
+        region => "us-east-1"
+      }
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Cloudwatch Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-combined>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-filters>> |<<array,array>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-interval>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-metrics>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-namespace>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-period>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-region>> |<<string,string>>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No
+| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-session_token>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-statistics>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-use_ssl>> |<<boolean,boolean>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-access_key_id"]
+===== `access_key_id`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
+
+1. Static configuration, using `access_key_id` and `secret_access_key` params in the Logstash plugin config
+2. External credentials file specified by `aws_credentials_file`
+3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
+4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
+5. IAM Instance Profile (available when running inside EC2)
+
+[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"]
+===== `aws_credentials_file`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Path to a YAML file containing a hash of AWS credentials.
+This file will only be loaded if `access_key_id` and
+`secret_access_key` aren't set. The contents of the
+file should look like this:
+
+[source,ruby]
+----------------------------------
+    :access_key_id: "12345"
+    :secret_access_key: "54321"
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-combined"]
+===== `combined`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Use this for namespaces that need to combine the dimensions, such as S3 and SNS.
+
+[id="{version}-plugins-{type}s-{plugin}-filters"]
+===== `filters`
+
+ * This is a required setting.
+ * Value type is <<array,array>>
+ * There is no default value for this setting.
+
+Specify the filters to apply when fetching resources. These need to follow
+the AWS convention for specifying filters, for example:
+
+* Instances: `{ 'instance-id' => 'i-12344321' }`
+* Tags: `{ "tag:Environment" => "Production" }`
+* Volumes: `{ 'attachment.status' => 'attached' }`
+
+Each namespace supports its own set of dimensions, so consult the
+documentation to ensure you're using valid filters.
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+ * Value type is <<number,number>>
+ * Default value is `900`
+
+Set how frequently CloudWatch should be queried.
+
+The default, `900`, means check every 15 minutes. Setting this value too low
+(generally less than 300) results in no metrics being returned from CloudWatch.
+
+[id="{version}-plugins-{type}s-{plugin}-metrics"]
+===== `metrics`
+
+ * Value type is <<array,array>>
+ * Default value is `["CPUUtilization", "DiskReadOps", "DiskWriteOps", "NetworkIn", "NetworkOut"]`
+
+Specify the metrics to fetch for the namespace. The defaults are AWS/EC2-specific. See http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/aws-namespaces.html
+for the available metrics for other namespaces.
+
+[id="{version}-plugins-{type}s-{plugin}-namespace"]
+===== `namespace`
+
+ * Value type is <<string,string>>
+ * Default value is `"AWS/EC2"`
+
+The service namespace of the metrics to fetch.
+
+The default is for the EC2 service. See http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/aws-namespaces.html
+for valid values.
+
+[id="{version}-plugins-{type}s-{plugin}-period"]
+===== `period`
+
+ * Value type is <<number,number>>
+ * Default value is `300`
+
+Set the granularity of the returned datapoints.
+
+Must be at least 60 seconds and a multiple of 60.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_uri"]
+===== `proxy_uri`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+URI of the proxy server, if required.
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+ * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
+ * Default value is `"us-east-1"`
+
+The AWS region.
+
+[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
+===== `secret_access_key`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The AWS Secret Access Key.
+
+[id="{version}-plugins-{type}s-{plugin}-session_token"]
+===== `session_token`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The AWS session token for temporary credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-statistics"]
+===== `statistics`
+
+ * Value type is <<array,array>>
+ * Default value is `["SampleCount", "Average", "Minimum", "Maximum", "Sum"]`
+
+Specify the statistics to fetch for each namespace.
+
+[id="{version}-plugins-{type}s-{plugin}-use_ssl"]
+===== `use_ssl`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Whether to use SSL (`true`) or not (`false`) when communicating with the AWS API.
+The AWS SDK for Ruby defaults to SSL, so this option preserves that default.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/cloudwatch-v2.0.3.asciidoc b/docs/versioned-plugins/inputs/cloudwatch-v2.0.3.asciidoc
new file mode 100644
index 000000000..9ab8d443b
--- /dev/null
+++ b/docs/versioned-plugins/inputs/cloudwatch-v2.0.3.asciidoc
@@ -0,0 +1,266 @@
+:plugin: cloudwatch
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.3
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-cloudwatch/blob/v2.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Cloudwatch input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Pull events from the Amazon Web Services CloudWatch API.
+
+To use this plugin, you *must* have an AWS account and an IAM policy
+that grants the permissions shown below.
+
+Typically, you should set up an IAM policy, create a user, and apply the
+policy to that user. A sample policy for EC2 metrics is as follows:
+[source,json]
+    {
+      "Version": "2012-10-17",
+      "Statement": [
+        {
+          "Sid": "Stmt1444715676000",
+          "Effect": "Allow",
+          "Action": [
+            "cloudwatch:GetMetricStatistics",
+            "cloudwatch:ListMetrics"
+          ],
+          "Resource": "*"
+        },
+        {
+          "Sid": "Stmt1444716576170",
+          "Effect": "Allow",
+          "Action": [
+            "ec2:DescribeInstances"
+          ],
+          "Resource": "*"
+        }
+      ]
+    }
+
+See http://aws.amazon.com/iam/ for more details on setting up AWS identities.
+
+==== Configuration Example
+[source,ruby]
+    input {
+      cloudwatch {
+        namespace => "AWS/EC2"
+        metrics => [ "CPUUtilization" ]
+        filters => { "tag:Group" => "API-Production" }
+        region => "us-east-1"
+      }
+    }
+
+    input {
+      cloudwatch {
+        namespace => "AWS/EBS"
+        metrics => ["VolumeQueueLength"]
+        filters => { "tag:Monitoring" => "Yes" }
+        region => "us-east-1"
+      }
+    }
+
+    input {
+      cloudwatch {
+        namespace => "AWS/RDS"
+        metrics => ["CPUUtilization", "CPUCreditUsage"]
+        filters => { "EngineName" => "mysql" } # Only supports EngineName, DatabaseClass and DBInstanceIdentifier
+        region => "us-east-1"
+      }
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Cloudwatch Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-combined>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-filters>> |<<array,array>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-interval>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-metrics>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-namespace>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-period>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-region>> |<<string,string>>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No
+| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-session_token>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-statistics>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-use_ssl>> |<<boolean,boolean>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-access_key_id"]
+===== `access_key_id`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
+
+1. Static configuration, using `access_key_id` and `secret_access_key` params in the Logstash plugin config
+2. External credentials file specified by `aws_credentials_file`
+3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
+4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
+5. IAM Instance Profile (available when running inside EC2)
+
+[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"]
+===== `aws_credentials_file`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Path to a YAML file containing a hash of AWS credentials.
+This file will only be loaded if `access_key_id` and
+`secret_access_key` aren't set. The contents of the
+file should look like this:
+
+[source,ruby]
+----------------------------------
+    :access_key_id: "12345"
+    :secret_access_key: "54321"
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-combined"]
+===== `combined`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Use this for namespaces that need to combine the dimensions, such as S3 and SNS.
+
+[id="{version}-plugins-{type}s-{plugin}-filters"]
+===== `filters`
+
+ * This is a required setting.
+ * Value type is <<array,array>>
+ * There is no default value for this setting.
+
+Specify the filters to apply when fetching resources. These need to follow
+the AWS convention for specifying filters, for example:
+
+* Instances: `{ 'instance-id' => 'i-12344321' }`
+* Tags: `{ "tag:Environment" => "Production" }`
+* Volumes: `{ 'attachment.status' => 'attached' }`
+
+Each namespace supports its own set of dimensions, so consult the
+documentation to ensure you're using valid filters.
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+ * Value type is <<number,number>>
+ * Default value is `900`
+
+Set how frequently CloudWatch should be queried.
+
+The default, `900`, means check every 15 minutes. Setting this value too low
+(generally less than 300) results in no metrics being returned from CloudWatch.
+
+[id="{version}-plugins-{type}s-{plugin}-metrics"]
+===== `metrics`
+
+ * Value type is <<array,array>>
+ * Default value is `["CPUUtilization", "DiskReadOps", "DiskWriteOps", "NetworkIn", "NetworkOut"]`
+
+Specify the metrics to fetch for the namespace. The defaults are AWS/EC2-specific. See http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/aws-namespaces.html
+for the available metrics for other namespaces.
+
+[id="{version}-plugins-{type}s-{plugin}-namespace"]
+===== `namespace`
+
+ * Value type is <<string,string>>
+ * Default value is `"AWS/EC2"`
+
+The service namespace of the metrics to fetch.
+
+The default is for the EC2 service. See http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/aws-namespaces.html
+for valid values.
+
+[id="{version}-plugins-{type}s-{plugin}-period"]
+===== `period`
+
+ * Value type is <<number,number>>
+ * Default value is `300`
+
+Set the granularity of the returned datapoints.
+
+Must be at least 60 seconds and a multiple of 60.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_uri"]
+===== `proxy_uri`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+URI of the proxy server, if required.
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+ * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
+ * Default value is `"us-east-1"`
+
+The AWS region.
+
+[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
+===== `secret_access_key`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The AWS Secret Access Key.
+
+[id="{version}-plugins-{type}s-{plugin}-session_token"]
+===== `session_token`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The AWS session token for temporary credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-statistics"]
+===== `statistics`
+
+ * Value type is <<array,array>>
+ * Default value is `["SampleCount", "Average", "Minimum", "Maximum", "Sum"]`
+
+Specify the statistics to fetch for each namespace.
+
+[id="{version}-plugins-{type}s-{plugin}-use_ssl"]
+===== `use_ssl`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Whether to use SSL (`true`) or not (`false`) when communicating with the AWS API.
+The AWS SDK for Ruby defaults to SSL, so this option preserves that default.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/cloudwatch_logs-index.asciidoc b/docs/versioned-plugins/inputs/cloudwatch_logs-index.asciidoc
new file mode 100644
index 000000000..9aca25cc2
--- /dev/null
+++ b/docs/versioned-plugins/inputs/cloudwatch_logs-index.asciidoc
@@ -0,0 +1,10 @@
+:plugin: cloudwatch_logs
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+|=======================================================================
+
+
diff --git a/docs/versioned-plugins/inputs/couchdb_changes-index.asciidoc b/docs/versioned-plugins/inputs/couchdb_changes-index.asciidoc
new file mode 100644
index 000000000..1749eebe7
--- /dev/null
+++ b/docs/versioned-plugins/inputs/couchdb_changes-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: couchdb_changes
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <<v3.1.4-plugins-inputs-couchdb_changes,v3.1.4>> | 2017-11-07
+| <<v3.1.3-plugins-inputs-couchdb_changes,v3.1.3>> | 2017-08-15
+| <<v3.1.2-plugins-inputs-couchdb_changes,v3.1.2>> | 2017-06-23
+|=======================================================================
+
+include::couchdb_changes-v3.1.4.asciidoc[]
+include::couchdb_changes-v3.1.3.asciidoc[]
+include::couchdb_changes-v3.1.2.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/couchdb_changes-v3.1.2.asciidoc b/docs/versioned-plugins/inputs/couchdb_changes-v3.1.2.asciidoc
new file mode 100644
index 000000000..0801defb4
--- /dev/null
+++ b/docs/versioned-plugins/inputs/couchdb_changes-v3.1.2.asciidoc
@@ -0,0 +1,220 @@
+:plugin: couchdb_changes
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-couchdb_changes/blob/v3.1.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Couchdb_changes input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This CouchDB input allows you to automatically stream events from the
+CouchDB http://guide.couchdb.org/draft/notifications.html[_changes] URI.
+Moreover, any future changes are automatically streamed as well, making it
+easy to synchronize your CouchDB data with any target destination.
+
+==== Upsert and delete
+You can use event metadata to allow for document deletion.
+All non-delete operations are treated as upserts.
+
+==== Starting at a Specific Sequence
+The CouchDB input stores the last sequence number value in the location defined by `sequence_path`.
+You can use this fact to start or resume the stream at a particular sequence.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Couchdb_changes Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-always_reconnect>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-db>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-ignore_attachments>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-initial_sequence>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-keep_id>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-keep_revision>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-reconnect_delay>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-secure>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-sequence_path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-username>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-always_reconnect"]
+===== `always_reconnect`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Reconnect flag. When true, always try to reconnect after a failure.
+
+[id="{version}-plugins-{type}s-{plugin}-ca_file"]
+===== `ca_file`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Path to a CA certificate file, used to validate certificates.
+
+[id="{version}-plugins-{type}s-{plugin}-db"]
+===== `db`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The CouchDB database to connect to. This is a required parameter.
+
+[id="{version}-plugins-{type}s-{plugin}-heartbeat"]
+===== `heartbeat`
+
+ * Value type is <<number,number>>
+ * Default value is `1000`
+
+Logstash connects to CouchDB's _changes feed with `feed=continuous`.
+The heartbeat is how often (in milliseconds) Logstash will ping
+CouchDB to ensure the connection is maintained. Changing this
+setting is not recommended unless you know what you are doing.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <<string,string>>
+ * Default value is `"localhost"`
+
+IP or hostname of your CouchDB instance.
+
+[id="{version}-plugins-{type}s-{plugin}-ignore_attachments"]
+===== `ignore_attachments`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Future feature! Until implemented, changing this from the default
+will not do anything.
+
+Ignore attachments associated with CouchDB documents.
+
+[id="{version}-plugins-{type}s-{plugin}-initial_sequence"]
+===== `initial_sequence`
+
+ * Value type is <<number,number>>
+ * There is no default value for this setting.
+
+If unspecified, Logstash will attempt to read the last sequence number
+from the `sequence_path` file. If that is empty or non-existent, it will
+begin with 0 (the beginning).
+
+Specify this value only for an initial read under special circumstances,
+and unset it afterwards.
+
+[id="{version}-plugins-{type}s-{plugin}-keep_id"]
+===== `keep_id`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Preserve the CouchDB document id (`_id`) value in the
+output.
+
+[id="{version}-plugins-{type}s-{plugin}-keep_revision"]
+===== `keep_revision`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Preserve the CouchDB document revision (`_rev`) value in the
+output.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <<password,password>>
+ * Default value is `nil`
+
+Password, if authentication is needed to connect to
+CouchDB.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <<number,number>>
+ * Default value is `5984`
+
+Port of your CouchDB instance.
+
+[id="{version}-plugins-{type}s-{plugin}-reconnect_delay"]
+===== `reconnect_delay`
+
+ * Value type is <<number,number>>
+ * Default value is `10`
+
+Reconnect delay: time between reconnect attempts, in seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-secure"]
+===== `secure`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Connect to CouchDB's _changes feed securely (via HTTPS).
+The default, `false`, connects via HTTP.
+
+[id="{version}-plugins-{type}s-{plugin}-sequence_path"]
+===== `sequence_path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+File path where the last sequence number in the _changes
+stream is stored. If unset, it will write to `$HOME/.couchdb_seq`.
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is <<number,number>>
+ * There is no default value for this setting.
+
+Number of milliseconds to wait for new data before
+terminating the connection. If a timeout is set, it disables
+the `heartbeat` configuration option.
+
+[id="{version}-plugins-{type}s-{plugin}-username"]
+===== `username`
+
+ * Value type is <<string,string>>
+ * Default value is `nil`
+
+Username, if authentication is needed to connect to
+CouchDB.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/couchdb_changes-v3.1.3.asciidoc b/docs/versioned-plugins/inputs/couchdb_changes-v3.1.3.asciidoc
new file mode 100644
index 000000000..9099e8d8b
--- /dev/null
+++ b/docs/versioned-plugins/inputs/couchdb_changes-v3.1.3.asciidoc
@@ -0,0 +1,220 @@
+:plugin: couchdb_changes
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.3
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-input-couchdb_changes/blob/v3.1.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Couchdb_changes input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This CouchDB input allows you to automatically stream events from the
+CouchDB http://guide.couchdb.org/draft/notifications.html[_changes] URI.
+Moreover, any future changes are automatically streamed as well, making it
+easy to synchronize your CouchDB data with any target destination.
+
+==== Upsert and delete
+You can use event metadata to allow for document deletion.
+All non-delete operations are treated as upserts.
+
+==== Starting at a Specific Sequence
+The CouchDB input stores the last sequence number value in the location defined by `sequence_path`.
+You can use this fact to start or resume the stream at a particular sequence.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Couchdb_changes Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-always_reconnect>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-db>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-ignore_attachments>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-initial_sequence>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-keep_id>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-keep_revision>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-reconnect_delay>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-secure>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-sequence_path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-username>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-always_reconnect"]
+===== `always_reconnect`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Reconnect flag. When true, always try to reconnect after a failure.
+
+[id="{version}-plugins-{type}s-{plugin}-ca_file"]
+===== `ca_file`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Path to a CA certificate file, used to validate certificates.
+
+[id="{version}-plugins-{type}s-{plugin}-db"]
+===== `db`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The CouchDB database to connect to. This is a required parameter.
+
+[id="{version}-plugins-{type}s-{plugin}-heartbeat"]
+===== `heartbeat`
+
+ * Value type is <<number,number>>
+ * Default value is `1000`
+
+Logstash connects to CouchDB's _changes feed with `feed=continuous`.
+The heartbeat is how often (in milliseconds) Logstash will ping
+CouchDB to ensure the connection is maintained. Changing this
+setting is not recommended unless you know what you are doing.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <<string,string>>
+ * Default value is `"localhost"`
+
+IP or hostname of your CouchDB instance.
+
+[id="{version}-plugins-{type}s-{plugin}-ignore_attachments"]
+===== `ignore_attachments`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Future feature! Until implemented, changing this from the default
+will not do anything.
+
+Ignore attachments associated with CouchDB documents.
+ +[id="{version}-plugins-{type}s-{plugin}-initial_sequence"] +===== `initial_sequence` + + * Value type is <> + * There is no default value for this setting. + +If unspecified, Logstash will attempt to read the last sequence number +from the `sequence_path` file. If that is empty or non-existent, it will +begin with 0 (the beginning). + +If you specify this value, it is anticipated that you will +only be doing so for an initial read under special circumstances +and that you will unset this value afterwards. + +[id="{version}-plugins-{type}s-{plugin}-keep_id"] +===== `keep_id` + + * Value type is <> + * Default value is `false` + +Preserve the CouchDB document id "_id" value in the +output. + +[id="{version}-plugins-{type}s-{plugin}-keep_revision"] +===== `keep_revision` + + * Value type is <> + * Default value is `false` + +Preserve the CouchDB document revision "_rev" value in the +output. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * Default value is `nil` + +Password, if authentication is needed to connect to +CouchDB + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `5984` + +Port of your CouchDB instance. + +[id="{version}-plugins-{type}s-{plugin}-reconnect_delay"] +===== `reconnect_delay` + + * Value type is <> + * Default value is `10` + +Reconnect delay: time between reconnect attempts, in seconds. + +[id="{version}-plugins-{type}s-{plugin}-secure"] +===== `secure` + + * Value type is <> + * Default value is `false` + +Connect to CouchDB's _changes feed securely (via https) +Default: false (via http) + +[id="{version}-plugins-{type}s-{plugin}-sequence_path"] +===== `sequence_path` + + * Value type is <> + * There is no default value for this setting. + +File path where the last sequence number in the _changes +stream is stored. If unset it will write to `$HOME/.couchdb_seq` + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * There is no default value for this setting. + +Timeout: Number of milliseconds to wait for new data before +terminating the connection. If a timeout is set it will disable +the heartbeat configuration option. + +[id="{version}-plugins-{type}s-{plugin}-username"] +===== `username` + + * Value type is <> + * Default value is `nil` + +Username, if authentication is needed to connect to +CouchDB + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/couchdb_changes-v3.1.4.asciidoc b/docs/versioned-plugins/inputs/couchdb_changes-v3.1.4.asciidoc new file mode 100644 index 000000000..451c2cd96 --- /dev/null +++ b/docs/versioned-plugins/inputs/couchdb_changes-v3.1.4.asciidoc @@ -0,0 +1,220 @@ +:plugin: couchdb_changes +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.4 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-input-couchdb_changes/blob/v3.1.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Couchdb_changes input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This CouchDB input allows you to automatically stream events from the
+CouchDB http://guide.couchdb.org/draft/notifications.html[_changes] URI.
+Moreover, any "future" changes will automatically be streamed as well, making it easy to synchronize
+your CouchDB data with any target destination.
+
+===== Upsert and delete
+You can use event metadata to allow for document deletion.
+All non-delete operations are treated as upserts.
+
+===== Starting at a Specific Sequence
+The CouchDB input stores the last sequence number in the location defined by `sequence_path`.
+You can use this fact to start or resume the stream at a particular sequence.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Couchdb_changes Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-always_reconnect>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-db>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ignore_attachments>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-initial_sequence>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keep_id>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keep_revision>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-reconnect_delay>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-secure>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sequence_path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-username>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-always_reconnect"]
+===== `always_reconnect`
+
+ * Value type is <>
+ * Default value is `true`
+
+Reconnect flag. When true, always try to reconnect after a failure.
+
+[id="{version}-plugins-{type}s-{plugin}-ca_file"]
+===== `ca_file`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path to a CA certificate file, used to validate certificates.
+
+[id="{version}-plugins-{type}s-{plugin}-db"]
+===== `db`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The CouchDB database to connect to.
+
+[id="{version}-plugins-{type}s-{plugin}-heartbeat"]
+===== `heartbeat`
+
+ * Value type is <>
+ * Default value is `1000`
+
+Logstash connects to CouchDB's `_changes` feed with `feed=continuous`.
+The heartbeat is how often (in milliseconds) Logstash will ping
+CouchDB to ensure the connection is maintained. Changing this
+setting is not recommended unless you know what you are doing.
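+
+As a sketch of the upsert-and-delete handling described above, the change type
+recorded in the event metadata can drive the `action` of an output. The
+`[@metadata][action]` and `[@metadata][_id]` field names and the index name
+below are assumptions for illustration, not documented defaults:
+
+[source,ruby]
+-----------------------------------------
+input {
+  couchdb_changes {
+    host => "localhost"
+    db => "mydb"
+    sequence_path => "/var/lib/logstash/couchdb_seq"
+  }
+}
+output {
+  elasticsearch {
+    hosts => ["localhost:9200"]
+    index => "couch-mirror"
+    action => "%{[@metadata][action]}"    # assumed to hold "update" or "delete"
+    document_id => "%{[@metadata][_id]}"  # assumed to hold the CouchDB _id
+  }
+}
+-----------------------------------------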
+ +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"localhost"` + +IP or hostname of your CouchDB instance + +[id="{version}-plugins-{type}s-{plugin}-ignore_attachments"] +===== `ignore_attachments` + + * Value type is <> + * Default value is `true` + +Future feature! Until implemented, changing this from the default +will not do anything. + +Ignore attachments associated with CouchDB documents. + +[id="{version}-plugins-{type}s-{plugin}-initial_sequence"] +===== `initial_sequence` + + * Value type is <> + * There is no default value for this setting. + +If unspecified, Logstash will attempt to read the last sequence number +from the `sequence_path` file. If that is empty or non-existent, it will +begin with 0 (the beginning). + +If you specify this value, it is anticipated that you will +only be doing so for an initial read under special circumstances +and that you will unset this value afterwards. + +[id="{version}-plugins-{type}s-{plugin}-keep_id"] +===== `keep_id` + + * Value type is <> + * Default value is `false` + +Preserve the CouchDB document id "_id" value in the +output. + +[id="{version}-plugins-{type}s-{plugin}-keep_revision"] +===== `keep_revision` + + * Value type is <> + * Default value is `false` + +Preserve the CouchDB document revision "_rev" value in the +output. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * Default value is `nil` + +Password, if authentication is needed to connect to +CouchDB + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `5984` + +Port of your CouchDB instance. + +[id="{version}-plugins-{type}s-{plugin}-reconnect_delay"] +===== `reconnect_delay` + + * Value type is <> + * Default value is `10` + +Reconnect delay: time between reconnect attempts, in seconds. + +[id="{version}-plugins-{type}s-{plugin}-secure"] +===== `secure` + + * Value type is <> + * Default value is `false` + +Connect to CouchDB's _changes feed securely (via https) +Default: false (via http) + +[id="{version}-plugins-{type}s-{plugin}-sequence_path"] +===== `sequence_path` + + * Value type is <> + * There is no default value for this setting. + +File path where the last sequence number in the _changes +stream is stored. If unset it will write to `$HOME/.couchdb_seq` + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * There is no default value for this setting. + +Timeout: Number of milliseconds to wait for new data before +terminating the connection. If a timeout is set it will disable +the heartbeat configuration option. 
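+
+Because setting `timeout` disables the heartbeat, a configuration that prefers
+bounded connections with periodic reconnects might look like this sketch
+(values are illustrative, not recommendations):
+
+[source,ruby]
+-----------------------------------------
+input {
+  couchdb_changes {
+    db => "mydb"
+    timeout => 60000          # drop the connection after 60 seconds of silence
+    always_reconnect => true  # then reconnect...
+    reconnect_delay => 10     # ...after 10 seconds
+  }
+}
+-----------------------------------------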
+ +[id="{version}-plugins-{type}s-{plugin}-username"] +===== `username` + + * Value type is <> + * Default value is `nil` + +Username, if authentication is needed to connect to +CouchDB + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/dead_letter_queue-index.asciidoc b/docs/versioned-plugins/inputs/dead_letter_queue-index.asciidoc new file mode 100644 index 000000000..5f2a99acf --- /dev/null +++ b/docs/versioned-plugins/inputs/dead_letter_queue-index.asciidoc @@ -0,0 +1,24 @@ +:plugin: dead_letter_queue +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-09-20 +| <> | 2017-08-25 +| <> | 2017-08-15 +| <> | 2017-07-12 +| <> | 2017-06-23 +| <> | 2017-06-06 +|======================================================================= + +include::dead_letter_queue-v1.1.2.asciidoc[] +include::dead_letter_queue-v1.1.1.asciidoc[] +include::dead_letter_queue-v1.1.0.asciidoc[] +include::dead_letter_queue-v1.0.6.asciidoc[] +include::dead_letter_queue-v1.0.5.asciidoc[] +include::dead_letter_queue-v1.0.4.asciidoc[] +include::dead_letter_queue-v1.0.3.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.3.asciidoc b/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.3.asciidoc new file mode 100644 index 000000000..24f84678f --- /dev/null +++ b/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.3.asciidoc @@ -0,0 +1,108 @@ +:plugin: dead_letter_queue +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v1.0.3 +:release_date: 2017-06-06 +:changelog_url: https://github.com/logstash-plugins/logstash-input-dead_letter_queue/blob/v1.0.3/CHANGELOG.md +:include_path: ../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Dead_letter_queue + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Logstash input to read events from Logstash's dead letter queue. + +[source, sh] +----------------------------------------- +input { + dead_letter_queue { + path => "/var/logstash/data/dead_letter_queue" + start_timestamp => "2017-04-04T23:40:37" + } +} +----------------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Dead_letter_queue Input Configuration Options + +This plugin supports the following configuration options plus the <> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-commit_offsets>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-path>> |a valid filesystem path|Yes +| <<{version}-plugins-{type}s-{plugin}-pipeline_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-start_timestamp>> |<>|No +|======================================================================= + +Also see <> for a list of options supported by all +input plugins. 
+ +  + +[id="{version}-plugins-{type}s-{plugin}-commit_offsets"] +===== `commit_offsets` + + * Value type is <> + * Default value is `true` + +Specifies whether this input should commit offsets as it processes the events. +Typically you specify `false` when you want to iterate multiple times over the +events in the dead letter queue, but don't want to save state. This is when you +are exploring the events in the dead letter queue. + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Path to the dead letter queue directory that was created by a Logstash instance. +This is the path from which "dead" events are read and is typically configured +in the original Logstash instance with the setting `path.dead_letter_queue`. + +[id="{version}-plugins-{type}s-{plugin}-pipeline_id"] +===== `pipeline_id` + + * Value type is <> + * Default value is `"main"` + +ID of the pipeline whose events you want to read from. + +[id="{version}-plugins-{type}s-{plugin}-sincedb_path"] +===== `sincedb_path` + + * Value type is <> + * There is no default value for this setting. + +Path of the sincedb database file (keeps track of the current position of dead letter queue) that +will be written to disk. The default will write sincedb files to `/plugins/inputs/dead_letter_queue`. + +NOTE: This value must be a file path and not a directory path. + +[id="{version}-plugins-{type}s-{plugin}-start_timestamp"] +===== `start_timestamp` + + * Value type is <> + * There is no default value for this setting. + +Timestamp in ISO8601 format from when you want to start processing the events from. +For example, `2017-04-04T23:40:37`. + + + +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.4.asciidoc b/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.4.asciidoc new file mode 100644 index 000000000..9f8dc1474 --- /dev/null +++ b/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.4.asciidoc @@ -0,0 +1,109 @@ +:plugin: dead_letter_queue +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v1.0.4 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-input-dead_letter_queue/blob/v1.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Dead_letter_queue input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Logstash input to read events from Logstash's dead letter queue. + +[source, sh] +----------------------------------------- +input { + dead_letter_queue { + path => "/var/logstash/data/dead_letter_queue" + start_timestamp => "2017-04-04T23:40:37" + } +} +----------------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Dead_letter_queue Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-commit_offsets>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-path>> |a valid filesystem path|Yes +| <<{version}-plugins-{type}s-{plugin}-pipeline_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-start_timestamp>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-commit_offsets"] +===== `commit_offsets` + + * Value type is <> + * Default value is `true` + +Specifies whether this input should commit offsets as it processes the events. +Typically you specify `false` when you want to iterate multiple times over the +events in the dead letter queue, but don't want to save state. This is when you +are exploring the events in the dead letter queue. + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Path to the dead letter queue directory that was created by a Logstash instance. +This is the path from which "dead" events are read and is typically configured +in the original Logstash instance with the setting `path.dead_letter_queue`. + +[id="{version}-plugins-{type}s-{plugin}-pipeline_id"] +===== `pipeline_id` + + * Value type is <> + * Default value is `"main"` + +ID of the pipeline whose events you want to read from. + +[id="{version}-plugins-{type}s-{plugin}-sincedb_path"] +===== `sincedb_path` + + * Value type is <> + * There is no default value for this setting. + +Path of the sincedb database file (keeps track of the current position of dead letter queue) that +will be written to disk. The default will write sincedb files to `/plugins/inputs/dead_letter_queue`. + +NOTE: This value must be a file path and not a directory path. + +[id="{version}-plugins-{type}s-{plugin}-start_timestamp"] +===== `start_timestamp` + + * Value type is <> + * There is no default value for this setting. + +Timestamp in ISO8601 format from when you want to start processing the events from. +For example, `2017-04-04T23:40:37`. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.5.asciidoc b/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.5.asciidoc new file mode 100644 index 000000000..b2078c3a1 --- /dev/null +++ b/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.5.asciidoc @@ -0,0 +1,109 @@ +:plugin: dead_letter_queue +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v1.0.5 +:release_date: 2017-07-12 +:changelog_url: https://github.com/logstash-plugins/logstash-input-dead_letter_queue/blob/v1.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Dead_letter_queue input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Logstash input to read events from Logstash's dead letter queue. + +[source, sh] +----------------------------------------- +input { + dead_letter_queue { + path => "/var/logstash/data/dead_letter_queue" + start_timestamp => "2017-04-04T23:40:37" + } +} +----------------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Dead_letter_queue Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-commit_offsets>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-path>> |a valid filesystem path|Yes +| <<{version}-plugins-{type}s-{plugin}-pipeline_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-start_timestamp>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-commit_offsets"] +===== `commit_offsets` + + * Value type is <> + * Default value is `true` + +Specifies whether this input should commit offsets as it processes the events. +Typically you specify `false` when you want to iterate multiple times over the +events in the dead letter queue, but don't want to save state. This is when you +are exploring the events in the dead letter queue. + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Path to the dead letter queue directory that was created by a Logstash instance. +This is the path from which "dead" events are read and is typically configured +in the original Logstash instance with the setting `path.dead_letter_queue`. + +[id="{version}-plugins-{type}s-{plugin}-pipeline_id"] +===== `pipeline_id` + + * Value type is <> + * Default value is `"main"` + +ID of the pipeline whose events you want to read from. + +[id="{version}-plugins-{type}s-{plugin}-sincedb_path"] +===== `sincedb_path` + + * Value type is <> + * There is no default value for this setting. + +Path of the sincedb database file (keeps track of the current position of dead letter queue) that +will be written to disk. The default will write sincedb files to `/plugins/inputs/dead_letter_queue`. + +NOTE: This value must be a file path and not a directory path. + +[id="{version}-plugins-{type}s-{plugin}-start_timestamp"] +===== `start_timestamp` + + * Value type is <> + * There is no default value for this setting. + +Timestamp in ISO8601 format from when you want to start processing the events from. +For example, `2017-04-04T23:40:37`. 
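+
+For example, to explore the queue repeatedly without saving state, the options
+above can be combined as in this sketch (the path is illustrative):
+
+[source, sh]
+-----------------------------------------
+input {
+  dead_letter_queue {
+    path => "/var/logstash/data/dead_letter_queue"
+    commit_offsets => false   # re-read the same events on every run
+    pipeline_id => "main"
+  }
+}
+output {
+  stdout { codec => rubydebug { metadata => true } }
+}
+-----------------------------------------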
+ + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.6.asciidoc b/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.6.asciidoc new file mode 100644 index 000000000..1cb2f1507 --- /dev/null +++ b/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.6.asciidoc @@ -0,0 +1,109 @@ +:plugin: dead_letter_queue +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v1.0.6 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-input-dead_letter_queue/blob/v1.0.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Dead_letter_queue input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Logstash input to read events from Logstash's dead letter queue. + +[source, sh] +----------------------------------------- +input { + dead_letter_queue { + path => "/var/logstash/data/dead_letter_queue" + start_timestamp => "2017-04-04T23:40:37" + } +} +----------------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Dead_letter_queue Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-commit_offsets>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-path>> |a valid filesystem path|Yes +| <<{version}-plugins-{type}s-{plugin}-pipeline_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-start_timestamp>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-commit_offsets"] +===== `commit_offsets` + + * Value type is <> + * Default value is `true` + +Specifies whether this input should commit offsets as it processes the events. +Typically you specify `false` when you want to iterate multiple times over the +events in the dead letter queue, but don't want to save state. This is when you +are exploring the events in the dead letter queue. + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Path to the dead letter queue directory that was created by a Logstash instance. +This is the path from which "dead" events are read and is typically configured +in the original Logstash instance with the setting `path.dead_letter_queue`. + +[id="{version}-plugins-{type}s-{plugin}-pipeline_id"] +===== `pipeline_id` + + * Value type is <> + * Default value is `"main"` + +ID of the pipeline whose events you want to read from. + +[id="{version}-plugins-{type}s-{plugin}-sincedb_path"] +===== `sincedb_path` + + * Value type is <> + * There is no default value for this setting. 
+ +Path of the sincedb database file (keeps track of the current position of dead letter queue) that +will be written to disk. The default will write sincedb files to `/plugins/inputs/dead_letter_queue`. + +NOTE: This value must be a file path and not a directory path. + +[id="{version}-plugins-{type}s-{plugin}-start_timestamp"] +===== `start_timestamp` + + * Value type is <> + * There is no default value for this setting. + +Timestamp in ISO8601 format from when you want to start processing the events from. +For example, `2017-04-04T23:40:37`. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/dead_letter_queue-v1.1.0.asciidoc b/docs/versioned-plugins/inputs/dead_letter_queue-v1.1.0.asciidoc new file mode 100644 index 000000000..5b75eb130 --- /dev/null +++ b/docs/versioned-plugins/inputs/dead_letter_queue-v1.1.0.asciidoc @@ -0,0 +1,109 @@ +:plugin: dead_letter_queue +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v1.1.0 +:release_date: 2017-08-25 +:changelog_url: https://github.com/logstash-plugins/logstash-input-dead_letter_queue/blob/v1.1.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Dead_letter_queue input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Logstash input to read events from Logstash's dead letter queue. + +[source, sh] +----------------------------------------- +input { + dead_letter_queue { + path => "/var/logstash/data/dead_letter_queue" + start_timestamp => "2017-04-04T23:40:37" + } +} +----------------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Dead_letter_queue Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-commit_offsets>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-path>> |a valid filesystem path|Yes +| <<{version}-plugins-{type}s-{plugin}-pipeline_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-start_timestamp>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-commit_offsets"] +===== `commit_offsets` + + * Value type is <> + * Default value is `true` + +Specifies whether this input should commit offsets as it processes the events. +Typically you specify `false` when you want to iterate multiple times over the +events in the dead letter queue, but don't want to save state. This is when you +are exploring the events in the dead letter queue. + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Path to the dead letter queue directory that was created by a Logstash instance. 
+This is the path from which "dead" events are read and is typically configured
+in the original Logstash instance with the setting `path.dead_letter_queue`.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline_id"]
+===== `pipeline_id`
+
+ * Value type is <>
+ * Default value is `"main"`
+
+ID of the pipeline whose events you want to read from.
+
+[id="{version}-plugins-{type}s-{plugin}-sincedb_path"]
+===== `sincedb_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path of the sincedb database file (keeps track of the current position of the dead letter queue) that
+will be written to disk. The default will write sincedb files to `<path.data>/plugins/inputs/dead_letter_queue`.
+
+NOTE: This value must be a file path and not a directory path.
+
+[id="{version}-plugins-{type}s-{plugin}-start_timestamp"]
+===== `start_timestamp`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Timestamp in ISO8601 format from which you want to start processing events.
+For example, `2017-04-04T23:40:37`.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/dead_letter_queue-v1.1.1.asciidoc b/docs/versioned-plugins/inputs/dead_letter_queue-v1.1.1.asciidoc
new file mode 100644
index 000000000..041bfd037
--- /dev/null
+++ b/docs/versioned-plugins/inputs/dead_letter_queue-v1.1.1.asciidoc
@@ -0,0 +1,112 @@
+:plugin: dead_letter_queue
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v1.1.1
+:release_date: 2017-09-20
+:changelog_url: https://github.com/logstash-plugins/logstash-input-dead_letter_queue/blob/v1.1.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Dead_letter_queue input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Logstash input to read events from Logstash's dead letter queue.
+
+[source, sh]
+-----------------------------------------
+input {
+  dead_letter_queue {
+    path => "/var/logstash/data/dead_letter_queue"
+    start_timestamp => "2017-04-04T23:40:37"
+  }
+}
+-----------------------------------------
+
+
+For more information about processing events in the dead letter queue, see
+{logstash-ref}/dead-letter-queues.html[Dead Letter Queues].
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Dead_letter_queue Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-commit_offsets>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |a valid filesystem path|Yes
+| <<{version}-plugins-{type}s-{plugin}-pipeline_id>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-start_timestamp>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-commit_offsets"]
+===== `commit_offsets`
+
+ * Value type is <>
+ * Default value is `true`
+
+Specifies whether this input should commit offsets as it processes the events.
+Typically you set this to `false` when you want to iterate multiple times over
+the events in the dead letter queue without saving state, for example while you
+are exploring the events.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path to the dead letter queue directory that was created by a Logstash instance.
+This is the path from which "dead" events are read and is typically configured
+in the original Logstash instance with the setting `path.dead_letter_queue`.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline_id"]
+===== `pipeline_id`
+
+ * Value type is <>
+ * Default value is `"main"`
+
+ID of the pipeline whose events you want to read from.
+
+[id="{version}-plugins-{type}s-{plugin}-sincedb_path"]
+===== `sincedb_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path of the sincedb database file (keeps track of the current position of the dead letter queue) that
+will be written to disk. The default will write sincedb files to `<path.data>/plugins/inputs/dead_letter_queue`.
+
+NOTE: This value must be a file path and not a directory path.
+
+[id="{version}-plugins-{type}s-{plugin}-start_timestamp"]
+===== `start_timestamp`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Timestamp in ISO8601 format from which you want to start processing events.
+For example, `2017-04-04T23:40:37`.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/dead_letter_queue-v1.1.2.asciidoc b/docs/versioned-plugins/inputs/dead_letter_queue-v1.1.2.asciidoc
new file mode 100644
index 000000000..27d165723
--- /dev/null
+++ b/docs/versioned-plugins/inputs/dead_letter_queue-v1.1.2.asciidoc
@@ -0,0 +1,112 @@
+:plugin: dead_letter_queue
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v1.1.2
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-dead_letter_queue/blob/v1.1.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Dead_letter_queue input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Logstash input to read events from Logstash's dead letter queue.
+
+[source, sh]
+-----------------------------------------
+input {
+  dead_letter_queue {
+    path => "/var/logstash/data/dead_letter_queue"
+    start_timestamp => "2017-04-04T23:40:37"
+  }
+}
+-----------------------------------------
+
+
+For more information about processing events in the dead letter queue, see
+{logstash-ref}/dead-letter-queues.html[Dead Letter Queues].
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Dead_letter_queue Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-commit_offsets>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-path>> |a valid filesystem path|Yes +| <<{version}-plugins-{type}s-{plugin}-pipeline_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-start_timestamp>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-commit_offsets"] +===== `commit_offsets` + + * Value type is <> + * Default value is `true` + +Specifies whether this input should commit offsets as it processes the events. +Typically you specify `false` when you want to iterate multiple times over the +events in the dead letter queue, but don't want to save state. This is when you +are exploring the events in the dead letter queue. + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Path to the dead letter queue directory that was created by a Logstash instance. +This is the path from which "dead" events are read and is typically configured +in the original Logstash instance with the setting `path.dead_letter_queue`. + +[id="{version}-plugins-{type}s-{plugin}-pipeline_id"] +===== `pipeline_id` + + * Value type is <> + * Default value is `"main"` + +ID of the pipeline whose events you want to read from. + +[id="{version}-plugins-{type}s-{plugin}-sincedb_path"] +===== `sincedb_path` + + * Value type is <> + * There is no default value for this setting. + +Path of the sincedb database file (keeps track of the current position of dead letter queue) that +will be written to disk. The default will write sincedb files to `/plugins/inputs/dead_letter_queue`. + +NOTE: This value must be a file path and not a directory path. + +[id="{version}-plugins-{type}s-{plugin}-start_timestamp"] +===== `start_timestamp` + + * Value type is <> + * There is no default value for this setting. + +Timestamp in ISO8601 format from when you want to start processing the events from. +For example, `2017-04-04T23:40:37`. 
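+
+Conversely, to process the queue while recording progress so that a restart
+resumes where it left off, a sketch like the following can be used (paths are
+illustrative):
+
+[source, sh]
+-----------------------------------------
+input {
+  dead_letter_queue {
+    path => "/var/logstash/data/dead_letter_queue"
+    commit_offsets => true                           # save position as events are processed
+    sincedb_path => "/var/lib/logstash/dlq_sincedb"  # must be a file path, not a directory
+    start_timestamp => "2017-04-04T23:40:37"
+  }
+}
+-----------------------------------------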
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/drupal_dblog-index.asciidoc b/docs/versioned-plugins/inputs/drupal_dblog-index.asciidoc
new file mode 100644
index 000000000..7c7d4b835
--- /dev/null
+++ b/docs/versioned-plugins/inputs/drupal_dblog-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: drupal_dblog
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::drupal_dblog-v2.0.6.asciidoc[]
+include::drupal_dblog-v2.0.5.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/drupal_dblog-v2.0.5.asciidoc b/docs/versioned-plugins/inputs/drupal_dblog-v2.0.5.asciidoc
new file mode 100644
index 000000000..ada5190bf
--- /dev/null
+++ b/docs/versioned-plugins/inputs/drupal_dblog-v2.0.5.asciidoc
@@ -0,0 +1,102 @@
+:plugin: drupal_dblog
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.5
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-drupal_dblog/blob/v2.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Drupal_dblog input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Retrieve watchdog log events from a Drupal installation with DBLog enabled.
+The events are pulled out directly from the database.
+The original events are not deleted, and on each subsequent run only new
+events are pulled.
+
+The last watchdog event id that was processed is stored in the Drupal
+variable table with the name "logstash_last_wid". Delete this variable or
+set it to 0 if you want to re-import all events.
+
+More info on DBLog: http://drupal.org/documentation/modules/dblog
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Drupal_dblog Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-add_usernames>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-bulksize>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-databases>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-add_usernames"]
+===== `add_usernames`
+
+ * Value type is <>
+ * Default value is `false`
+
+By default, the event only contains the current user id as a field.
+If you wish to add the username as an additional field, set this to `true`.
+
+[id="{version}-plugins-{type}s-{plugin}-bulksize"]
+===== `bulksize`
+
+ * Value type is <>
+ * Default value is `5000`
+
+The number of log messages that should be fetched with each query.
+Bulk fetching is done to prevent querying huge data sets when lots of
+messages are in the database.
+
+[id="{version}-plugins-{type}s-{plugin}-databases"]
+===== `databases`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Specify all Drupal databases that you wish to import from.
+You can list as many as you wish.
+The format is a hash, with a unique site name as the key and a database
+URL as the value.
+
+Example:
+[source,ruby]
+    databases => {
+      "site1" => "mysql://user1:password@host1.com/databasename",
+      "other_site" => "mysql://user2:password@otherhost.com/databasename"
+      # ...
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+ * Value type is <>
+ * Default value is `10`
+
+Time between checks in minutes.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/drupal_dblog-v2.0.6.asciidoc b/docs/versioned-plugins/inputs/drupal_dblog-v2.0.6.asciidoc
new file mode 100644
index 000000000..df31047c0
--- /dev/null
+++ b/docs/versioned-plugins/inputs/drupal_dblog-v2.0.6.asciidoc
@@ -0,0 +1,102 @@
+:plugin: drupal_dblog
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.6
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-input-drupal_dblog/blob/v2.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Drupal_dblog input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Retrieve watchdog log events from a Drupal installation with DBLog enabled.
+The events are pulled out directly from the database.
+The original events are not deleted, and on each subsequent run only new
+events are pulled.
+
+The last watchdog event id that was processed is stored in the Drupal
+variable table with the name "logstash_last_wid". Delete this variable or
+set it to 0 if you want to re-import all events.
+
+More info on DBLog: http://drupal.org/documentation/modules/dblog
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Drupal_dblog Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-add_usernames>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-bulksize>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-databases>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-add_usernames"]
+===== `add_usernames`
+
+ * Value type is <>
+ * Default value is `false`
+
+By default, the event only contains the current user id as a field.
+If you wish to add the username as an additional field, set this to `true`.
+
+[id="{version}-plugins-{type}s-{plugin}-bulksize"]
+===== `bulksize`
+
+ * Value type is <>
+ * Default value is `5000`
+
+The number of log messages that should be fetched with each query.
+Bulk fetching is done to prevent querying huge data sets when lots of
+messages are in the database.
+
+[id="{version}-plugins-{type}s-{plugin}-databases"]
+===== `databases`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Specify all Drupal databases that you wish to import from.
+You can list as many as you wish.
+The format is a hash, with a unique site name as the key and a database
+URL as the value.
+
+Example:
+[source,ruby]
+    databases => {
+      "site1" => "mysql://user1:password@host1.com/databasename",
+      "other_site" => "mysql://user2:password@otherhost.com/databasename"
+      # ...
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+ * Value type is <>
+ * Default value is `10`
+
+Time between checks in minutes.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/dynamodb-index.asciidoc b/docs/versioned-plugins/inputs/dynamodb-index.asciidoc
new file mode 100644
index 000000000..e0cbc16b7
--- /dev/null
+++ b/docs/versioned-plugins/inputs/dynamodb-index.asciidoc
@@ -0,0 +1,10 @@
+:plugin: dynamodb
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+|=======================================================================
+
+
diff --git a/docs/versioned-plugins/inputs/elasticsearch-index.asciidoc b/docs/versioned-plugins/inputs/elasticsearch-index.asciidoc
new file mode 100644
index 000000000..8984db997
--- /dev/null
+++ b/docs/versioned-plugins/inputs/elasticsearch-index.asciidoc
@@ -0,0 +1,20 @@
+:plugin: elasticsearch
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-10-27
+| <> | 2017-09-20
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::elasticsearch-v4.1.1.asciidoc[]
+include::elasticsearch-v4.1.0.asciidoc[]
+include::elasticsearch-v4.0.6.asciidoc[]
+include::elasticsearch-v4.0.5.asciidoc[]
+include::elasticsearch-v4.0.4.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/elasticsearch-v4.0.4.asciidoc b/docs/versioned-plugins/inputs/elasticsearch-v4.0.4.asciidoc
new file mode 100644
index 000000000..8c79ab613
--- /dev/null
+++ b/docs/versioned-plugins/inputs/elasticsearch-v4.0.4.asciidoc
@@ -0,0 +1,220 @@
+:plugin: elasticsearch
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.4
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-elasticsearch/blob/v4.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch input
+plugin to version 4.0.2 or higher.
+
+================================================================================
+
+Read from an Elasticsearch cluster, based on search query results.
+This is useful for replaying test logs, reindexing, etc.
+
+Example:
+[source,ruby]
+    input {
+      # Read all documents from Elasticsearch matching the given query
+      elasticsearch {
+        hosts => "localhost"
+        query => '{ "query": { "match": { "statuscode": 200 } }, "sort": [ "_doc" ] }'
+      }
+    }
+
+This would create an Elasticsearch query with the following format:
+[source,json]
+    curl 'http://localhost:9200/logstash-*/_search?&scroll=1m&size=1000' -d '{
+      "query": {
+        "match": {
+          "statuscode": 200
+        }
+      },
+      "sort": [ "_doc" ]
+    }'
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-docinfo>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-docinfo_fields>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-docinfo_target>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-query>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-scroll>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-size>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-ca_file"]
+===== `ca_file`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL Certificate Authority file in PEM encoded format; it must also include any chain certificates as necessary.
+
+[id="{version}-plugins-{type}s-{plugin}-docinfo"]
+===== `docinfo`
+
+ * Value type is <>
+ * Default value is `false`
+
+If set, include Elasticsearch document information such as index, type, and
+the id in the event.
+
+Note that if you're ingesting documents with the intent to re-index them (or
+just update them), the `action` option in the elasticsearch output needs to
+know how to handle them. It can be dynamically assigned from a field
+added to the metadata.
+ +Example +[source, ruby] + input { + elasticsearch { + hosts => "es.production.mysite.org" + index => "mydata-2018.09.*" + query => "*" + size => 500 + scroll => "5m" + docinfo => true + } + } + output { + elasticsearch { + index => "copy-of-production.%{[@metadata][_index]}" + document_type => "%{[@metadata][_type]}" + document_id => "%{[@metadata][_id]}" + } + } + + +[id="{version}-plugins-{type}s-{plugin}-docinfo_fields"] +===== `docinfo_fields` + + * Value type is <> + * Default value is `["_index", "_type", "_id"]` + +List of document metadata to move to the `docinfo_target` field +To learn more about Elasticsearch metadata fields read +http://www.elasticsearch.org/guide/en/elasticsearch/guide/current/_document_metadata.html + +[id="{version}-plugins-{type}s-{plugin}-docinfo_target"] +===== `docinfo_target` + + * Value type is <> + * Default value is `"@metadata"` + +Where to move the Elasticsearch document information by default we use the @metadata field. + +[id="{version}-plugins-{type}s-{plugin}-hosts"] +===== `hosts` + + * Value type is <> + * There is no default value for this setting. + +List of elasticsearch hosts to use for querying. +each host can be either IP, HOST, IP:port or HOST:port +port defaults to 9200 + +[id="{version}-plugins-{type}s-{plugin}-index"] +===== `index` + + * Value type is <> + * Default value is `"logstash-*"` + +The index or alias to search. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Basic Auth - password + +[id="{version}-plugins-{type}s-{plugin}-query"] +===== `query` + + * Value type is <> + * Default value is `"{ \"sort\": [ \"_doc\" ] }"` + +The query to be executed. Read the Elasticsearch query DSL documentation +for more info +https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html + +[id="{version}-plugins-{type}s-{plugin}-scroll"] +===== `scroll` + + * Value type is <> + * Default value is `"1m"` + +This parameter controls the keepalive time in seconds of the scrolling +request and initiates the scrolling process. The timeout applies per +round trip (i.e. between the previous scroll request, to the next). + +[id="{version}-plugins-{type}s-{plugin}-size"] +===== `size` + + * Value type is <> + * Default value is `1000` + +This allows you to set the maximum number of hits returned per scroll. + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * Default value is `false` + +SSL + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * There is no default value for this setting. + +Basic Auth - username + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/elasticsearch-v4.0.5.asciidoc b/docs/versioned-plugins/inputs/elasticsearch-v4.0.5.asciidoc new file mode 100644 index 000000000..a45fde460 --- /dev/null +++ b/docs/versioned-plugins/inputs/elasticsearch-v4.0.5.asciidoc @@ -0,0 +1,230 @@ +:plugin: elasticsearch +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// +:version: v4.0.5 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-input-elasticsearch/blob/v4.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Elasticsearch input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +.Compatibility Note +[NOTE] +================================================================================ +Starting with Elasticsearch 5.3, there's an {ref}modules-http.html[HTTP setting] +called `http.content_type.required`. If this option is set to `true`, and you +are using Logstash 2.4 through 5.2, you need to update the Elasticsearch input +plugin to version 4.0.2 or higher. + +================================================================================ + +Read from an Elasticsearch cluster, based on search query results. +This is useful for replaying test logs, reindexing, etc. + +Example: +[source,ruby] + input { + # Read all documents from Elasticsearch matching the given query + elasticsearch { + hosts => "localhost" + query => '{ "query": { "match": { "statuscode": 200 } }, "sort": [ "_doc" ] }' + } + } + +This would create an Elasticsearch query with the following format: +[source,json] + curl 'http://localhost:9200/logstash-*/_search?&scroll=1m&size=1000' -d '{ + "query": { + "match": { + "statuscode": 200 + } + }, + "sort": [ "_doc" ] + }' + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Elasticsearch Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-docinfo>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-docinfo_fields>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-docinfo_target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-query>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-scroll>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-ca_file"] +===== `ca_file` + + * Value type is <> + * There is no default value for this setting. + +SSL Certificate Authority file in PEM encoded format, must also +include any chain certificates as necessary. + +[id="{version}-plugins-{type}s-{plugin}-docinfo"] +===== `docinfo` + + * Value type is <> + * Default value is `false` + +If set, include Elasticsearch document information such as index, type, and +the id in the event. 
+ +It might be important to note, with regards to metadata, that if you're +ingesting documents with the intent to re-index them (or just update them) +that the `action` option in the elasticsearch output wants to know how to +handle those things. It can be dynamically assigned with a field +added to the metadata. + +Example +[source, ruby] + input { + elasticsearch { + hosts => "es.production.mysite.org" + index => "mydata-2018.09.*" + query => '{ "query": { "query_string": { "query": "*" } } }' + size => 500 + scroll => "5m" + docinfo => true + } + } + output { + elasticsearch { + index => "copy-of-production.%{[@metadata][_index]}" + document_type => "%{[@metadata][_type]}" + document_id => "%{[@metadata][_id]}" + } + } + + +[id="{version}-plugins-{type}s-{plugin}-docinfo_fields"] +===== `docinfo_fields` + + * Value type is <> + * Default value is `["_index", "_type", "_id"]` + +If document metadata storage is requested by enabling the `docinfo` +option, this option lists the metadata fields to save in the current +event. See +[Document Metadata](http://www.elasticsearch.org/guide/en/elasticsearch/guide/current/_document_metadata.html) +in the Elasticsearch documentation for more information. + +[id="{version}-plugins-{type}s-{plugin}-docinfo_target"] +===== `docinfo_target` + + * Value type is <> + * Default value is `"@metadata"` + +If document metadata storage is requested by enabling the `docinfo` +option, this option names the field under which to store the metadata +fields as subfields. + +[id="{version}-plugins-{type}s-{plugin}-hosts"] +===== `hosts` + + * Value type is <> + * There is no default value for this setting. + +List of one or more Elasticsearch hosts to use for querying. Each host +can be either IP, HOST, IP:port, or HOST:port. The port defaults to +9200. + +[id="{version}-plugins-{type}s-{plugin}-index"] +===== `index` + + * Value type is <> + * Default value is `"logstash-*"` + +The index or alias to search. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +The password to use together with the username in the `user` option +when authenticating to the Elasticsearch server. If set to an empty +string authentication will be disabled. + +[id="{version}-plugins-{type}s-{plugin}-query"] +===== `query` + + * Value type is <> + * Default value is `'{ "sort": [ "_doc" ] }'` + +The query to be executed. Read the +[Elasticsearch query DSL documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html) +for more information. + +[id="{version}-plugins-{type}s-{plugin}-scroll"] +===== `scroll` + + * Value type is <> + * Default value is `"1m"` + +This parameter controls the keepalive time in seconds of the scrolling +request and initiates the scrolling process. The timeout applies per +round trip (i.e. between the previous scroll request, to the next). + +[id="{version}-plugins-{type}s-{plugin}-size"] +===== `size` + + * Value type is <> + * Default value is `1000` + +This allows you to set the maximum number of hits returned per scroll. + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * Default value is `false` + +If enabled, SSL will be used when communicating with the Elasticsearch +server (i.e. HTTPS will be used instead of plain HTTP). + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * There is no default value for this setting. 
+
+The username to use together with the password in the `password`
+option when authenticating to the Elasticsearch server. If set to an
+empty string, authentication will be disabled.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/elasticsearch-v4.0.6.asciidoc b/docs/versioned-plugins/inputs/elasticsearch-v4.0.6.asciidoc
new file mode 100644
index 000000000..5d2391848
--- /dev/null
+++ b/docs/versioned-plugins/inputs/elasticsearch-v4.0.6.asciidoc
@@ -0,0 +1,230 @@
+:plugin: elasticsearch
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.6
+:release_date: 2017-09-20
+:changelog_url: https://github.com/logstash-plugins/logstash-input-elasticsearch/blob/v4.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch input
+plugin to version 4.0.2 or higher.
+
+================================================================================
+
+Read from an Elasticsearch cluster, based on search query results.
+This is useful for replaying test logs, reindexing, etc.
+
+Example:
+[source,ruby]
+    input {
+      # Read all documents from Elasticsearch matching the given query
+      elasticsearch {
+        hosts => "localhost"
+        query => '{ "query": { "match": { "statuscode": 200 } }, "sort": [ "_doc" ] }'
+      }
+    }
+
+This would create an Elasticsearch query with the following format:
+[source,json]
+    curl 'http://localhost:9200/logstash-*/_search?&scroll=1m&size=1000' -d '{
+      "query": {
+        "match": {
+          "statuscode": 200
+        }
+      },
+      "sort": [ "_doc" ]
+    }'
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-docinfo>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-docinfo_fields>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-docinfo_target>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-query>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-scroll>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-size>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-ca_file"]
+===== `ca_file`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL Certificate Authority file in PEM encoded format. It must also
+include any chain certificates as necessary.
+
+[id="{version}-plugins-{type}s-{plugin}-docinfo"]
+===== `docinfo`
+
+ * Value type is <>
+ * Default value is `false`
+
+If set, include Elasticsearch document information such as the index, type, and
+id in the event.
+
+If you are ingesting documents with the intent to re-index (or update)
+them, note that the `action` option of the elasticsearch output needs to
+know how to handle them. The action can be assigned dynamically from a
+field added to the metadata.
+
+Example:
+[source,ruby]
+    input {
+      elasticsearch {
+        hosts => "es.production.mysite.org"
+        index => "mydata-2018.09.*"
+        query => '{ "query": { "query_string": { "query": "*" } } }'
+        size => 500
+        scroll => "5m"
+        docinfo => true
+      }
+    }
+    output {
+      elasticsearch {
+        index => "copy-of-production.%{[@metadata][_index]}"
+        document_type => "%{[@metadata][_type]}"
+        document_id => "%{[@metadata][_id]}"
+      }
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-docinfo_fields"]
+===== `docinfo_fields`
+
+ * Value type is <>
+ * Default value is `["_index", "_type", "_id"]`
+
+If document metadata storage is requested by enabling the `docinfo`
+option, this option lists the metadata fields to save in the current
+event. See
+http://www.elasticsearch.org/guide/en/elasticsearch/guide/current/_document_metadata.html[Document Metadata]
+in the Elasticsearch documentation for more information.
+
+[id="{version}-plugins-{type}s-{plugin}-docinfo_target"]
+===== `docinfo_target`
+
+ * Value type is <>
+ * Default value is `"@metadata"`
+
+If document metadata storage is requested by enabling the `docinfo`
+option, this option names the field under which to store the metadata
+fields as subfields.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+List of one or more Elasticsearch hosts to use for querying. Each host
+can be either IP, HOST, IP:port, or HOST:port. The port defaults to
+9200.
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is <>
+ * Default value is `"logstash-*"`
+
+The index or alias to search.
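+
+As a minimal sketch of the two options above, the following reads from a
+single daily index instead of the default `logstash-*` pattern (the host
+and index name are placeholders):
+
+[source,ruby]
+    input {
+      elasticsearch {
+        hosts => "localhost"
+        # Hypothetical daily index; any index or alias name works here
+        index => "logstash-2017.10.27"
+      }
+    }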
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The password to use together with the username in the `user` option
+when authenticating to the Elasticsearch server. If set to an empty
+string, authentication will be disabled.
+
+[id="{version}-plugins-{type}s-{plugin}-query"]
+===== `query`
+
+ * Value type is <>
+ * Default value is `'{ "sort": [ "_doc" ] }'`
+
+The query to be executed. Read the
+https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html[Elasticsearch query DSL documentation]
+for more information.
+
+[id="{version}-plugins-{type}s-{plugin}-scroll"]
+===== `scroll`
+
+ * Value type is <>
+ * Default value is `"1m"`
+
+This parameter controls the keepalive time of the scrolling
+request and initiates the scrolling process. The timeout applies per
+round trip (i.e. between one scroll request and the next).
+
+[id="{version}-plugins-{type}s-{plugin}-size"]
+===== `size`
+
+ * Value type is <>
+ * Default value is `1000`
+
+This allows you to set the maximum number of hits returned per scroll.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <>
+ * Default value is `false`
+
+If enabled, SSL will be used when communicating with the Elasticsearch
+server (i.e. HTTPS will be used instead of plain HTTP).
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The username to use together with the password in the `password`
+option when authenticating to the Elasticsearch server. If set to an
+empty string, authentication will be disabled.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/elasticsearch-v4.1.0.asciidoc b/docs/versioned-plugins/inputs/elasticsearch-v4.1.0.asciidoc
new file mode 100644
index 000000000..d583e4fbd
--- /dev/null
+++ b/docs/versioned-plugins/inputs/elasticsearch-v4.1.0.asciidoc
@@ -0,0 +1,230 @@
+:plugin: elasticsearch
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.1.0
+:release_date: 2017-10-27
+:changelog_url: https://github.com/logstash-plugins/logstash-input-elasticsearch/blob/v4.1.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch input
+plugin to version 4.0.2 or higher.
+
+================================================================================
+
+Read from an Elasticsearch cluster, based on search query results.
+This is useful for replaying test logs, reindexing, etc.
+
+Example:
+[source,ruby]
+    input {
+      # Read all documents from Elasticsearch matching the given query
+      elasticsearch {
+        hosts => "localhost"
+        query => '{ "query": { "match": { "statuscode": 200 } }, "sort": [ "_doc" ] }'
+      }
+    }
+
+This would create an Elasticsearch query with the following format:
+[source,json]
+    curl 'http://localhost:9200/logstash-*/_search?&scroll=1m&size=1000' -d '{
+      "query": {
+        "match": {
+          "statuscode": 200
+        }
+      },
+      "sort": [ "_doc" ]
+    }'
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-docinfo>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-docinfo_fields>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-docinfo_target>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-query>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-scroll>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-size>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-ca_file"]
+===== `ca_file`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL Certificate Authority file in PEM encoded format. It must also
+include any chain certificates as necessary.
+
+[id="{version}-plugins-{type}s-{plugin}-docinfo"]
+===== `docinfo`
+
+ * Value type is <>
+ * Default value is `false`
+
+If set, include Elasticsearch document information such as the index, type, and
+id in the event.
+
+If you are ingesting documents with the intent to re-index (or update)
+them, note that the `action` option of the elasticsearch output needs to
+know how to handle them. The action can be assigned dynamically from a
+field added to the metadata.
+
+Example:
+[source,ruby]
+    input {
+      elasticsearch {
+        hosts => "es.production.mysite.org"
+        index => "mydata-2018.09.*"
+        query => '{ "query": { "query_string": { "query": "*" } } }'
+        size => 500
+        scroll => "5m"
+        docinfo => true
+      }
+    }
+    output {
+      elasticsearch {
+        index => "copy-of-production.%{[@metadata][_index]}"
+        document_type => "%{[@metadata][_type]}"
+        document_id => "%{[@metadata][_id]}"
+      }
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-docinfo_fields"]
+===== `docinfo_fields`
+
+ * Value type is <>
+ * Default value is `["_index", "_type", "_id"]`
+
+If document metadata storage is requested by enabling the `docinfo`
+option, this option lists the metadata fields to save in the current
+event. See
+http://www.elasticsearch.org/guide/en/elasticsearch/guide/current/_document_metadata.html[Document Metadata]
+in the Elasticsearch documentation for more information.
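+
+For instance, if only the document id is needed downstream, the list can
+be trimmed to a single field. This is a sketch built from the options
+described above (the host is a placeholder):
+
+[source,ruby]
+    input {
+      elasticsearch {
+        hosts => "localhost"
+        docinfo => true
+        # Store only the id; _index and _type are not saved in this sketch
+        docinfo_fields => ["_id"]
+      }
+    }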
+
+[id="{version}-plugins-{type}s-{plugin}-docinfo_target"]
+===== `docinfo_target`
+
+ * Value type is <>
+ * Default value is `"@metadata"`
+
+If document metadata storage is requested by enabling the `docinfo`
+option, this option names the field under which to store the metadata
+fields as subfields.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+List of one or more Elasticsearch hosts to use for querying. Each host
+can be either IP, HOST, IP:port, or HOST:port. The port defaults to
+9200.
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is <>
+ * Default value is `"logstash-*"`
+
+The index or alias to search.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The password to use together with the username in the `user` option
+when authenticating to the Elasticsearch server. If set to an empty
+string, authentication will be disabled.
+
+[id="{version}-plugins-{type}s-{plugin}-query"]
+===== `query`
+
+ * Value type is <>
+ * Default value is `'{ "sort": [ "_doc" ] }'`
+
+The query to be executed. Read the
+https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html[Elasticsearch query DSL documentation]
+for more information.
+
+[id="{version}-plugins-{type}s-{plugin}-scroll"]
+===== `scroll`
+
+ * Value type is <>
+ * Default value is `"1m"`
+
+This parameter controls the keepalive time of the scrolling
+request and initiates the scrolling process. The timeout applies per
+round trip (i.e. between one scroll request and the next).
+
+[id="{version}-plugins-{type}s-{plugin}-size"]
+===== `size`
+
+ * Value type is <>
+ * Default value is `1000`
+
+This allows you to set the maximum number of hits returned per scroll.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <>
+ * Default value is `false`
+
+If enabled, SSL will be used when communicating with the Elasticsearch
+server (i.e. HTTPS will be used instead of plain HTTP).
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The username to use together with the password in the `password`
+option when authenticating to the Elasticsearch server. If set to an
+empty string, authentication will be disabled.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/elasticsearch-v4.1.1.asciidoc b/docs/versioned-plugins/inputs/elasticsearch-v4.1.1.asciidoc
new file mode 100644
index 000000000..75c2c4c95
--- /dev/null
+++ b/docs/versioned-plugins/inputs/elasticsearch-v4.1.1.asciidoc
@@ -0,0 +1,230 @@
+:plugin: elasticsearch
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.1.1
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-elasticsearch/blob/v4.1.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch input
+plugin to version 4.0.2 or higher.
+
+================================================================================
+
+Read from an Elasticsearch cluster, based on search query results.
+This is useful for replaying test logs, reindexing, etc.
+
+Example:
+[source,ruby]
+    input {
+      # Read all documents from Elasticsearch matching the given query
+      elasticsearch {
+        hosts => "localhost"
+        query => '{ "query": { "match": { "statuscode": 200 } }, "sort": [ "_doc" ] }'
+      }
+    }
+
+This would create an Elasticsearch query with the following format:
+[source,json]
+    curl 'http://localhost:9200/logstash-*/_search?&scroll=1m&size=1000' -d '{
+      "query": {
+        "match": {
+          "statuscode": 200
+        }
+      },
+      "sort": [ "_doc" ]
+    }'
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-docinfo>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-docinfo_fields>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-docinfo_target>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-query>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-scroll>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-size>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-ca_file"]
+===== `ca_file`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL Certificate Authority file in PEM encoded format. It must also
+include any chain certificates as necessary.
+
+[id="{version}-plugins-{type}s-{plugin}-docinfo"]
+===== `docinfo`
+
+ * Value type is <>
+ * Default value is `false`
+
+If set, include Elasticsearch document information such as the index, type, and
+id in the event.
+
+If you are ingesting documents with the intent to re-index (or update)
+them, note that the `action` option of the elasticsearch output needs to
+know how to handle them. The action can be assigned dynamically from a
+field added to the metadata.
+
+Example:
+[source,ruby]
+    input {
+      elasticsearch {
+        hosts => "es.production.mysite.org"
+        index => "mydata-2018.09.*"
+        query => '{ "query": { "query_string": { "query": "*" } } }'
+        size => 500
+        scroll => "5m"
+        docinfo => true
+      }
+    }
+    output {
+      elasticsearch {
+        index => "copy-of-production.%{[@metadata][_index]}"
+        document_type => "%{[@metadata][_type]}"
+        document_id => "%{[@metadata][_id]}"
+      }
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-docinfo_fields"]
+===== `docinfo_fields`
+
+ * Value type is <>
+ * Default value is `["_index", "_type", "_id"]`
+
+If document metadata storage is requested by enabling the `docinfo`
+option, this option lists the metadata fields to save in the current
+event. See
+http://www.elasticsearch.org/guide/en/elasticsearch/guide/current/_document_metadata.html[Document Metadata]
+in the Elasticsearch documentation for more information.
+
+[id="{version}-plugins-{type}s-{plugin}-docinfo_target"]
+===== `docinfo_target`
+
+ * Value type is <>
+ * Default value is `"@metadata"`
+
+If document metadata storage is requested by enabling the `docinfo`
+option, this option names the field under which to store the metadata
+fields as subfields.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+List of one or more Elasticsearch hosts to use for querying. Each host
+can be either IP, HOST, IP:port, or HOST:port. The port defaults to
+9200.
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is <>
+ * Default value is `"logstash-*"`
+
+The index or alias to search.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The password to use together with the username in the `user` option
+when authenticating to the Elasticsearch server. If set to an empty
+string, authentication will be disabled.
+
+[id="{version}-plugins-{type}s-{plugin}-query"]
+===== `query`
+
+ * Value type is <>
+ * Default value is `'{ "sort": [ "_doc" ] }'`
+
+The query to be executed. Read the
+https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html[Elasticsearch query DSL documentation]
+for more information.
+
+[id="{version}-plugins-{type}s-{plugin}-scroll"]
+===== `scroll`
+
+ * Value type is <>
+ * Default value is `"1m"`
+
+This parameter controls the keepalive time of the scrolling
+request and initiates the scrolling process. The timeout applies per
+round trip (i.e. between one scroll request and the next).
+
+[id="{version}-plugins-{type}s-{plugin}-size"]
+===== `size`
+
+ * Value type is <>
+ * Default value is `1000`
+
+This allows you to set the maximum number of hits returned per scroll.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <>
+ * Default value is `false`
+
+If enabled, SSL will be used when communicating with the Elasticsearch
+server (i.e. HTTPS will be used instead of plain HTTP).
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The username to use together with the password in the `password`
+option when authenticating to the Elasticsearch server. If set to an
+empty string, authentication will be disabled.
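+
+Putting the authentication and TLS options together, a minimal secured
+configuration might look like the following sketch (host, credentials,
+and certificate path are placeholders):
+
+[source,ruby]
+    input {
+      elasticsearch {
+        hosts => "es.example.org:9200"
+        user => "logstash_reader"      # placeholder username
+        password => "changeme"         # placeholder password
+        ssl => true
+        ca_file => "/path/to/ca.pem"   # placeholder CA certificate
+      }
+    }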
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/eventlog-index.asciidoc b/docs/versioned-plugins/inputs/eventlog-index.asciidoc
new file mode 100644
index 000000000..b0d16b02d
--- /dev/null
+++ b/docs/versioned-plugins/inputs/eventlog-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: eventlog
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::eventlog-v4.1.2.asciidoc[]
+include::eventlog-v4.1.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/eventlog-v4.1.1.asciidoc b/docs/versioned-plugins/inputs/eventlog-v4.1.1.asciidoc
new file mode 100644
index 000000000..ddd773f17
--- /dev/null
+++ b/docs/versioned-plugins/inputs/eventlog-v4.1.1.asciidoc
@@ -0,0 +1,74 @@
+:plugin: eventlog
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.1.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-eventlog/blob/v4.1.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Eventlog input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input will pull events from a http://msdn.microsoft.com/en-us/library/windows/desktop/bb309026%28v=vs.85%29.aspx[Windows Event Log].
+Note that Windows Event Logs are stored on disk in a binary format and are only accessible through the Win32 API.
+This means Logstash needs to run as an agent on each Windows server you wish to collect logs
+from; the logs are not accessible across the network.
+
+To collect events from the System Event Log, use a config like:
+[source,ruby]
+    input {
+      eventlog {
+        type => 'Win32-EventLog'
+        logfile => 'System'
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Eventlog Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-logfile>> |<>, one of `["Application", "Security", "System"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+ * Value type is <>
+ * Default value is `1000`
+
+How frequently, in milliseconds, to check for new event log entries (default: 1 second).
+
+[id="{version}-plugins-{type}s-{plugin}-logfile"]
+===== `logfile`
+
+ * Value can be any of: `Application`, `Security`, `System`
+ * Default value is `"Application"`
+
+Event Log Name.
+Reading the System and Security logs may require that additional privileges
+are granted to the user running Logstash;
+see more at: https://social.technet.microsoft.com/forums/windowsserver/en-US/d2f813db-6142-4b5b-8d86-253ebb740473/easy-way-to-read-security-log
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/eventlog-v4.1.2.asciidoc b/docs/versioned-plugins/inputs/eventlog-v4.1.2.asciidoc
new file mode 100644
index 000000000..658c35633
--- /dev/null
+++ b/docs/versioned-plugins/inputs/eventlog-v4.1.2.asciidoc
@@ -0,0 +1,74 @@
+:plugin: eventlog
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.1.2
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-input-eventlog/blob/v4.1.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Eventlog input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input will pull events from a http://msdn.microsoft.com/en-us/library/windows/desktop/bb309026%28v=vs.85%29.aspx[Windows Event Log].
+Note that Windows Event Logs are stored on disk in a binary format and are only accessible through the Win32 API.
+This means Logstash needs to run as an agent on each Windows server you wish to collect logs
+from; the logs are not accessible across the network.
+
+To collect events from the System Event Log, use a config like:
+[source,ruby]
+    input {
+      eventlog {
+        type => 'Win32-EventLog'
+        logfile => 'System'
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Eventlog Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-logfile>> |<>, one of `["Application", "Security", "System"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+ * Value type is <>
+ * Default value is `1000`
+
+How frequently, in milliseconds, to check for new event log entries (default: 1 second).
+
+[id="{version}-plugins-{type}s-{plugin}-logfile"]
+===== `logfile`
+
+ * Value can be any of: `Application`, `Security`, `System`
+ * Default value is `"Application"`
+
+Event Log Name.
+Reading the System and Security logs may require that additional privileges
+are granted to the user running Logstash;
+see more at: https://social.technet.microsoft.com/forums/windowsserver/en-US/d2f813db-6142-4b5b-8d86-253ebb740473/easy-way-to-read-security-log + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/example-index.asciidoc b/docs/versioned-plugins/inputs/example-index.asciidoc new file mode 100644 index 000000000..56c312bd8 --- /dev/null +++ b/docs/versioned-plugins/inputs/example-index.asciidoc @@ -0,0 +1,10 @@ +:plugin: example +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +|======================================================================= + + diff --git a/docs/versioned-plugins/inputs/exec-index.asciidoc b/docs/versioned-plugins/inputs/exec-index.asciidoc new file mode 100644 index 000000000..c7c61d1c4 --- /dev/null +++ b/docs/versioned-plugins/inputs/exec-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: exec +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::exec-v3.1.5.asciidoc[] +include::exec-v3.1.4.asciidoc[] +include::exec-v3.1.3.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/exec-v3.1.3.asciidoc b/docs/versioned-plugins/inputs/exec-v3.1.3.asciidoc new file mode 100644 index 000000000..d3295b1bd --- /dev/null +++ b/docs/versioned-plugins/inputs/exec-v3.1.3.asciidoc @@ -0,0 +1,69 @@ +:plugin: exec +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.3 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-input-exec/blob/v3.1.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Exec input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Periodically run a shell command and capture the whole output as an event. + +Notes: + +* The `command` field of this event will be the command run. +* The `message` field of this event will be the entire stdout of the command. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Exec Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-command>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-command"] +===== `command` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Command to run. 
For example, `uptime` + +[id="{version}-plugins-{type}s-{plugin}-interval"] +===== `interval` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Interval to run the command. Value is in seconds. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/exec-v3.1.4.asciidoc b/docs/versioned-plugins/inputs/exec-v3.1.4.asciidoc new file mode 100644 index 000000000..a30ca6826 --- /dev/null +++ b/docs/versioned-plugins/inputs/exec-v3.1.4.asciidoc @@ -0,0 +1,69 @@ +:plugin: exec +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.4 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-input-exec/blob/v3.1.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Exec input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Periodically run a shell command and capture the whole output as an event. + +Notes: + +* The `command` field of this event will be the command run. +* The `message` field of this event will be the entire stdout of the command. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Exec Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-command>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-command"] +===== `command` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Command to run. For example, `uptime` + +[id="{version}-plugins-{type}s-{plugin}-interval"] +===== `interval` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Interval to run the command. Value is in seconds. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/exec-v3.1.5.asciidoc b/docs/versioned-plugins/inputs/exec-v3.1.5.asciidoc new file mode 100644 index 000000000..dcc9315c6 --- /dev/null +++ b/docs/versioned-plugins/inputs/exec-v3.1.5.asciidoc @@ -0,0 +1,69 @@ +:plugin: exec +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.5 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-input-exec/blob/v3.1.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Exec input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Periodically run a shell command and capture the whole output as an event. + +Notes: + +* The `command` field of this event will be the command run. +* The `message` field of this event will be the entire stdout of the command. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Exec Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-command>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-command"] +===== `command` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Command to run. For example, `uptime` + +[id="{version}-plugins-{type}s-{plugin}-interval"] +===== `interval` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Interval to run the command. Value is in seconds. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/file-index.asciidoc b/docs/versioned-plugins/inputs/file-index.asciidoc new file mode 100644 index 000000000..a4e00321d --- /dev/null +++ b/docs/versioned-plugins/inputs/file-index.asciidoc @@ -0,0 +1,14 @@ +:plugin: file +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::file-v4.0.3.asciidoc[] +include::file-v4.0.2.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/file-v4.0.2.asciidoc b/docs/versioned-plugins/inputs/file-v4.0.2.asciidoc new file mode 100644 index 000000000..16472c859 --- /dev/null +++ b/docs/versioned-plugins/inputs/file-v4.0.2.asciidoc @@ -0,0 +1,256 @@ +:plugin: file +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.0.2 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-input-file/blob/v4.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== File input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Stream events from files, normally by tailing them in a manner +similar to `tail -0F` but optionally reading them from the +beginning. + +By default, each event is assumed to be one line and a line is +taken to be the text before a newline character. +Normally, logging will add a newline to the end of each line written. 
+If you would like to join multiple log lines into one event,
+you'll want to use the multiline codec or filter.
+
+The plugin aims to track changing files and emit new content as it's
+appended to each file. It's not well-suited for reading a file from
+beginning to end and storing all of it in a single event (not even
+with the multiline codec or filter).
+
+==== Reading from remote network volumes
+
+The file input is not tested on remote filesystems such as NFS, Samba, s3fs-fuse, etc. These
+remote filesystems typically have behaviors that are very different from local filesystems and
+are therefore unlikely to work correctly when used with the file input.
+
+==== Tracking of current position in watched files
+
+The plugin keeps track of the current position in each file by
+recording it in a separate file named sincedb. This makes it
+possible to stop and restart Logstash and have it pick up where it
+left off without missing the lines that were added to the file while
+Logstash was stopped.
+
+By default, the sincedb file is placed in the home directory of the
+user running Logstash with a filename based on the filename patterns
+being watched (i.e. the `path` option). Thus, changing the filename
+patterns will result in a new sincedb file being used and any
+existing current position state will be lost. If you change your
+patterns with any frequency it might make sense to explicitly choose
+a sincedb path with the `sincedb_path` option.
+
+A different `sincedb_path` must be used for each input. Using the same
+path will cause issues. The read checkpoints for each input must be
+stored in a different path so that checkpoint information is not overwritten.
+
+Sincedb files are text files with four columns:
+
+. The inode number (or equivalent).
+. The major device number of the file system (or equivalent).
+. The minor device number of the file system (or equivalent).
+. The current byte offset within the file.
+
+On non-Windows systems you can obtain the inode number of a file
+with e.g. `ls -li`.
+
+==== File rotation
+
+File rotation is detected and handled by this input, regardless of
+whether the file is rotated via a rename or a copy operation. To
+support programs that write to the rotated file for some time after
+the rotation has taken place, include both the original filename and
+the rotated filename (e.g. /var/log/syslog and /var/log/syslog.1) in
+the filename patterns to watch (the `path` option). Note that the
+rotated filename will be treated as a new file so if
+`start_position` is set to 'beginning' the rotated file will be
+reprocessed.
+
+With the default value of `start_position` ('end') any messages
+written to the end of the file between the last read operation prior
+to the rotation and its reopening under the new name (an interval
+determined by the `stat_interval` and `discover_interval` options)
+will not get picked up.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== File Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-close_older>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-discover_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-exclude>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ignore_older>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-max_open_files>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sincedb_write_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-start_position>> |<>, one of `["beginning", "end"]`|No
+| <<{version}-plugins-{type}s-{plugin}-stat_interval>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-close_older"]
+===== `close_older`
+
+ * Value type is <>
+ * Default value is `3600`
+
+The file input closes any files that were last read the specified
+number of seconds ago.
+This has different implications depending on whether a file is being tailed or
+read. If tailing, and there is a large time gap in incoming data, the file
+can be closed (allowing other files to be opened) but will be queued for
+reopening when new data is detected. If reading, the file will be closed
+`close_older` seconds after the last bytes were read.
+The default is 1 hour.
+
+[id="{version}-plugins-{type}s-{plugin}-delimiter"]
+===== `delimiter`
+
+ * Value type is <>
+ * Default value is `"\n"`
+
+Set the new line delimiter. Defaults to `"\n"`.
+
+[id="{version}-plugins-{type}s-{plugin}-discover_interval"]
+===== `discover_interval`
+
+ * Value type is <>
+ * Default value is `15`
+
+How often (in seconds) we expand the filename patterns in the
+`path` option to discover new files to watch.
+
+[id="{version}-plugins-{type}s-{plugin}-exclude"]
+===== `exclude`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Exclusions (matched against the filename, not full path). Filename
+patterns are valid here, too. For example, if you have
+[source,ruby]
+    path => "/var/log/*"
+
+You might want to exclude gzipped files:
+[source,ruby]
+    exclude => "*.gz"
+
+[id="{version}-plugins-{type}s-{plugin}-ignore_older"]
+===== `ignore_older`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+When the file input discovers a file that was last modified
+before the specified timespan in seconds, the file is ignored.
+After its discovery, if an ignored file is modified, it is no
+longer ignored and any new data is read. By default, this option is
+disabled. Note this unit is in seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-max_open_files"]
+===== `max_open_files`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The maximum number of file handles this input consumes
+at any one time. Use `close_older` to close some files if you need to
+process more files than this number. This should not be set to the
+maximum the OS allows, because file handles are needed for other
+Logstash plugins and OS processes.
+The default of 4095 is set in filewatch.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * This is a required setting.
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The path(s) to the file(s) to use as an input.
+You can use filename patterns here, such as `/var/log/*.log`.
+If you use a pattern like `/var/log/**/*.log`, a recursive search
+of `/var/log` will be done for all `*.log` files.
+Paths must be absolute and cannot be relative.
+
+You may also configure multiple paths. See an example
+on the <>.
+
+[id="{version}-plugins-{type}s-{plugin}-sincedb_path"]
+===== `sincedb_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path of the sincedb database file (keeps track of the current
+position of monitored log files) that will be written to disk.
+The default will write sincedb files to `/plugins/inputs/file`.
+NOTE: it must be a file path, not a directory path.
+
+[id="{version}-plugins-{type}s-{plugin}-sincedb_write_interval"]
+===== `sincedb_write_interval`
+
+ * Value type is <>
+ * Default value is `15`
+
+How often (in seconds) to write the sincedb database with the current position of
+monitored log files.
+
+[id="{version}-plugins-{type}s-{plugin}-start_position"]
+===== `start_position`
+
+ * Value can be any of: `beginning`, `end`
+ * Default value is `"end"`
+
+Choose where Logstash starts initially reading files: at the beginning or
+at the end. The default behavior treats files like live streams and thus
+starts at the end. If you have old data you want to import, set this
+to 'beginning'.
+
+This option only modifies "first contact" situations where a file
+is new and not seen before, i.e. files that don't have a current
+position recorded in a sincedb file read by Logstash. If a file
+has already been seen before, this option has no effect and the
+position recorded in the sincedb file will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-stat_interval"]
+===== `stat_interval`
+
+ * Value type is <>
+ * Default value is `1`
+
+How often (in seconds) we stat files to see if they have been modified.
+Increasing this interval will decrease the number of system calls we make,
+but increase the time to detect new log lines.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/file-v4.0.3.asciidoc b/docs/versioned-plugins/inputs/file-v4.0.3.asciidoc
new file mode 100644
index 000000000..ccbe78e84
--- /dev/null
+++ b/docs/versioned-plugins/inputs/file-v4.0.3.asciidoc
@@ -0,0 +1,256 @@
+:plugin: file
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.3
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-input-file/blob/v4.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== File input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Stream events from files, normally by tailing them in a manner
+similar to `tail -0F` but optionally reading them from the
+beginning.
+
+By default, each event is assumed to be one line and a line is
+taken to be the text before a newline character.
+Normally, logging will add a newline to the end of each line written.
+If you would like to join multiple log lines into one event,
+you'll want to use the multiline codec or filter.
+
+The plugin aims to track changing files and emit new content as it's
+appended to each file. It's not well-suited for reading a file from
+beginning to end and storing all of it in a single event (not even
+with the multiline codec or filter).
+
+==== Reading from remote network volumes
+
+The file input is not tested on remote filesystems such as NFS, Samba, s3fs-fuse, etc. These
+remote filesystems typically have behaviors that are very different from local filesystems and
+are therefore unlikely to work correctly when used with the file input.
+
+==== Tracking of current position in watched files
+
+The plugin keeps track of the current position in each file by
+recording it in a separate file named sincedb. This makes it
+possible to stop and restart Logstash and have it pick up where it
+left off without missing the lines that were added to the file while
+Logstash was stopped.
+
+By default, the sincedb file is placed in the home directory of the
+user running Logstash with a filename based on the filename patterns
+being watched (i.e. the `path` option). Thus, changing the filename
+patterns will result in a new sincedb file being used and any
+existing current position state will be lost. If you change your
+patterns with any frequency it might make sense to explicitly choose
+a sincedb path with the `sincedb_path` option.
+
+A different `sincedb_path` must be used for each input. Using the same
+path will cause issues. The read checkpoints for each input must be
+stored in a different path so that checkpoint information is not overwritten.
+
+Sincedb files are text files with four columns:
+
+. The inode number (or equivalent).
+. The major device number of the file system (or equivalent).
+. The minor device number of the file system (or equivalent).
+. The current byte offset within the file.
+
+On non-Windows systems you can obtain the inode number of a file
+with e.g. `ls -li`.
+
+==== File rotation
+
+File rotation is detected and handled by this input, regardless of
+whether the file is rotated via a rename or a copy operation. To
+support programs that write to the rotated file for some time after
+the rotation has taken place, include both the original filename and
+the rotated filename (e.g. /var/log/syslog and /var/log/syslog.1) in
+the filename patterns to watch (the `path` option). Note that the
+rotated filename will be treated as a new file so if
+`start_position` is set to 'beginning' the rotated file will be
+reprocessed.
+
+With the default value of `start_position` ('end') any messages
+written to the end of the file between the last read operation prior
+to the rotation and its reopening under the new name (an interval
+determined by the `stat_interval` and `discover_interval` options)
+will not get picked up.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== File Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-close_older>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-discover_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-exclude>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ignore_older>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-max_open_files>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sincedb_write_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-start_position>> |<>, one of `["beginning", "end"]`|No
+| <<{version}-plugins-{type}s-{plugin}-stat_interval>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-close_older"]
+===== `close_older`
+
+ * Value type is <>
+ * Default value is `3600`
+
+The file input closes any files that were last read the specified
+number of seconds ago.
+This has different implications depending on whether a file is being tailed or
+read. If tailing, and there is a large time gap in incoming data, the file
+can be closed (allowing other files to be opened) but will be queued for
+reopening when new data is detected. If reading, the file will be closed
+`close_older` seconds after the last bytes were read.
+The default is 1 hour.
+
+[id="{version}-plugins-{type}s-{plugin}-delimiter"]
+===== `delimiter`
+
+ * Value type is <>
+ * Default value is `"\n"`
+
+Set the new line delimiter. Defaults to `"\n"`.
+
+[id="{version}-plugins-{type}s-{plugin}-discover_interval"]
+===== `discover_interval`
+
+ * Value type is <>
+ * Default value is `15`
+
+How often (in seconds) we expand the filename patterns in the
+`path` option to discover new files to watch.
+
+[id="{version}-plugins-{type}s-{plugin}-exclude"]
+===== `exclude`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Exclusions (matched against the filename, not full path). Filename
+patterns are valid here, too. For example, if you have
+[source,ruby]
+    path => "/var/log/*"
+
+You might want to exclude gzipped files:
+[source,ruby]
+    exclude => "*.gz"
+
+[id="{version}-plugins-{type}s-{plugin}-ignore_older"]
+===== `ignore_older`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+When the file input discovers a file that was last modified
+before the specified timespan in seconds, the file is ignored.
+After its discovery, if an ignored file is modified, it is no
+longer ignored and any new data is read. By default, this option is
+disabled. Note this unit is in seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-max_open_files"]
+===== `max_open_files`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The maximum number of file handles this input consumes
+at any one time. Use `close_older` to close some files if you need to
+process more files than this number. This should not be set to the
+maximum the OS allows, because file handles are needed for other
+Logstash plugins and OS processes.
+The default of 4095 is set in filewatch.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * This is a required setting.
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The path(s) to the file(s) to use as an input.
+You can use filename patterns here, such as `/var/log/*.log`.
+If you use a pattern like `/var/log/**/*.log`, a recursive search
+of `/var/log` will be done for all `*.log` files.
+Paths must be absolute and cannot be relative.
+
+You may also configure multiple paths. See an example
+on the <>.
+
+[id="{version}-plugins-{type}s-{plugin}-sincedb_path"]
+===== `sincedb_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path of the sincedb database file (keeps track of the current
+position of monitored log files) that will be written to disk.
+The default will write sincedb files to `/plugins/inputs/file`.
+NOTE: it must be a file path, not a directory path.
+
+[id="{version}-plugins-{type}s-{plugin}-sincedb_write_interval"]
+===== `sincedb_write_interval`
+
+ * Value type is <>
+ * Default value is `15`
+
+How often (in seconds) to write the sincedb database with the current position of
+monitored log files.
+
+[id="{version}-plugins-{type}s-{plugin}-start_position"]
+===== `start_position`
+
+ * Value can be any of: `beginning`, `end`
+ * Default value is `"end"`
+
+Choose where Logstash starts initially reading files: at the beginning or
+at the end. The default behavior treats files like live streams and thus
+starts at the end. If you have old data you want to import, set this
+to 'beginning'.
+
+This option only modifies "first contact" situations where a file
+is new and not seen before, i.e. files that don't have a current
+position recorded in a sincedb file read by Logstash. If a file
+has already been seen before, this option has no effect and the
+position recorded in the sincedb file will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-stat_interval"]
+===== `stat_interval`
+
+ * Value type is <>
+ * Default value is `1`
+
+How often (in seconds) we stat files to see if they have been modified.
+Increasing this interval will decrease the number of system calls we make,
+but increase the time to detect new log lines.
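+
+As a sketch that combines several of the options above, the following
+imports existing logs from the beginning, skips gzipped files, and pins
+the sincedb location (all paths are placeholders):
+
+[source,ruby]
+    input {
+      file {
+        path => ["/var/log/app/*.log"]                   # placeholder pattern
+        exclude => "*.gz"
+        start_position => "beginning"
+        sincedb_path => "/var/lib/logstash/sincedb-app"  # placeholder file path
+      }
+    }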
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/fluentd-index.asciidoc b/docs/versioned-plugins/inputs/fluentd-index.asciidoc
new file mode 100644
index 000000000..1eba3bdba
--- /dev/null
+++ b/docs/versioned-plugins/inputs/fluentd-index.asciidoc
@@ -0,0 +1,10 @@
+:plugin: fluentd
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+|=======================================================================
+
+
diff --git a/docs/versioned-plugins/inputs/ganglia-index.asciidoc b/docs/versioned-plugins/inputs/ganglia-index.asciidoc
new file mode 100644
index 000000000..0f3dc974f
--- /dev/null
+++ b/docs/versioned-plugins/inputs/ganglia-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: ganglia
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::ganglia-v3.1.3.asciidoc[]
+include::ganglia-v3.1.2.asciidoc[]
+include::ganglia-v3.1.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/ganglia-v3.1.1.asciidoc b/docs/versioned-plugins/inputs/ganglia-v3.1.1.asciidoc
new file mode 100644
index 000000000..d3e0e1fe2
--- /dev/null
+++ b/docs/versioned-plugins/inputs/ganglia-v3.1.1.asciidoc
@@ -0,0 +1,63 @@
+:plugin: ganglia
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-ganglia/blob/v3.1.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Ganglia input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read Ganglia packets from the network via UDP.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Ganglia Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * Default value is `"0.0.0.0"`
+
+The address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <>
+ * Default value is `8649`
+
+The port to listen on. Remember that ports less than 1024 (privileged
+ports) may require root to use.
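+
+For illustration, a minimal sketch of this input. Both values shown are
+the documented defaults, so this is equivalent to declaring `ganglia {}`:
+
+[source,ruby]
+    input {
+      ganglia {
+        host => "0.0.0.0"  # listen on all interfaces
+        port => 8649       # the documented default port
+      }
+    }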
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/ganglia-v3.1.2.asciidoc b/docs/versioned-plugins/inputs/ganglia-v3.1.2.asciidoc
new file mode 100644
index 000000000..ba7dfbe11
--- /dev/null
+++ b/docs/versioned-plugins/inputs/ganglia-v3.1.2.asciidoc
@@ -0,0 +1,63 @@
+:plugin: ganglia
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.2
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-input-ganglia/blob/v3.1.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Ganglia input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read Ganglia packets from the network via UDP.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Ganglia Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * Default value is `"0.0.0.0"`
+
+The address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <>
+ * Default value is `8649`
+
+The port to listen on. Remember that ports less than 1024 (privileged
+ports) may require root to use.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/ganglia-v3.1.3.asciidoc b/docs/versioned-plugins/inputs/ganglia-v3.1.3.asciidoc
new file mode 100644
index 000000000..916688da2
--- /dev/null
+++ b/docs/versioned-plugins/inputs/ganglia-v3.1.3.asciidoc
@@ -0,0 +1,63 @@
+:plugin: ganglia
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.3
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-ganglia/blob/v3.1.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Ganglia input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read Ganglia packets from the network via UDP.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Ganglia Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +The address to listen on + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `8649` + +The port to listen on. Remember that ports less than 1024 (privileged +ports) may require root to use. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/gelf-index.asciidoc b/docs/versioned-plugins/inputs/gelf-index.asciidoc new file mode 100644 index 000000000..f951c59ba --- /dev/null +++ b/docs/versioned-plugins/inputs/gelf-index.asciidoc @@ -0,0 +1,18 @@ +:plugin: gelf +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-14 +| <> | 2017-08-15 +| <> | 2017-06-27 +| <> | 2017-06-23 +|======================================================================= + +include::gelf-v3.0.7.asciidoc[] +include::gelf-v3.0.6.asciidoc[] +include::gelf-v3.0.5.asciidoc[] +include::gelf-v3.0.4.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/gelf-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/gelf-v3.0.4.asciidoc new file mode 100644 index 000000000..f18cfbb4b --- /dev/null +++ b/docs/versioned-plugins/inputs/gelf-v3.0.4.asciidoc @@ -0,0 +1,105 @@ +:plugin: gelf +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-input-gelf/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Gelf input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This input will read GELF messages as events over the network, +making it a good choice if you already use Graylog2 today. + +The main use case for this input is to leverage existing GELF +logging libraries such as the GELF log4j appender. A library used +by this plugin has a bug which prevents it parsing uncompressed data. +If you use the log4j appender you need to configure it like this to force +gzip even for small messages: + + + + + + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Gelf Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-remap>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-strip_leading_underscore>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +The IP address or hostname to listen on. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `12201` + +The port to listen on. Remember that ports less than 1024 (privileged +ports) may require root to use. + +[id="{version}-plugins-{type}s-{plugin}-remap"] +===== `remap` + + * Value type is <> + * Default value is `true` + +Whether or not to remap the GELF message fields to Logstash event fields or +leave them intact. + +Remapping converts the following GELF fields to Logstash equivalents: + +* `full\_message` becomes `event.get("message")`. +* if there is no `full\_message`, `short\_message` becomes `event.get("message")`. + +[id="{version}-plugins-{type}s-{plugin}-strip_leading_underscore"] +===== `strip_leading_underscore` + + * Value type is <> + * Default value is `true` + +Whether or not to remove the leading `\_` in GELF fields or leave them +in place. (Logstash < 1.2 did not remove them by default.). Note that +GELF version 1.1 format now requires all non-standard fields to be added +as an "additional" field, beginning with an underscore. + +e.g. `\_foo` becomes `foo` + + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/gelf-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/gelf-v3.0.5.asciidoc new file mode 100644 index 000000000..3ef29e5de --- /dev/null +++ b/docs/versioned-plugins/inputs/gelf-v3.0.5.asciidoc @@ -0,0 +1,105 @@ +:plugin: gelf +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-06-27 +:changelog_url: https://github.com/logstash-plugins/logstash-input-gelf/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Gelf input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This input will read GELF messages as events over the network, +making it a good choice if you already use Graylog2 today. + +The main use case for this input is to leverage existing GELF +logging libraries such as the GELF log4j appender. A library used +by this plugin has a bug which prevents it parsing uncompressed data. +If you use the log4j appender you need to configure it like this to force +gzip even for small messages: + + + + + + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Gelf Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-remap>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-strip_leading_underscore>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +The IP address or hostname to listen on. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `12201` + +The port to listen on. Remember that ports less than 1024 (privileged +ports) may require root to use. + +[id="{version}-plugins-{type}s-{plugin}-remap"] +===== `remap` + + * Value type is <> + * Default value is `true` + +Whether or not to remap the GELF message fields to Logstash event fields or +leave them intact. + +Remapping converts the following GELF fields to Logstash equivalents: + +* `full\_message` becomes `event.get("message")`. +* if there is no `full\_message`, `short\_message` becomes `event.get("message")`. + +[id="{version}-plugins-{type}s-{plugin}-strip_leading_underscore"] +===== `strip_leading_underscore` + + * Value type is <> + * Default value is `true` + +Whether or not to remove the leading `\_` in GELF fields or leave them +in place. (Logstash < 1.2 did not remove them by default.). Note that +GELF version 1.1 format now requires all non-standard fields to be added +as an "additional" field, beginning with an underscore. + +e.g. `\_foo` becomes `foo` + + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/gelf-v3.0.6.asciidoc b/docs/versioned-plugins/inputs/gelf-v3.0.6.asciidoc new file mode 100644 index 000000000..234645899 --- /dev/null +++ b/docs/versioned-plugins/inputs/gelf-v3.0.6.asciidoc @@ -0,0 +1,105 @@ +:plugin: gelf +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.6 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-input-gelf/blob/v3.0.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Gelf input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This input will read GELF messages as events over the network, +making it a good choice if you already use Graylog2 today. + +The main use case for this input is to leverage existing GELF +logging libraries such as the GELF log4j appender. A library used +by this plugin has a bug which prevents it parsing uncompressed data. 
+If you use the log4j appender you need to configure it like this to force +gzip even for small messages: + + + + + + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Gelf Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-remap>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-strip_leading_underscore>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +The IP address or hostname to listen on. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `12201` + +The port to listen on. Remember that ports less than 1024 (privileged +ports) may require root to use. + +[id="{version}-plugins-{type}s-{plugin}-remap"] +===== `remap` + + * Value type is <> + * Default value is `true` + +Whether or not to remap the GELF message fields to Logstash event fields or +leave them intact. + +Remapping converts the following GELF fields to Logstash equivalents: + +* `full\_message` becomes `event.get("message")`. +* if there is no `full\_message`, `short\_message` becomes `event.get("message")`. + +[id="{version}-plugins-{type}s-{plugin}-strip_leading_underscore"] +===== `strip_leading_underscore` + + * Value type is <> + * Default value is `true` + +Whether or not to remove the leading `\_` in GELF fields or leave them +in place. (Logstash < 1.2 did not remove them by default.). Note that +GELF version 1.1 format now requires all non-standard fields to be added +as an "additional" field, beginning with an underscore. + +e.g. `\_foo` becomes `foo` + + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/gelf-v3.0.7.asciidoc b/docs/versioned-plugins/inputs/gelf-v3.0.7.asciidoc new file mode 100644 index 000000000..098f6d9cd --- /dev/null +++ b/docs/versioned-plugins/inputs/gelf-v3.0.7.asciidoc @@ -0,0 +1,105 @@ +:plugin: gelf +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.7 +:release_date: 2017-11-14 +:changelog_url: https://github.com/logstash-plugins/logstash-input-gelf/blob/v3.0.7/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Gelf input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This input will read GELF messages as events over the network, +making it a good choice if you already use Graylog2 today. + +The main use case for this input is to leverage existing GELF +logging libraries such as the GELF log4j appender. 
A library used +by this plugin has a bug which prevents it parsing uncompressed data. +If you use the log4j appender you need to configure it like this to force +gzip even for small messages: + + + + + + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Gelf Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-remap>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-strip_leading_underscore>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +The IP address or hostname to listen on. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `12201` + +The port to listen on. Remember that ports less than 1024 (privileged +ports) may require root to use. + +[id="{version}-plugins-{type}s-{plugin}-remap"] +===== `remap` + + * Value type is <> + * Default value is `true` + +Whether or not to remap the GELF message fields to Logstash event fields or +leave them intact. + +Remapping converts the following GELF fields to Logstash equivalents: + +* `full\_message` becomes `event.get("message")`. +* if there is no `full\_message`, `short\_message` becomes `event.get("message")`. + +[id="{version}-plugins-{type}s-{plugin}-strip_leading_underscore"] +===== `strip_leading_underscore` + + * Value type is <> + * Default value is `true` + +Whether or not to remove the leading `\_` in GELF fields or leave them +in place. (Logstash < 1.2 did not remove them by default.). Note that +GELF version 1.1 format now requires all non-standard fields to be added +as an "additional" field, beginning with an underscore. + +e.g. `\_foo` becomes `foo` + + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/gemfire-index.asciidoc b/docs/versioned-plugins/inputs/gemfire-index.asciidoc new file mode 100644 index 000000000..db38e5844 --- /dev/null +++ b/docs/versioned-plugins/inputs/gemfire-index.asciidoc @@ -0,0 +1,14 @@ +:plugin: gemfire +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::gemfire-v2.0.6.asciidoc[] +include::gemfire-v2.0.5.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/gemfire-v2.0.5.asciidoc b/docs/versioned-plugins/inputs/gemfire-v2.0.5.asciidoc new file mode 100644 index 000000000..37c51a53e --- /dev/null +++ b/docs/versioned-plugins/inputs/gemfire-v2.0.5.asciidoc @@ -0,0 +1,132 @@ +:plugin: gemfire +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+:version: v2.0.5
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-gemfire/blob/v2.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Gemfire input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events from a GemFire region.
+
+GemFire is an object database.
+
+To use this plugin you need to add gemfire.jar to your CLASSPATH.
+Using format=json requires jackson.jar too; use of continuous
+queries requires antlr.jar.
+
+NOTE: This plugin has only been tested with GemFire 7.0.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Gemfire Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cache_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cache_xml_file>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-interest_regexp>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-query>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-region_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-serialization>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-cache_name"]
+===== `cache_name`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+Your client cache name.
+
+[id="{version}-plugins-{type}s-{plugin}-cache_xml_file"]
+===== `cache_xml_file`
+
+ * Value type is <>
+ * Default value is `nil`
+
+The path to a GemFire client cache XML file.
+
+Example:
+
+
+
+
+
+
+
+
+
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-interest_regexp"]
+===== `interest_regexp`
+
+ * Value type is <>
+ * Default value is `".*"`
+
+A regexp to use when registering interest for cache events.
+Ignored if a `query` is specified.
+
+[id="{version}-plugins-{type}s-{plugin}-query"]
+===== `query`
+
+ * Value type is <>
+ * Default value is `nil`
+
+A query to run as a GemFire "continuous query"; if specified, it takes
+precedence over `interest_regexp`, which will be ignored.
+
+IMPORTANT: Use of continuous queries requires subscriptions to be enabled on the client pool.
+
+[id="{version}-plugins-{type}s-{plugin}-region_name"]
+===== `region_name`
+
+ * Value type is <>
+ * Default value is `"Logstash"`
+
+The region name.
+
+[id="{version}-plugins-{type}s-{plugin}-serialization"]
+===== `serialization`
+
+ * Value type is <>
+ * Default value is `nil`
+
+How the message is serialized in the cache. Can be one of `"json"` or
+`"plain"`; the default is plain.
+
+[id="{version}-plugins-{type}s-{plugin}-threads"]
+===== `threads`
+
+ * Value type is <>
+ * Default value is `1`
+
+
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/gemfire-v2.0.6.asciidoc b/docs/versioned-plugins/inputs/gemfire-v2.0.6.asciidoc
new file mode 100644
index 000000000..df87bca2d
--- /dev/null
+++ b/docs/versioned-plugins/inputs/gemfire-v2.0.6.asciidoc
@@ -0,0 +1,132 @@
+:plugin: gemfire
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.6
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-input-gemfire/blob/v2.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Gemfire input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events from a GemFire region.
+
+GemFire is an object database.
+
+To use this plugin you need to add gemfire.jar to your CLASSPATH.
+Using format=json requires jackson.jar too; use of continuous
+queries requires antlr.jar.
+
+NOTE: This plugin has only been tested with GemFire 7.0.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Gemfire Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cache_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cache_xml_file>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-interest_regexp>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-query>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-region_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-serialization>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-cache_name"]
+===== `cache_name`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+Your client cache name.
+
+[id="{version}-plugins-{type}s-{plugin}-cache_xml_file"]
+===== `cache_xml_file`
+
+ * Value type is <>
+ * Default value is `nil`
+
+The path to a GemFire client cache XML file.
+
+Example:
+
+
+
+
+
+
+
+
+
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-interest_regexp"]
+===== `interest_regexp`
+
+ * Value type is <>
+ * Default value is `".*"`
+
+A regexp to use when registering interest for cache events.
+Ignored if a `query` is specified.
+
+[id="{version}-plugins-{type}s-{plugin}-query"]
+===== `query`
+
+ * Value type is <>
+ * Default value is `nil`
+
+A query to run as a GemFire "continuous query"; if specified, it takes
+precedence over `interest_regexp`, which will be ignored.
+
+IMPORTANT: Use of continuous queries requires subscriptions to be enabled on the client pool.
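+
+For illustration only, a sketch of a continuous-query setup. The file
+path, the query, and the use of JSON serialization are hypothetical
+placeholders, not values taken from the plugin:
+
+[source,ruby]
+    input {
+      gemfire {
+        # Client cache XML that defines a pool with subscriptions enabled
+        cache_xml_file => "/etc/logstash/gemfire-client-cache.xml"
+        # Takes precedence over interest_regexp
+        query => "SELECT * FROM /Logstash"
+        serialization => "json"
+      }
+    }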
+
+[id="{version}-plugins-{type}s-{plugin}-region_name"]
+===== `region_name`
+
+ * Value type is <>
+ * Default value is `"Logstash"`
+
+The region name.
+
+[id="{version}-plugins-{type}s-{plugin}-serialization"]
+===== `serialization`
+
+ * Value type is <>
+ * Default value is `nil`
+
+How the message is serialized in the cache. Can be one of `"json"` or
+`"plain"`; the default is plain.
+
+[id="{version}-plugins-{type}s-{plugin}-threads"]
+===== `threads`
+
+ * Value type is <>
+ * Default value is `1`
+
+
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/generator-index.asciidoc b/docs/versioned-plugins/inputs/generator-index.asciidoc
new file mode 100644
index 000000000..0e38e0ae6
--- /dev/null
+++ b/docs/versioned-plugins/inputs/generator-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: generator
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::generator-v3.0.5.asciidoc[]
+include::generator-v3.0.4.asciidoc[]
+include::generator-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/generator-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/generator-v3.0.3.asciidoc
new file mode 100644
index 000000000..06800174b
--- /dev/null
+++ b/docs/versioned-plugins/inputs/generator-v3.0.3.asciidoc
@@ -0,0 +1,107 @@
+:plugin: generator
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-generator/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Generator input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Generate random log events.
+
+The general intention of this plugin is to test the performance of other
+plugins.
+
+An event is generated first
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Generator Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-count>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-lines>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-count"]
+===== `count`
+
+ * Value type is <>
+ * Default value is `0`
+
+Set how many messages should be generated.
+
+The default, `0`, means generate an unlimited number of events.
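+
+As a quick sketch (the count shown is arbitrary), the following emits ten
+copies of the default message and then stops:
+
+[source,ruby]
+    input {
+      generator {
+        message => "Hello world!"  # the documented default
+        count => 10                # 0 would generate events forever
+      }
+    }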
+
+[id="{version}-plugins-{type}s-{plugin}-lines"]
+===== `lines`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The lines to emit, in order. This option cannot be used with the `message`
+setting.
+
+Example:
+[source,ruby]
+    input {
+      generator {
+        lines => [
+          "line 1",
+          "line 2",
+          "line 3"
+        ]
+        # Emit all lines 3 times.
+        count => 3
+      }
+    }
+
+The above will emit `line 1`, then `line 2`, then `line 3`, then `line 1`
+again, and so on.
+
+[id="{version}-plugins-{type}s-{plugin}-message"]
+===== `message`
+
+ * Value type is <>
+ * Default value is `"Hello world!"`
+
+The message string to use in the event.
+
+If you set this to `stdin` then this plugin will read a single line from
+stdin and use that as the message string for every event.
+
+Otherwise, this value will be used verbatim as the event message.
+
+[id="{version}-plugins-{type}s-{plugin}-threads"]
+===== `threads`
+
+ * Value type is <>
+ * Default value is `1`
+
+
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/generator-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/generator-v3.0.4.asciidoc
new file mode 100644
index 000000000..8bdfcce00
--- /dev/null
+++ b/docs/versioned-plugins/inputs/generator-v3.0.4.asciidoc
@@ -0,0 +1,107 @@
+:plugin: generator
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-input-generator/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Generator input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Generate random log events.
+
+The general intention of this plugin is to test the performance of other
+plugins.
+
+An event is generated first
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Generator Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-count>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-lines>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-count"]
+===== `count`
+
+ * Value type is <>
+ * Default value is `0`
+
+Set how many messages should be generated.
+
+The default, `0`, means generate an unlimited number of events.
+
+[id="{version}-plugins-{type}s-{plugin}-lines"]
+===== `lines`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The lines to emit, in order. This option cannot be used with the `message`
+setting.
+
+Example:
+[source,ruby]
+    input {
+      generator {
+        lines => [
+          "line 1",
+          "line 2",
+          "line 3"
+        ]
+        # Emit all lines 3 times.
+        count => 3
+      }
+    }
+
+The above will emit `line 1`, then `line 2`, then `line 3`, then `line 1`
+again, and so on.
+
+[id="{version}-plugins-{type}s-{plugin}-message"]
+===== `message`
+
+ * Value type is <>
+ * Default value is `"Hello world!"`
+
+The message string to use in the event.
+
+If you set this to `stdin` then this plugin will read a single line from
+stdin and use that as the message string for every event.
+
+Otherwise, this value will be used verbatim as the event message.
+
+[id="{version}-plugins-{type}s-{plugin}-threads"]
+===== `threads`
+
+ * Value type is <>
+ * Default value is `1`
+
+
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/generator-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/generator-v3.0.5.asciidoc
new file mode 100644
index 000000000..15c46d454
--- /dev/null
+++ b/docs/versioned-plugins/inputs/generator-v3.0.5.asciidoc
@@ -0,0 +1,107 @@
+:plugin: generator
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-generator/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Generator input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Generate random log events.
+
+The general intention of this plugin is to test the performance of other
+plugins.
+
+An event is generated first
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Generator Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-count>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-lines>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-count"]
+===== `count`
+
+ * Value type is <>
+ * Default value is `0`
+
+Set how many messages should be generated.
+
+The default, `0`, means generate an unlimited number of events.
+
+[id="{version}-plugins-{type}s-{plugin}-lines"]
+===== `lines`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The lines to emit, in order. This option cannot be used with the `message`
+setting.
+
+Example:
+[source,ruby]
+    input {
+      generator {
+        lines => [
+          "line 1",
+          "line 2",
+          "line 3"
+        ]
+        # Emit all lines 3 times.
+        count => 3
+      }
+    }
+
+The above will emit `line 1`, then `line 2`, then `line 3`, then `line 1`
+again, and so on.
+
+[id="{version}-plugins-{type}s-{plugin}-message"]
+===== `message`
+
+ * Value type is <>
+ * Default value is `"Hello world!"`
+
+The message string to use in the event.
+ +If you set this to `stdin` then this plugin will read a single line from +stdin and use that as the message string for every event. + +Otherwise, this value will be used verbatim as the event message. + +[id="{version}-plugins-{type}s-{plugin}-threads"] +===== `threads` + + * Value type is <> + * Default value is `1` + + + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/github-index.asciidoc b/docs/versioned-plugins/inputs/github-index.asciidoc new file mode 100644 index 000000000..bad8448da --- /dev/null +++ b/docs/versioned-plugins/inputs/github-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: github +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::github-v3.0.5.asciidoc[] +include::github-v3.0.4.asciidoc[] +include::github-v3.0.3.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/github-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/github-v3.0.3.asciidoc new file mode 100644 index 000000000..b20fa9d36 --- /dev/null +++ b/docs/versioned-plugins/inputs/github-v3.0.3.asciidoc @@ -0,0 +1,81 @@ +:plugin: github +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-input-github/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Github input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read events from github webhooks + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Github Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-drop_invalid>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ip>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-secret_token>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-drop_invalid"] +===== `drop_invalid` + + * Value type is <> + * Default value is `false` + +If Secret is defined, we drop the events that don't match. +Otherwise, we'll just add an invalid tag + +[id="{version}-plugins-{type}s-{plugin}-ip"] +===== `ip` + + * Value type is <> + * Default value is `"0.0.0.0"` + +The ip to listen on + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. 
+ +The port to listen on + +[id="{version}-plugins-{type}s-{plugin}-secret_token"] +===== `secret_token` + + * Value type is <> + * There is no default value for this setting. + +Your GitHub Secret Token for the webhook + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/github-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/github-v3.0.4.asciidoc new file mode 100644 index 000000000..38480ae4b --- /dev/null +++ b/docs/versioned-plugins/inputs/github-v3.0.4.asciidoc @@ -0,0 +1,81 @@ +:plugin: github +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-input-github/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Github input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read events from github webhooks + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Github Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-drop_invalid>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ip>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-secret_token>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-drop_invalid"] +===== `drop_invalid` + + * Value type is <> + * Default value is `false` + +If Secret is defined, we drop the events that don't match. +Otherwise, we'll just add an invalid tag + +[id="{version}-plugins-{type}s-{plugin}-ip"] +===== `ip` + + * Value type is <> + * Default value is `"0.0.0.0"` + +The ip to listen on + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The port to listen on + +[id="{version}-plugins-{type}s-{plugin}-secret_token"] +===== `secret_token` + + * Value type is <> + * There is no default value for this setting. + +Your GitHub Secret Token for the webhook + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/github-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/github-v3.0.5.asciidoc new file mode 100644 index 000000000..472d2409e --- /dev/null +++ b/docs/versioned-plugins/inputs/github-v3.0.5.asciidoc @@ -0,0 +1,81 @@ +:plugin: github +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-input-github/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Github input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read events from github webhooks + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Github Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-drop_invalid>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ip>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-secret_token>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-drop_invalid"] +===== `drop_invalid` + + * Value type is <> + * Default value is `false` + +If Secret is defined, we drop the events that don't match. +Otherwise, we'll just add an invalid tag + +[id="{version}-plugins-{type}s-{plugin}-ip"] +===== `ip` + + * Value type is <> + * Default value is `"0.0.0.0"` + +The ip to listen on + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The port to listen on + +[id="{version}-plugins-{type}s-{plugin}-secret_token"] +===== `secret_token` + + * Value type is <> + * There is no default value for this setting. + +Your GitHub Secret Token for the webhook + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/google_pubsub-index.asciidoc b/docs/versioned-plugins/inputs/google_pubsub-index.asciidoc new file mode 100644 index 000000000..c1869c577 --- /dev/null +++ b/docs/versioned-plugins/inputs/google_pubsub-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: google_pubsub +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::google_pubsub-v1.0.4.asciidoc[] +include::google_pubsub-v1.0.3.asciidoc[] +include::google_pubsub-v1.0.1.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/google_pubsub-v1.0.1.asciidoc b/docs/versioned-plugins/inputs/google_pubsub-v1.0.1.asciidoc new file mode 100644 index 000000000..e1e8636fe --- /dev/null +++ b/docs/versioned-plugins/inputs/google_pubsub-v1.0.1.asciidoc @@ -0,0 +1,213 @@ +:plugin: google_pubsub +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+:version: v1.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-google_pubsub/blob/v1.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Google_pubsub input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Author: Eric Johnson
+Date: 2016-06-01
+
+Copyright 2016 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+This is a https://github.com/elastic/logstash[Logstash] input plugin for
+https://cloud.google.com/pubsub/[Google Pub/Sub]. The plugin can subscribe
+to a topic and ingest messages.
+
+The main motivation behind the development of this plugin was to ingest
+https://cloud.google.com/logging/[Stackdriver Logging] messages via the
+https://cloud.google.com/logging/docs/export/using_exported_logs[Exported Logs]
+feature of Stackdriver Logging.
+
+==== Prerequisites
+
+You must first create a Google Cloud Platform project and enable the
+Google Pub/Sub API. If you intend to use the plugin to ingest Stackdriver
+Logging messages, you must also enable the Stackdriver Logging API and
+configure log exporting to Pub/Sub. There is plentiful information on
+https://cloud.google.com/ to get started:
+
+- Google Cloud Platform Projects and https://cloud.google.com/docs/overview/[Overview]
+- Google Cloud Pub/Sub https://cloud.google.com/pubsub/[documentation]
+- Stackdriver Logging https://cloud.google.com/logging/[documentation]
+
+==== Cloud Pub/Sub
+
+Currently, this module requires you to create a `topic` manually and specify
+it in the logstash config file. You must also specify a `subscription`, but
+the plugin will attempt to create the pull-based `subscription` on its own.
+
+All messages received from Pub/Sub will be converted to a logstash `event`
+and added to the processing pipeline queue. All Pub/Sub messages will be
+`acknowledged` and removed from the Pub/Sub `topic` (please see more about
+https://cloud.google.com/pubsub/overview#concepts[Pub/Sub concepts]).
+
+It is generally assumed that incoming messages will be in JSON and added to
+the logstash `event` as-is. However, if a plain text message is received, the
+plugin will return the raw text as `raw_message` in the logstash `event`.
+
+==== Authentication
+
+You have two options for authentication depending on where you run Logstash.
+
+1. If you are running Logstash outside of Google Cloud Platform, then you will
+need to create a Google Cloud Platform Service Account and specify the full
+path to the JSON private key file in your config. You must assign sufficient
+roles to the Service Account to create a subscription and to pull messages
+from the subscription. Learn more about GCP Service Accounts and IAM roles
+here:
+
+  - Google Cloud Platform IAM https://cloud.google.com/iam/[overview]
+  - Creating Service Accounts https://cloud.google.com/iam/docs/creating-managing-service-accounts[overview]
+  - Granting Roles https://cloud.google.com/iam/docs/granting-roles-to-service-accounts[overview]
+
+2. If you are running Logstash on a Google Compute Engine instance, you may opt
+to use Application Default Credentials. In this case, you will not need to
+specify a JSON private key file in your config.
+
+==== Stackdriver Logging (optional)
+
+If you intend to use the logstash plugin for Stackdriver Logging message
+ingestion, you must first manually set up the Export option to Cloud Pub/Sub
+and then manually create the `topic`. Please see the more detailed
+instructions at
+https://cloud.google.com/logging/docs/export/using_exported_logs[Exported Logs]
+and ensure that the https://cloud.google.com/logging/docs/export/configure_export#manual-access-pubsub[necessary permissions]
+have also been manually configured.
+
+Logging messages from Stackdriver Logging exported to Pub/Sub are received as
+JSON and converted to a logstash `event` as-is in
+https://cloud.google.com/logging/docs/export/using_exported_logs#log_entries_in_google_pubsub_topics[this format].
+
+==== Sample Configuration
+
+Below is a copy of the included `example.conf-tmpl` file that shows a basic
+configuration for this plugin.
+
+[source,ruby]
+----------------------------------
+input {
+    google_pubsub {
+        # Your GCP project id (name)
+        project_id => "my-project-1234"
+
+        # The topic name below is currently hard-coded in the plugin. You
+        # must first create this topic by hand and ensure you are exporting
+        # logging to this pubsub topic.
+        topic => "logstash-input-dev"
+
+        # The subscription name is customizable. The plugin will attempt to
+        # create the subscription (but use the hard-coded topic name above).
+        subscription => "logstash-sub"
+
+        # If you are running logstash within GCE, it will use
+        # Application Default Credentials and use GCE's metadata
+        # service to fetch tokens. However, if you are running logstash
+        # outside of GCE, you will need to specify the service account's
+        # JSON key file below.
+        #json_key_file => "/home/erjohnso/pkey.json"
+    }
+}
+output { stdout { codec => rubydebug } }
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Google_pubsub Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-json_key_file>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-max_messages>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-project_id>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-subscription>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-topic>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-json_key_file"]
+===== `json_key_file`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If logstash is running within Google Compute Engine, the plugin will use
+GCE's Application Default Credentials. Outside of GCE, you will need to
+specify a Service Account JSON key file.
+
+[id="{version}-plugins-{type}s-{plugin}-max_messages"]
+===== `max_messages`
+
+  * This is a required setting.
+  * Value type is <>
+  * Default value is `5`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-project_id"]
+===== `project_id`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+Google Cloud Project ID (name, not number)
+
+[id="{version}-plugins-{type}s-{plugin}-subscription"]
+===== `subscription`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-topic"]
+===== `topic`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+Google Cloud Pub/Sub Topic and Subscription.
+Note that the topic must be created manually with Cloud Logging
+pre-configured export to PubSub configured to use the defined topic.
+The subscription will be created automatically by the plugin.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/google_pubsub-v1.0.3.asciidoc b/docs/versioned-plugins/inputs/google_pubsub-v1.0.3.asciidoc
new file mode 100644
index 000000000..bcf677ac6
--- /dev/null
+++ b/docs/versioned-plugins/inputs/google_pubsub-v1.0.3.asciidoc
@@ -0,0 +1,213 @@
+:plugin: google_pubsub
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v1.0.3
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-input-google_pubsub/blob/v1.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Google_pubsub input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Author: Eric Johnson
+Date: 2016-06-01
+
+Copyright 2016 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+This is a https://github.com/elastic/logstash[Logstash] input plugin for
+https://cloud.google.com/pubsub/[Google Pub/Sub]. The plugin can subscribe
+to a topic and ingest messages.
+
+The main motivation behind the development of this plugin was to ingest
+https://cloud.google.com/logging/[Stackdriver Logging] messages via the
+https://cloud.google.com/logging/docs/export/using_exported_logs[Exported Logs]
+feature of Stackdriver Logging.
+
+==== Prerequisites
+
+You must first create a Google Cloud Platform project and enable the
+Google Pub/Sub API.
If you intend to use the plugin to ingest Stackdriver Logging
+messages, you must also enable the Stackdriver Logging API and configure log
+exporting to Pub/Sub. There is plentiful information on
+https://cloud.google.com/ to get started:
+
+- Google Cloud Platform Projects and https://cloud.google.com/docs/overview/[Overview]
+- Google Cloud Pub/Sub https://cloud.google.com/pubsub/[documentation]
+- Stackdriver Logging https://cloud.google.com/logging/[documentation]
+
+==== Cloud Pub/Sub
+
+Currently, this module requires you to create a `topic` manually and specify
+it in the logstash config file. You must also specify a `subscription`, but
+the plugin will attempt to create the pull-based `subscription` on its own.
+
+All messages received from Pub/Sub will be converted to a logstash `event`
+and added to the processing pipeline queue. All Pub/Sub messages will be
+`acknowledged` and removed from the Pub/Sub `topic` (please see more about
+https://cloud.google.com/pubsub/overview#concepts[Pub/Sub concepts]).
+
+It is generally assumed that incoming messages will be in JSON and added to
+the logstash `event` as-is. However, if a plain text message is received, the
+plugin will return the raw text as `raw_message` in the logstash `event`.
+
+==== Authentication
+
+You have two options for authentication depending on where you run Logstash.
+
+1. If you are running Logstash outside of Google Cloud Platform, then you will
+need to create a Google Cloud Platform Service Account and specify the full
+path to the JSON private key file in your config. You must assign sufficient
+roles to the Service Account to create a subscription and to pull messages
+from the subscription. Learn more about GCP Service Accounts and IAM roles
+here:
+
+ - Google Cloud Platform IAM https://cloud.google.com/iam/[overview]
+ - Creating Service Accounts https://cloud.google.com/iam/docs/creating-managing-service-accounts[overview]
+ - Granting Roles https://cloud.google.com/iam/docs/granting-roles-to-service-accounts[overview]
+
+2. If you are running Logstash on a Google Compute Engine instance, you may opt
+to use Application Default Credentials. In this case, you will not need to
+specify a JSON private key file in your config.
+
+==== Stackdriver Logging (optional)
+
+If you intend to use the logstash plugin for Stackdriver Logging message
+ingestion, you must first manually set up the Export option to Cloud Pub/Sub and
+then manually create the `topic`. Please see the more detailed instructions at
+https://cloud.google.com/logging/docs/export/using_exported_logs[Exported Logs]
+and ensure that the https://cloud.google.com/logging/docs/export/configure_export#manual-access-pubsub[necessary permissions]
+have also been manually configured.
+
+Logging messages from Stackdriver Logging exported to Pub/Sub are received as
+JSON and converted to a logstash `event` as-is in
+https://cloud.google.com/logging/docs/export/using_exported_logs#log_entries_in_google_pubsub_topics[this format].
+
+==== Sample Configuration
+
+Below is a copy of the included `example.conf-tmpl` file that shows a basic
+configuration for this plugin.
+
+[source,ruby]
+----------------------------------
+input {
+    google_pubsub {
+        # Your GCP project id (name)
+        project_id => "my-project-1234"
+
+        # The topic name below is currently hard-coded in the plugin. You
+        # must first create this topic by hand and ensure you are exporting
+        # logging to this pubsub topic.
+        topic => "logstash-input-dev"
+
+        # The subscription name is customizable. The plugin will attempt to
+        # create the subscription (but use the hard-coded topic name above).
+        subscription => "logstash-sub"
+
+        # If you are running logstash within GCE, it will use
+        # Application Default Credentials and use GCE's metadata
+        # service to fetch tokens. However, if you are running logstash
+        # outside of GCE, you will need to specify the service account's
+        # JSON key file below.
+        #json_key_file => "/home/erjohnso/pkey.json"
+    }
+}
+output { stdout { codec => rubydebug } }
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Google_pubsub Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-json_key_file>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-max_messages>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-project_id>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-subscription>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-topic>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-json_key_file"]
+===== `json_key_file`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+If logstash is running within Google Compute Engine, the plugin will use
+GCE's Application Default Credentials. Outside of GCE, you will need to
+specify a Service Account JSON key file.
+
+[id="{version}-plugins-{type}s-{plugin}-max_messages"]
+===== `max_messages`
+
+  * This is a required setting.
+  * Value type is <>
+  * Default value is `5`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-project_id"]
+===== `project_id`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+Google Cloud Project ID (name, not number)
+
+[id="{version}-plugins-{type}s-{plugin}-subscription"]
+===== `subscription`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-topic"]
+===== `topic`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+Google Cloud Pub/Sub Topic and Subscription.
+Note that the topic must be created manually with Cloud Logging
+pre-configured export to PubSub configured to use the defined topic.
+The subscription will be created automatically by the plugin.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/google_pubsub-v1.0.4.asciidoc b/docs/versioned-plugins/inputs/google_pubsub-v1.0.4.asciidoc
new file mode 100644
index 000000000..3f869429b
--- /dev/null
+++ b/docs/versioned-plugins/inputs/google_pubsub-v1.0.4.asciidoc
@@ -0,0 +1,213 @@
+:plugin: google_pubsub
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v1.0.4
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-google_pubsub/blob/v1.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Google_pubsub input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Author: Eric Johnson
+Date: 2016-06-01
+
+Copyright 2016 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+This is a https://github.com/elastic/logstash[Logstash] input plugin for
+https://cloud.google.com/pubsub/[Google Pub/Sub]. The plugin can subscribe
+to a topic and ingest messages.
+
+The main motivation behind the development of this plugin was to ingest
+https://cloud.google.com/logging/[Stackdriver Logging] messages via the
+https://cloud.google.com/logging/docs/export/using_exported_logs[Exported Logs]
+feature of Stackdriver Logging.
+
+==== Prerequisites
+
+You must first create a Google Cloud Platform project and enable the
+Google Pub/Sub API. If you intend to use the plugin to ingest Stackdriver Logging
+messages, you must also enable the Stackdriver Logging API and configure log
+exporting to Pub/Sub. There is plentiful information on
+https://cloud.google.com/ to get started:
+
+- Google Cloud Platform Projects and https://cloud.google.com/docs/overview/[Overview]
+- Google Cloud Pub/Sub https://cloud.google.com/pubsub/[documentation]
+- Stackdriver Logging https://cloud.google.com/logging/[documentation]
+
+==== Cloud Pub/Sub
+
+Currently, this module requires you to create a `topic` manually and specify
+it in the logstash config file. You must also specify a `subscription`, but
+the plugin will attempt to create the pull-based `subscription` on its own.
+
+All messages received from Pub/Sub will be converted to a logstash `event`
+and added to the processing pipeline queue. All Pub/Sub messages will be
+`acknowledged` and removed from the Pub/Sub `topic` (please see more about
+https://cloud.google.com/pubsub/overview#concepts[Pub/Sub concepts]).
+
+It is generally assumed that incoming messages will be in JSON and added to
+the logstash `event` as-is. However, if a plain text message is received, the
+plugin will return the raw text as `raw_message` in the logstash `event`.
+
+==== Authentication
+
+You have two options for authentication depending on where you run Logstash.
+
+1. If you are running Logstash outside of Google Cloud Platform, then you will
+need to create a Google Cloud Platform Service Account and specify the full
+path to the JSON private key file in your config. You must assign sufficient
+roles to the Service Account to create a subscription and to pull messages
+from the subscription.
Learn more about GCP Service Accounts and IAM roles
+here:
+
+ - Google Cloud Platform IAM https://cloud.google.com/iam/[overview]
+ - Creating Service Accounts https://cloud.google.com/iam/docs/creating-managing-service-accounts[overview]
+ - Granting Roles https://cloud.google.com/iam/docs/granting-roles-to-service-accounts[overview]
+
+2. If you are running Logstash on a Google Compute Engine instance, you may opt
+to use Application Default Credentials. In this case, you will not need to
+specify a JSON private key file in your config.
+
+==== Stackdriver Logging (optional)
+
+If you intend to use the logstash plugin for Stackdriver Logging message
+ingestion, you must first manually set up the Export option to Cloud Pub/Sub and
+then manually create the `topic`. Please see the more detailed instructions at
+https://cloud.google.com/logging/docs/export/using_exported_logs[Exported Logs]
+and ensure that the https://cloud.google.com/logging/docs/export/configure_export#manual-access-pubsub[necessary permissions]
+have also been manually configured.
+
+Logging messages from Stackdriver Logging exported to Pub/Sub are received as
+JSON and converted to a logstash `event` as-is in
+https://cloud.google.com/logging/docs/export/using_exported_logs#log_entries_in_google_pubsub_topics[this format].
+
+==== Sample Configuration
+
+Below is a copy of the included `example.conf-tmpl` file that shows a basic
+configuration for this plugin.
+
+[source,ruby]
+----------------------------------
+input {
+    google_pubsub {
+        # Your GCP project id (name)
+        project_id => "my-project-1234"
+
+        # The topic name below is currently hard-coded in the plugin. You
+        # must first create this topic by hand and ensure you are exporting
+        # logging to this pubsub topic.
+        topic => "logstash-input-dev"
+
+        # The subscription name is customizable. The plugin will attempt to
+        # create the subscription (but use the hard-coded topic name above).
+        subscription => "logstash-sub"
+
+        # If you are running logstash within GCE, it will use
+        # Application Default Credentials and use GCE's metadata
+        # service to fetch tokens. However, if you are running logstash
+        # outside of GCE, you will need to specify the service account's
+        # JSON key file below.
+        #json_key_file => "/home/erjohnso/pkey.json"
+    }
+}
+output { stdout { codec => rubydebug } }
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Google_pubsub Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-json_key_file>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-max_messages>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-project_id>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-subscription>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-topic>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-json_key_file"]
+===== `json_key_file`
+
+  * Value type is <>
+  * There is no default value for this setting.
+ +If logstash is running within Google Compute Engine, the plugin will use +GCE's Application Default Credentials. Outside of GCE, you will need to +specify a Service Account JSON key file. + +[id="{version}-plugins-{type}s-{plugin}-max_messages"] +===== `max_messages` + + * This is a required setting. + * Value type is <> + * Default value is `5` + + + +[id="{version}-plugins-{type}s-{plugin}-project_id"] +===== `project_id` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Google Cloud Project ID (name, not number) + +[id="{version}-plugins-{type}s-{plugin}-subscription"] +===== `subscription` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + + + +[id="{version}-plugins-{type}s-{plugin}-topic"] +===== `topic` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Google Cloud Pub/Sub Topic and Subscription. +Note that the topic must be created manually with Cloud Logging +pre-configured export to PubSub configured to use the defined topic. +The subscription will be created automatically by the plugin. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/googleanalytics-index.asciidoc b/docs/versioned-plugins/inputs/googleanalytics-index.asciidoc new file mode 100644 index 000000000..65e18bfaa --- /dev/null +++ b/docs/versioned-plugins/inputs/googleanalytics-index.asciidoc @@ -0,0 +1,10 @@ +:plugin: googleanalytics +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +|======================================================================= + + diff --git a/docs/versioned-plugins/inputs/graphite-index.asciidoc b/docs/versioned-plugins/inputs/graphite-index.asciidoc new file mode 100644 index 000000000..acdaca84a --- /dev/null +++ b/docs/versioned-plugins/inputs/graphite-index.asciidoc @@ -0,0 +1,14 @@ +:plugin: graphite +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-08-21 +| <> | 2017-06-23 +|======================================================================= + +include::graphite-v3.0.4.asciidoc[] +include::graphite-v3.0.3.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/graphite-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/graphite-v3.0.3.asciidoc new file mode 100644 index 000000000..d3965c032 --- /dev/null +++ b/docs/versioned-plugins/inputs/graphite-v3.0.3.asciidoc @@ -0,0 +1,175 @@ +:plugin: graphite +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-input-graphite/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Graphite input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Receive graphite metrics. This plugin understands the text-based graphite +carbon protocol. 
Both `N` and `specific-timestamp` forms are supported, for example:
+[source,ruby]
+    mysql.slow_query.count 204 N
+    haproxy.live_backends 7 1364608909
+
+`N` means `now` for a timestamp. This plugin also supports having the time
+specified in the metric payload.
+
+For every metric received from a client, a single event will be emitted with
+the metric name as the field (like `mysql.slow_query.count`) and the metric
+value as the field's value.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Graphite Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-data_timeout"]
+===== `data_timeout`  (DEPRECATED)
+
+  * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+  * Value type is <>
+  * Default value is `-1`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * Value type is <>
+  * Default value is `"0.0.0.0"`
+
+Read events over a TCP socket.
+
+Like stdin and file inputs, each event is assumed to be one line of text.
+
+Can either accept connections from clients or connect to a server,
+depending on `mode`.
+When mode is `server`, the address to listen on.
+When mode is `client`, the address to connect to.
+
+[id="{version}-plugins-{type}s-{plugin}-mode"]
+===== `mode`
+
+  * Value can be any of: `server`, `client`
+  * Default value is `"server"`
+
+Mode to operate in. `server` listens for client connections,
+`client` connects to a server.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+When mode is `server`, the port to listen on.
+When mode is `client`, the port to connect to.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"]
+===== `proxy_protocol`
+
+  * Value type is <>
+  * Default value is `false`
+
+Proxy protocol support, only v1 is supported at this time
+http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"]
+===== `ssl_cacert`  (DEPRECATED)
+
+  * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The SSL CA certificate, chainfile or CA path. The system CA path is automatically included.
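+
+As a rough sketch of how the SSL-related options in this section fit together
+(the file paths here are hypothetical), a TLS-enabled listener might look like:
+
+[source,ruby]
+----------------------------------
+input {
+  graphite {
+    mode => "server"
+    port => 2003
+    # ssl_enable must be set for the other ssl_* options to take effect
+    ssl_enable => true
+    ssl_cert => "/etc/logstash/certs/server.crt"
+    ssl_key => "/etc/logstash/certs/server.key"
+  }
+}
+----------------------------------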
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_cert"]
+===== `ssl_cert`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+SSL certificate path
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_enable"]
+===== `ssl_enable`
+
+  * Value type is <>
+  * Default value is `false`
+
+Enable SSL (must be set for other `ssl_` options to take effect).
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"]
+===== `ssl_extra_chain_certs`
+
+  * Value type is <>
+  * Default value is `[]`
+
+An Array of extra X509 certificates to be added to the certificate chain.
+Useful when the CA chain is not necessary in the system store.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+SSL key path
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+  * Value type is <>
+  * Default value is `nil`
+
+SSL key passphrase
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify"]
+===== `ssl_verify`
+
+  * Value type is <>
+  * Default value is `true`
+
+Verify the identity of the other end of the SSL connection against the CA.
+For input, sets the field `sslsubject` to that of the client certificate.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/graphite-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/graphite-v3.0.4.asciidoc
new file mode 100644
index 000000000..87e42fda3
--- /dev/null
+++ b/docs/versioned-plugins/inputs/graphite-v3.0.4.asciidoc
@@ -0,0 +1,175 @@
+:plugin: graphite
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-08-21
+:changelog_url: https://github.com/logstash-plugins/logstash-input-graphite/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Graphite input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Receive graphite metrics. This plugin understands the text-based graphite
+carbon protocol. Both `N` and `specific-timestamp` forms are supported, for example:
+[source,ruby]
+    mysql.slow_query.count 204 N
+    haproxy.live_backends 7 1364608909
+
+`N` means `now` for a timestamp. This plugin also supports having the time
+specified in the metric payload.
+
+For every metric received from a client, a single event will be emitted with
+the metric name as the field (like `mysql.slow_query.count`) and the metric
+value as the field's value.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Graphite Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-data_timeout"] +===== `data_timeout` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `-1` + + + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +Read events over a TCP socket. + +Like stdin and file inputs, each event is assumed to be one line of text. + +Can either accept connections from clients or connect to a server, +depending on `mode`. +When mode is `server`, the address to listen on. +When mode is `client`, the address to connect to. + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `server`, `client` + * Default value is `"server"` + +Mode to operate in. `server` listens for client connections, +`client` connects to a server. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +When mode is `server`, the port to listen on. +When mode is `client`, the port to connect to. + +[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"] +===== `proxy_protocol` + + * Value type is <> + * Default value is `false` + +Proxy protocol support, only v1 is supported at this time +http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt + +[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"] +===== `ssl_cacert` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + +The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. + +[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] +===== `ssl_cert` + + * Value type is <> + * There is no default value for this setting. + +SSL certificate path + +[id="{version}-plugins-{type}s-{plugin}-ssl_enable"] +===== `ssl_enable` + + * Value type is <> + * Default value is `false` + +Enable SSL (must be set for other `ssl_` options to take effect). + +[id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"] +===== `ssl_extra_chain_certs` + + * Value type is <> + * Default value is `[]` + +An Array of extra X509 certificates to be added to the certificate chain. +Useful when the CA chain is not necessary in the system store. 
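+
+For instance, here is a hedged sketch (with hypothetical paths) of supplying an
+intermediate CA via `ssl_extra_chain_certs` alongside the server certificate
+and key:
+
+[source,ruby]
+----------------------------------
+input {
+  graphite {
+    port => 2003
+    ssl_enable => true
+    ssl_cert => "/etc/logstash/certs/server.crt"
+    ssl_key => "/etc/logstash/certs/server.key"
+    ssl_extra_chain_certs => ["/etc/logstash/certs/intermediate-ca.crt"]
+  }
+}
+----------------------------------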
+ +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + + * Value type is <> + * There is no default value for this setting. + +SSL key path + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] +===== `ssl_key_passphrase` + + * Value type is <> + * Default value is `nil` + +SSL key passphrase + +[id="{version}-plugins-{type}s-{plugin}-ssl_verify"] +===== `ssl_verify` + + * Value type is <> + * Default value is `true` + +Verify the identity of the other end of the SSL connection against the CA. +For input, sets the field `sslsubject` to that of the client certificate. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/heartbeat-index.asciidoc b/docs/versioned-plugins/inputs/heartbeat-index.asciidoc new file mode 100644 index 000000000..556dcb118 --- /dev/null +++ b/docs/versioned-plugins/inputs/heartbeat-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: heartbeat +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::heartbeat-v3.0.5.asciidoc[] +include::heartbeat-v3.0.4.asciidoc[] +include::heartbeat-v3.0.3.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/heartbeat-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/heartbeat-v3.0.3.asciidoc new file mode 100644 index 000000000..a56d6b6bc --- /dev/null +++ b/docs/versioned-plugins/inputs/heartbeat-v3.0.3.asciidoc @@ -0,0 +1,97 @@ +:plugin: heartbeat +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-input-heartbeat/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Heartbeat input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Generate heartbeat messages. + +The general intention of this is to test the performance and +availability of Logstash. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Heartbeat Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-count>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-count"] +===== `count` + + * Value type is <> + * Default value is `-1` + +How many times to iterate. +This is typically used only for testing purposes. 
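+
+As an illustrative sketch (the values are arbitrary), the following emits five
+heartbeat events, one per second, and then stops, which makes for a quick
+pipeline smoke test:
+
+[source,ruby]
+----------------------------------
+input {
+  heartbeat {
+    count => 5       # stop after five events
+    interval => 1    # one event per second (see `interval` below)
+  }
+}
+output { stdout { codec => rubydebug } }
+----------------------------------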
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+  * Value type is <>
+  * Default value is `60`
+
+Set how frequently messages should be sent.
+
+The default, `60`, means send a message every 60 seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-message"]
+===== `message`
+
+  * Value type is <>
+  * Default value is `"ok"`
+
+The message string to use in the event.
+
+If you set this to `epoch` then this plugin will use the current
+time as a unix timestamp (which is, by definition, UTC). It will
+output this value into a field called `clock`.
+
+If you set this to `sequence` then this plugin will send a sequence of
+numbers beginning at 0 and incrementing each interval. It will
+output this value into a field called `clock`.
+
+Otherwise, this value will be used verbatim as the event message. It
+will output this value into a field called `message`.
+
+[id="{version}-plugins-{type}s-{plugin}-threads"]
+===== `threads`
+
+  * Value type is <>
+  * Default value is `1`
+
+
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/heartbeat-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/heartbeat-v3.0.4.asciidoc
new file mode 100644
index 000000000..f9bdc4903
--- /dev/null
+++ b/docs/versioned-plugins/inputs/heartbeat-v3.0.4.asciidoc
@@ -0,0 +1,97 @@
+:plugin: heartbeat
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-input-heartbeat/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Heartbeat input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Generate heartbeat messages.
+
+The general intention of this is to test the performance and
+availability of Logstash.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Heartbeat Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-count>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-count"]
+===== `count`
+
+  * Value type is <>
+  * Default value is `-1`
+
+How many times to iterate.
+This is typically used only for testing purposes.
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+  * Value type is <>
+  * Default value is `60`
+
+Set how frequently messages should be sent.
+
+The default, `60`, means send a message every 60 seconds.
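+
+For example, a minimal sketch that emits one heartbeat every ten seconds:
+
+[source,ruby]
+----------------------------------
+input {
+  heartbeat {
+    interval => 10    # send a message every 10 seconds
+  }
+}
+----------------------------------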
+
+[id="{version}-plugins-{type}s-{plugin}-message"]
+===== `message`
+
+  * Value type is <>
+  * Default value is `"ok"`
+
+The message string to use in the event.
+
+If you set this to `epoch` then this plugin will use the current
+time as a unix timestamp (which is, by definition, UTC). It will
+output this value into a field called `clock`.
+
+If you set this to `sequence` then this plugin will send a sequence of
+numbers beginning at 0 and incrementing each interval. It will
+output this value into a field called `clock`.
+
+Otherwise, this value will be used verbatim as the event message. It
+will output this value into a field called `message`.
+
+[id="{version}-plugins-{type}s-{plugin}-threads"]
+===== `threads`
+
+  * Value type is <>
+  * Default value is `1`
+
+
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/heartbeat-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/heartbeat-v3.0.5.asciidoc
new file mode 100644
index 000000000..06d67e436
--- /dev/null
+++ b/docs/versioned-plugins/inputs/heartbeat-v3.0.5.asciidoc
@@ -0,0 +1,97 @@
+:plugin: heartbeat
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-heartbeat/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Heartbeat input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Generate heartbeat messages.
+
+The general intention of this is to test the performance and
+availability of Logstash.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Heartbeat Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-count>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-count"]
+===== `count`
+
+  * Value type is <>
+  * Default value is `-1`
+
+How many times to iterate.
+This is typically used only for testing purposes.
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+  * Value type is <>
+  * Default value is `60`
+
+Set how frequently messages should be sent.
+
+The default, `60`, means send a message every 60 seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-message"]
+===== `message`
+
+  * Value type is <>
+  * Default value is `"ok"`
+
+The message string to use in the event.
+
+If you set this to `epoch` then this plugin will use the current
+time as a unix timestamp (which is, by definition, UTC).
It will
+output this value into a field called `clock`.
+
+If you set this to `sequence` then this plugin will send a sequence of
+numbers beginning at 0 and incrementing each interval. It will
+output this value into a field called `clock`.
+
+Otherwise, this value will be used verbatim as the event message. It
+will output this value into a field called `message`.
+
+[id="{version}-plugins-{type}s-{plugin}-threads"]
+===== `threads`
+
+  * Value type is <>
+  * Default value is `1`
+
+
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/heroku-index.asciidoc b/docs/versioned-plugins/inputs/heroku-index.asciidoc
new file mode 100644
index 000000000..3b78e80db
--- /dev/null
+++ b/docs/versioned-plugins/inputs/heroku-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: heroku
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::heroku-v3.0.2.asciidoc[]
+include::heroku-v3.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/heroku-v3.0.1.asciidoc b/docs/versioned-plugins/inputs/heroku-v3.0.1.asciidoc
new file mode 100644
index 000000000..54f20d59f
--- /dev/null
+++ b/docs/versioned-plugins/inputs/heroku-v3.0.1.asciidoc
@@ -0,0 +1,66 @@
+:plugin: heroku
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-heroku/blob/v3.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="plugins-{type}-{plugin}"]
+
+=== Heroku input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Stream events from a heroku app's logs.
+
+This will read events in a manner similar to how the `heroku logs -t` command
+fetches logs.
+
+Recommended filters:
+[source,ruby]
+    filter {
+      grok {
+        pattern => "^%{TIMESTAMP_ISO8601:timestamp} %{WORD:component}\[%{WORD:process}(?:\.%{INT:instance:int})?\]: %{DATA:message}$"
+      }
+      date { timestamp => ISO8601 }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Heroku Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-app>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-app"]
+===== `app`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The name of your heroku application.
This is usually the first part of
+the domain name `my-app-name.herokuapp.com`.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/heroku-v3.0.2.asciidoc b/docs/versioned-plugins/inputs/heroku-v3.0.2.asciidoc
new file mode 100644
index 000000000..cce0b1073
--- /dev/null
+++ b/docs/versioned-plugins/inputs/heroku-v3.0.2.asciidoc
@@ -0,0 +1,66 @@
+:plugin: heroku
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.2
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-input-heroku/blob/v3.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Heroku input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Stream events from a heroku app's logs.
+
+This will read events in a manner similar to how the `heroku logs -t` command
+fetches logs.
+
+Recommended filters:
+[source,ruby]
+    filter {
+      grok {
+        pattern => "^%{TIMESTAMP_ISO8601:timestamp} %{WORD:component}\[%{WORD:process}(?:\.%{INT:instance:int})?\]: %{DATA:message}$"
+      }
+      date { timestamp => ISO8601 }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Heroku Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-app>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-app"]
+===== `app`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The name of your heroku application.
This is usually the first part of
+the domain name `my-app-name.herokuapp.com`.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/http-index.asciidoc b/docs/versioned-plugins/inputs/http-index.asciidoc
new file mode 100644
index 000000000..f2530b53b
--- /dev/null
+++ b/docs/versioned-plugins/inputs/http-index.asciidoc
@@ -0,0 +1,18 @@
+:plugin: http
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-12-09
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::http-v3.0.8.asciidoc[]
+include::http-v3.0.7.asciidoc[]
+include::http-v3.0.6.asciidoc[]
+include::http-v3.0.5.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/http-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/http-v3.0.5.asciidoc
new file mode 100644
index 000000000..17f030c6f
--- /dev/null
+++ b/docs/versioned-plugins/inputs/http-v3.0.5.asciidoc
@@ -0,0 +1,168 @@
+:plugin: http
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-http/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="plugins-{type}-{plugin}"]
+
+=== Http input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Using this input you can receive single or multiline events over http(s).
+Applications can send an HTTP POST request with a body to the endpoint started by this
+input and Logstash will convert it into an event for subsequent processing. Users
+can pass plain text, JSON, or any formatted data and use a corresponding codec with this
+input. For Content-Type `application/json` the `json` codec is used, but for all other
+data formats, `plain` codec is used.
+
+This input can also be used to receive webhook requests to integrate with other services
+and applications. By taking advantage of the vast plugin ecosystem available in Logstash
+you can trigger actionable events right from your application.
+
+==== Security
+This plugin supports standard HTTP basic authentication headers to identify the requester.
+You can pass in a username and password combination while sending data to this input.
+
+You can also set up SSL and send data securely over https, with an option of validating
+the client's certificate. Currently, the certificate setup is through
+https://docs.oracle.com/cd/E19509-01/820-3503/ggfen/index.html[Java Keystore
+format].
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Http Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-additional_codecs>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-response_headers>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-verify_mode>> |<>, one of `["none", "peer", "force_peer"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-additional_codecs"]
+===== `additional_codecs`
+
+  * Value type is <>
+  * Default value is `{"application/json"=>"json"}`
+
+Apply specific codecs for specific content types.
+The default codec will be applied only after this list is checked
+and no codec for the request's content-type is found.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * Value type is <>
+  * Default value is `"0.0.0.0"`
+
+The host or ip to bind
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The JKS keystore to validate the client's certificates
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Set the truststore password
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Password for basic authorization
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * Value type is <>
+  * Default value is `8080`
+
+The TCP port to bind to
+
+[id="{version}-plugins-{type}s-{plugin}-response_headers"]
+===== `response_headers`
+
+  * Value type is <>
+  * Default value is `{"Content-Type"=>"text/plain"}`
+
+Specify a custom set of response headers
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+  * Value type is <>
+  * Default value is `false`
+
+Enable SSL
+
+[id="{version}-plugins-{type}s-{plugin}-threads"]
+===== `threads`
+
+  * Value type is <>
+  * Default value is `4`
+
+Maximum number of threads to use
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Username for basic authorization
+
+[id="{version}-plugins-{type}s-{plugin}-verify_mode"]
+===== `verify_mode`
+
+  * Value can be any of: `none`, `peer`, `force_peer`
+  * Default value is `"none"`
+
+Set the client certificate verification method.
Valid methods: none, peer, force_peer
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/http-v3.0.6.asciidoc b/docs/versioned-plugins/inputs/http-v3.0.6.asciidoc
new file mode 100644
index 000000000..4bd269320
--- /dev/null
+++ b/docs/versioned-plugins/inputs/http-v3.0.6.asciidoc
@@ -0,0 +1,168 @@
+:plugin: http
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.6
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-input-http/blob/v3.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Http input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Using this input you can receive single or multiline events over http(s).
+Applications can send an HTTP POST request with a body to the endpoint started by this
+input and Logstash will convert it into an event for subsequent processing. Users
+can pass plain text, JSON, or any formatted data and use a corresponding codec with this
+input. For Content-Type `application/json` the `json` codec is used, but for all other
+data formats, `plain` codec is used.
+
+This input can also be used to receive webhook requests to integrate with other services
+and applications. By taking advantage of the vast plugin ecosystem available in Logstash
+you can trigger actionable events right from your application.
+
+==== Security
+This plugin supports standard HTTP basic authentication headers to identify the requester.
+You can pass in a username and password combination while sending data to this input.
+
+You can also set up SSL and send data securely over https, with an option of validating
+the client's certificate. Currently, the certificate setup is through
+https://docs.oracle.com/cd/E19509-01/820-3503/ggfen/index.html[Java Keystore
+format].
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Http Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-additional_codecs>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-response_headers>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-verify_mode>> |<>, one of `["none", "peer", "force_peer"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
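+
+To make these options concrete, here is a hedged sketch of a basic
+authenticated listener (the port and credentials are hypothetical):
+
+[source,ruby]
+----------------------------------
+input {
+  http {
+    host => "0.0.0.0"      # bind on all interfaces
+    port => 8080
+    user => "logstash"     # basic-auth username
+    password => "s3cr3t"   # basic-auth password
+  }
+}
+----------------------------------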
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-additional_codecs"]
+===== `additional_codecs`
+
+  * Value type is <>
+  * Default value is `{"application/json"=>"json"}`
+
+Apply specific codecs for specific content types.
+The default codec will be applied only after this list is checked
+and no codec for the request's content-type is found.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * Value type is <>
+  * Default value is `"0.0.0.0"`
+
+The host or ip to bind
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The JKS keystore to validate the client's certificates
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Set the truststore password
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Password for basic authorization
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * Value type is <>
+  * Default value is `8080`
+
+The TCP port to bind to
+
+[id="{version}-plugins-{type}s-{plugin}-response_headers"]
+===== `response_headers`
+
+  * Value type is <>
+  * Default value is `{"Content-Type"=>"text/plain"}`
+
+Specify a custom set of response headers
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+  * Value type is <>
+  * Default value is `false`
+
+Enable SSL
+
+[id="{version}-plugins-{type}s-{plugin}-threads"]
+===== `threads`
+
+  * Value type is <>
+  * Default value is `4`
+
+Maximum number of threads to use
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Username for basic authorization
+
+[id="{version}-plugins-{type}s-{plugin}-verify_mode"]
+===== `verify_mode`
+
+  * Value can be any of: `none`, `peer`, `force_peer`
+  * Default value is `"none"`
+
+Set the client certificate verification method. Valid methods: none, peer, force_peer
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/http-v3.0.7.asciidoc b/docs/versioned-plugins/inputs/http-v3.0.7.asciidoc
new file mode 100644
index 000000000..a4526c066
--- /dev/null
+++ b/docs/versioned-plugins/inputs/http-v3.0.7.asciidoc
@@ -0,0 +1,165 @@
+:plugin: http
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.7
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-http/blob/v3.0.7/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Http input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Using this input you can receive single or multiline events over http(s).
+Applications can send an HTTP POST request with a body to the endpoint started by this +input and Logstash will convert it into an event for subsequent processing. Users +can pass plain text, JSON, or any formatted data and use a corresponding codec with this +input. For Content-Type `application/json` the `json` codec is used, but for all other +data formats, `plain` codec is used. + +This input can also be used to receive webhook requests to integrate with other services +and applications. By taking advantage of the vast plugin ecosystem available in Logstash +you can trigger actionable events right from your application. + +==== Security +This plugin supports standard HTTP basic authentication headers to identify the requester. +You can pass in a username, password combination while sending data to this input + +You can also setup SSL and send data securely over https, with an option of validating +the client's certificate. Currently, the certificate setup is through +https://docs.oracle.com/cd/E19509-01/820-3503/ggfen/index.html[Java Keystore +format] + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Http Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-additional_codecs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-response_headers>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-verify_mode>> |<>, one of `["none", "peer", "force_peer"]`|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-additional_codecs"] +===== `additional_codecs` + + * Value type is <> + * Default value is `{"application/json"=>"json"}` + +Apply specific codecs for specific content types. +The default codec will be applied only after this list is checked +and no codec for the request's content-type is found + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +The host or ip to bind + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +The JKS keystore to validate the client's certificates + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is <> + * There is no default value for this setting. + +Set the truststore password + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. 
+ +Password for basic authorization + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `8080` + +The TCP port to bind to + +[id="{version}-plugins-{type}s-{plugin}-response_headers"] +===== `response_headers` + + * Value type is <> + * Default value is `{"Content-Type"=>"text/plain"}` + +specify a custom set of response headers + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * Default value is `false` + +SSL Configurations + +Enable SSL + +[id="{version}-plugins-{type}s-{plugin}-threads"] +===== `threads` + + * Value type is <> + * Default value is `4` + +Maximum number of threads to use + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * There is no default value for this setting. + +Username for basic authorization + +[id="{version}-plugins-{type}s-{plugin}-verify_mode"] +===== `verify_mode` + + * Value can be any of: `none`, `peer`, `force_peer` + * Default value is `"none"` + +Set the client certificate verification method. Valid methods: none, peer, force_peer + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/http-v3.0.8.asciidoc b/docs/versioned-plugins/inputs/http-v3.0.8.asciidoc new file mode 100644 index 000000000..01d5d9f35 --- /dev/null +++ b/docs/versioned-plugins/inputs/http-v3.0.8.asciidoc @@ -0,0 +1,178 @@ +:plugin: http +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.8 +:release_date: 2017-12-09 +:changelog_url: https://github.com/logstash-plugins/logstash-input-http/blob/v3.0.8/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Http input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Using this input you can receive single or multiline events over http(s). +Applications can send an HTTP POST request with a body to the endpoint started by this +input and Logstash will convert it into an event for subsequent processing. Users +can pass plain text, JSON, or any formatted data and use a corresponding codec with this +input. For Content-Type `application/json` the `json` codec is used, but for all other +data formats, `plain` codec is used. + +This input can also be used to receive webhook requests to integrate with other services +and applications. By taking advantage of the vast plugin ecosystem available in Logstash +you can trigger actionable events right from your application. + +==== Blocking Behavior + +The HTTP protocol doesn't deal well with long running requests. This plugin will either return +a 429 (busy) error when Logstash is backlogged, or it will time out the request. + +If a 429 error is encountered clients should sleep, backing off exponentially with some random +jitter, then retry their request. + +This plugin will block if the Logstash queue is blocked and there are available HTTP input threads. +This will cause most HTTP clients to time out. Sent events will still be processed in this case. This +behavior is not optimal and will be changed in a future release. 
In the future, this plugin will always +return a 429 if the queue is busy, and will not time out in the event of a busy queue. + +==== Security +This plugin supports standard HTTP basic authentication headers to identify the requester. +You can pass in a username, password combination while sending data to this input + +You can also setup SSL and send data securely over https, with an option of validating +the client's certificate. Currently, the certificate setup is through +https://docs.oracle.com/cd/E19509-01/820-3503/ggfen/index.html[Java Keystore +format] + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Http Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-additional_codecs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-response_headers>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-verify_mode>> |<>, one of `["none", "peer", "force_peer"]`|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-additional_codecs"] +===== `additional_codecs` + + * Value type is <> + * Default value is `{"application/json"=>"json"}` + +Apply specific codecs for specific content types. +The default codec will be applied only after this list is checked +and no codec for the request's content-type is found + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +The host or ip to bind + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +The JKS keystore to validate the client's certificates + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is <> + * There is no default value for this setting. + +Set the truststore password + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. 
+ +Password for basic authorization + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `8080` + +The TCP port to bind to + +[id="{version}-plugins-{type}s-{plugin}-response_headers"] +===== `response_headers` + + * Value type is <> + * Default value is `{"Content-Type"=>"text/plain"}` + +specify a custom set of response headers + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * Default value is `false` + +SSL Configurations + +Enable SSL + +[id="{version}-plugins-{type}s-{plugin}-threads"] +===== `threads` + + * Value type is <> + * Default value is `4` + +Maximum number of threads to use + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * There is no default value for this setting. + +Username for basic authorization + +[id="{version}-plugins-{type}s-{plugin}-verify_mode"] +===== `verify_mode` + + * Value can be any of: `none`, `peer`, `force_peer` + * Default value is `"none"` + +Set the client certificate verification method. Valid methods: none, peer, force_peer + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/http_poller-index.asciidoc b/docs/versioned-plugins/inputs/http_poller-index.asciidoc new file mode 100644 index 000000000..ecad8e9c7 --- /dev/null +++ b/docs/versioned-plugins/inputs/http_poller-index.asciidoc @@ -0,0 +1,30 @@ +:plugin: http_poller +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-09-20 +| <> | 2017-09-07 +| <> | 2017-08-15 +| <> | 2017-08-02 +| <> | 2017-09-21 +| <> | 2017-09-07 +| <> | 2017-08-18 +| <> | 2017-06-23 +| <> | 2017-05-08 +|======================================================================= + +include::http_poller-v4.0.4.asciidoc[] +include::http_poller-v4.0.3.asciidoc[] +include::http_poller-v4.0.2.asciidoc[] +include::http_poller-v4.0.1.asciidoc[] +include::http_poller-v4.0.0.asciidoc[] +include::http_poller-v3.3.4.asciidoc[] +include::http_poller-v3.3.3.asciidoc[] +include::http_poller-v3.3.2.asciidoc[] +include::http_poller-v3.3.1.asciidoc[] +include::http_poller-v3.3.0.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/http_poller-v3.3.0.asciidoc b/docs/versioned-plugins/inputs/http_poller-v3.3.0.asciidoc new file mode 100644 index 000000000..1ae935274 --- /dev/null +++ b/docs/versioned-plugins/inputs/http_poller-v3.3.0.asciidoc @@ -0,0 +1,400 @@ +:plugin: http_poller +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.3.0 +:release_date: 2017-05-08 +:changelog_url: https://github.com/logstash-plugins/logstash-input-http_poller/blob/v3.3.0/CHANGELOG.md +:include_path: ../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Http_poller + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This Logstash input plugin allows you to call an HTTP API, decode the output of it into event(s), and +send them on their merry way. 
The idea behind this plugin came from a need to read the Spring Boot
+metrics endpoint, instead of configuring JMX to monitor Java application memory, GC, and so on.
+
+==== Example
+Reads from a list of urls and decodes the body of the response with a codec.
+The config should look like this:
+
+[source,ruby]
+----------------------------------
+input {
+  http_poller {
+    urls => {
+      test1 => "http://localhost:9200"
+      test2 => {
+        # Supports all options supported by ruby's Manticore HTTP client
+        method => get
+        user => "AzureDiamond"
+        password => "hunter2"
+        url => "http://localhost:9200/_cluster/health"
+        headers => {
+          Accept => "application/json"
+        }
+      }
+    }
+    request_timeout => 60
+    # Supports "cron", "every", "at" and "in" schedules by rufus scheduler
+    schedule => { cron => "* * * * * UTC"}
+    codec => "json"
+    # A hash of request metadata info (timing, response headers, etc.) will be sent here
+    metadata_target => "http_poller_metadata"
+  }
+}
+
+output {
+  stdout {
+    codec => rubydebug
+  }
+}
+----------------------------------
+
+Using the HTTP poller with a custom CA or a self-signed cert.
+
+If you have a self-signed cert you will need to convert your server's certificate to a valid `.jks` or `.p12` file. An easy way to do it is to run the following one-liner, substituting your server's URL and port for the placeholders `MYURL` and `MYPORT`.
+
+[source,sh]
+----------------------------------
+openssl s_client -showcerts -connect MYURL:MYPORT </dev/null | openssl x509 -outform PEM > downloaded_cert.pem; keytool -import -alias test -file downloaded_cert.pem -keystore downloaded_truststore.jks
+----------------------------------
+
+The above snippet will create two files, `downloaded_cert.pem` and `downloaded_truststore.jks`. You will be prompted to set a password for the `jks` file during this process. To configure Logstash, use a config like the one that follows.
+
+
+[source,ruby]
+----------------------------------
+ http_poller {
+   urls => {
+     myurl => "https://myhostname:1234"
+   }
+   truststore => "/path/to/downloaded_truststore.jks"
+   truststore_password => "mypassword"
+   interval => 30
+ }
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Http_poller Input Configuration Options
+
+This plugin supports the following configuration options plus the <> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|no +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No +| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_validation>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-urls>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No +|======================================================================= + +Also see <> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * There is no default value for this setting. + +Username to use with HTTP authentication for ALL requests. Note that you can also set this per-URL. +If you set this you must also set the `password` option. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to be used in conjunction with the username for HTTP authentication. + +[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] +===== `automatic_retries` + + * Value type is <> + * Default value is `1` + +How many times should the client retry a failing URL. We highly recommend NOT setting this value +to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! +Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. + +[id="{version}-plugins-{type}s-{plugin}-cacert"] +===== `cacert` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom X.509 CA (.pem certs) specify the path to that here + +[id="{version}-plugins-{type}s-{plugin}-client_cert"] +===== `client_cert` + + * Value type is <> + * There is no default value for this setting. 
+ +If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here + +[id="{version}-plugins-{type}s-{plugin}-client_key"] +===== `client_key` + + * Value type is <> + * There is no default value for this setting. + +If you're using a client certificate specify the path to the encryption key here + +[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] +===== `connect_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for a connection to be established. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-cookies"] +===== `cookies` + + * Value type is <> + * Default value is `true` + +Enable cookie support. With this enabled the client will persist cookies +across requests as a normal web browser would. Enabled by default + +[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] +===== `follow_redirects` + + * Value type is <> + * Default value is `true` + +Should redirects be followed? Defaults to `true` + +[id="{version}-plugins-{type}s-{plugin}-interval"] +===== `interval` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + +How often (in seconds) the urls will be called +DEPRECATED. Use 'schedule' option instead. +If both interval and schedule options are specified, interval +option takes higher precedence + +[id="{version}-plugins-{type}s-{plugin}-keepalive"] +===== `keepalive` + + * Value type is <> + * Default value is `true` + +Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least +one with this to fix interactions with broken keepalive implementations. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the keystore password here. +Note, most .jks files created with keytool require a password! + +[id="{version}-plugins-{type}s-{plugin}-keystore_type"] +===== `keystore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS` + +[id="{version}-plugins-{type}s-{plugin}-metadata_target"] +===== `metadata_target` + + * Value type is <> + * Default value is `"@metadata"` + +If you'd like to work with the request/response metadata. +Set this value to the name of the field you'd like to store a nested +hash of metadata. + +[id="{version}-plugins-{type}s-{plugin}-pool_max"] +===== `pool_max` + + * Value type is <> + * Default value is `50` + +Max number of concurrent connections. Defaults to `50` + +[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] +===== `pool_max_per_route` + + * Value type is <> + * Default value is `25` + +Max number of concurrent connections to a single host. Defaults to `25` + +[id="{version}-plugins-{type}s-{plugin}-proxy"] +===== `proxy` + + * Value type is <> + * There is no default value for this setting. + +If you'd like to use an HTTP proxy . This supports multiple configuration syntaxes: + +1. Proxy host in form: `http://proxy.org:1234` +2. 
Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}`
+3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}`
+
+[id="{version}-plugins-{type}s-{plugin}-request_timeout"]
+===== `request_timeout`
+
+ * Value type is <>
+ * Default value is `60`
+
+Timeout (in seconds) for the entire request. This plugin's HTTP client is based on
+https://github.com/cheald/manticore[Manticore]; for an example of its usage see
+https://github.com/logstash-plugins/logstash-input-http_poller
+
+[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"]
+===== `retry_non_idempotent`
+
+ * Value type is <>
+ * Default value is `false`
+
+If `automatic_retries` is enabled this will cause non-idempotent HTTP verbs (such as POST) to be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-schedule"]
+===== `schedule`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Schedule of when to periodically poll from the urls.
+The value is a hash with a single key, one of `"cron"`, `"every"`, `"in"`, or `"at"`,
+and a string value. Examples:
+
+  a) `{ "every" => "1h" }`
+  b) `{ "cron" => "* * * * * UTC" }`
+
+See the rufus-scheduler documentation for details about the different schedule types and value string formats.
+
+[id="{version}-plugins-{type}s-{plugin}-socket_timeout"]
+===== `socket_timeout`
+
+ * Value type is <>
+ * Default value is `10`
+
+Timeout (in seconds) to wait for data on the socket. Default is `10s`
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_validation"]
+===== `ssl_certificate_validation`
+
+ * Value type is <>
+ * Default value is `true`
+
+Set this to false to disable SSL/TLS certificate validation
+Note: setting this to false is generally considered insecure!
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Define the target field for placing the received data. If this setting is omitted, the data will be stored at the root (top level) of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs!
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Specify the truststore password here.
+Note, most .jks files created with keytool require a password!
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
+===== `truststore_type`
+
+ * Value type is <>
+ * Default value is `"JKS"`
+
+Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`
+
+[id="{version}-plugins-{type}s-{plugin}-urls"]
+===== `urls`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+A Hash of urls in this format: `"name" => "url"`.
+The name and the url will be passed in the output event.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <>
+ * Default value is `200`
+
+How long to wait before checking if the connection is stale before executing a request on a connection using keepalive.
+You may want to set this lower, possibly to 0, if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+
+
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/http_poller-v3.3.1.asciidoc b/docs/versioned-plugins/inputs/http_poller-v3.3.1.asciidoc
new file mode 100644
index 000000000..cefa22c27
--- /dev/null
+++ b/docs/versioned-plugins/inputs/http_poller-v3.3.1.asciidoc
@@ -0,0 +1,401 @@
+:plugin: http_poller
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.3.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-http_poller/blob/v3.3.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Http_poller input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This Logstash input plugin allows you to call an HTTP API, decode the output of it into event(s), and
+send them on their merry way. The idea behind this plugin came from a need to read the Spring Boot
+metrics endpoint, instead of configuring JMX to monitor Java application memory, GC, and so on.
+
+==== Example
+Reads from a list of urls and decodes the body of the response with a codec.
+The config should look like this:
+
+[source,ruby]
+----------------------------------
+input {
+  http_poller {
+    urls => {
+      test1 => "http://localhost:9200"
+      test2 => {
+        # Supports all options supported by ruby's Manticore HTTP client
+        method => get
+        user => "AzureDiamond"
+        password => "hunter2"
+        url => "http://localhost:9200/_cluster/health"
+        headers => {
+          Accept => "application/json"
+        }
+      }
+    }
+    request_timeout => 60
+    # Supports "cron", "every", "at" and "in" schedules by rufus scheduler
+    schedule => { cron => "* * * * * UTC"}
+    codec => "json"
+    # A hash of request metadata info (timing, response headers, etc.) will be sent here
+    metadata_target => "http_poller_metadata"
+  }
+}
+
+output {
+  stdout {
+    codec => rubydebug
+  }
+}
+----------------------------------
+
+Using the HTTP poller with a custom CA or a self-signed cert.
+
+If you have a self-signed cert you will need to convert your server's certificate to a valid `.jks` or `.p12` file. An easy way to do it is to run the following one-liner, substituting your server's URL and port for the placeholders `MYURL` and `MYPORT`.
+
+[source,sh]
+----------------------------------
+openssl s_client -showcerts -connect MYURL:MYPORT </dev/null | openssl x509 -outform PEM > downloaded_cert.pem; keytool -import -alias test -file downloaded_cert.pem -keystore downloaded_truststore.jks
+----------------------------------
+
+The above snippet will create two files, `downloaded_cert.pem` and `downloaded_truststore.jks`.
You will be prompted to set a password for the `jks` file during this process. To configure logstash use a config like the one that follows. + + +[source,ruby] +---------------------------------- + http_poller { + urls => { + myurl => "https://myhostname:1234" + } + truststore => "/path/to/downloaded_truststore.jks" + truststore_password => "mypassword" + interval => 30 + } +---------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Http_poller Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|no +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No +| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_validation>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-urls>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * There is no default value for this setting. + +Username to use with HTTP authentication for ALL requests. Note that you can also set this per-URL. +If you set this you must also set the `password` option. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to be used in conjunction with the username for HTTP authentication. 
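+
+Since the global `user`/`password` pair and per-URL credentials can both appear in one
+config, here is a short hedged sketch of how they combine. It is not from the original
+plugin docs; the URLs and credentials are placeholders, and the assumption (based on the
+per-URL Manticore options shown in the example above) is that credentials inside a URL
+spec apply to that request instead of the global pair:
+
+[source,ruby]
+----------------------------------
+input {
+  http_poller {
+    # Global credentials: used for every URL that does not carry its own
+    user => "AzureDiamond"
+    password => "hunter2"
+    urls => {
+      health => "http://localhost:9200/_cluster/health"
+      stats => {
+        url => "http://localhost:9200/_nodes/stats"
+        # Per-URL credentials for this request only (placeholder values)
+        user => "stats_user"
+        password => "stats_password"
+      }
+    }
+    schedule => { cron => "* * * * * UTC" }
+  }
+}
+----------------------------------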
+ +[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] +===== `automatic_retries` + + * Value type is <> + * Default value is `1` + +How many times should the client retry a failing URL. We highly recommend NOT setting this value +to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! +Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. + +[id="{version}-plugins-{type}s-{plugin}-cacert"] +===== `cacert` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom X.509 CA (.pem certs) specify the path to that here + +[id="{version}-plugins-{type}s-{plugin}-client_cert"] +===== `client_cert` + + * Value type is <> + * There is no default value for this setting. + +If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here + +[id="{version}-plugins-{type}s-{plugin}-client_key"] +===== `client_key` + + * Value type is <> + * There is no default value for this setting. + +If you're using a client certificate specify the path to the encryption key here + +[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] +===== `connect_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for a connection to be established. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-cookies"] +===== `cookies` + + * Value type is <> + * Default value is `true` + +Enable cookie support. With this enabled the client will persist cookies +across requests as a normal web browser would. Enabled by default + +[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] +===== `follow_redirects` + + * Value type is <> + * Default value is `true` + +Should redirects be followed? Defaults to `true` + +[id="{version}-plugins-{type}s-{plugin}-interval"] +===== `interval` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + +How often (in seconds) the urls will be called +DEPRECATED. Use 'schedule' option instead. +If both interval and schedule options are specified, interval +option takes higher precedence + +[id="{version}-plugins-{type}s-{plugin}-keepalive"] +===== `keepalive` + + * Value type is <> + * Default value is `true` + +Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least +one with this to fix interactions with broken keepalive implementations. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the keystore password here. +Note, most .jks files created with keytool require a password! + +[id="{version}-plugins-{type}s-{plugin}-keystore_type"] +===== `keystore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS` + +[id="{version}-plugins-{type}s-{plugin}-metadata_target"] +===== `metadata_target` + + * Value type is <> + * Default value is `"@metadata"` + +If you'd like to work with the request/response metadata. 
+Set this value to the name of the field you'd like to store a nested +hash of metadata. + +[id="{version}-plugins-{type}s-{plugin}-pool_max"] +===== `pool_max` + + * Value type is <> + * Default value is `50` + +Max number of concurrent connections. Defaults to `50` + +[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] +===== `pool_max_per_route` + + * Value type is <> + * Default value is `25` + +Max number of concurrent connections to a single host. Defaults to `25` + +[id="{version}-plugins-{type}s-{plugin}-proxy"] +===== `proxy` + + * Value type is <> + * There is no default value for this setting. + +If you'd like to use an HTTP proxy . This supports multiple configuration syntaxes: + +1. Proxy host in form: `http://proxy.org:1234` +2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}` +3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}` + +[id="{version}-plugins-{type}s-{plugin}-request_timeout"] +===== `request_timeout` + + * Value type is <> + * Default value is `60` + +This module makes it easy to add a very fully configured HTTP client to logstash +based on [Manticore](https://github.com/cheald/manticore). +For an example of its usage see https://github.com/logstash-plugins/logstash-input-http_poller +Timeout (in seconds) for the entire request + +[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"] +===== `retry_non_idempotent` + + * Value type is <> + * Default value is `false` + +If `automatic_retries` is enabled this will cause non-idempotent HTTP verbs (such as POST) to be retried. + +[id="{version}-plugins-{type}s-{plugin}-schedule"] +===== `schedule` + + * Value type is <> + * There is no default value for this setting. + +Schedule of when to periodically poll from the urls +Format: A hash with + + key: "cron" | "every" | "in" | "at" + + value: string +Examples: + a) { "every" => "1h" } + b) { "cron" => "* * * * * UTC" } +See: rufus/scheduler for details about different schedule options and value string format + +[id="{version}-plugins-{type}s-{plugin}-socket_timeout"] +===== `socket_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for data on the socket. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_validation"] +===== `ssl_certificate_validation` + + * Value type is <> + * Default value is `true` + +Set this to false to disable SSL/TLS certificate validation +Note: setting this to false is generally considered insecure! + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +Define the target field for placing the received data. If this setting is omitted, the data will be stored at the root (top level) of the event. + +[id="{version}-plugins-{type}s-{plugin}-truststore"] +===== `truststore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs! + +[id="{version}-plugins-{type}s-{plugin}-truststore_password"] +===== `truststore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the truststore password here. +Note, most .jks files created with keytool require a password! 
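+
+Looping back to the `schedule` option described above: as a hedged illustration of the
+accepted forms (not from the original plugin docs; the timestamp and durations are
+placeholder values), each of the following would be a valid `schedule` value, used one
+at a time:
+
+[source,ruby]
+----------------------------------
+  schedule => { "cron"  => "* * * * * UTC" }        # rufus-scheduler cron syntax
+  schedule => { "every" => "1h" }                   # repeat at a fixed interval
+  schedule => { "in"    => "30m" }                  # run once, after a delay
+  schedule => { "at"    => "2017-12-01 12:00:00" }  # run once, at a point in time
+----------------------------------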
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
+===== `truststore_type`
+
+ * Value type is <>
+ * Default value is `"JKS"`
+
+Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`
+
+[id="{version}-plugins-{type}s-{plugin}-urls"]
+===== `urls`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+A Hash of urls in this format: `"name" => "url"`.
+The name and the url will be passed in the output event.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <>
+ * Default value is `200`
+
+How long to wait before checking if the connection is stale before executing a request on a connection using keepalive.
+You may want to set this lower, possibly to 0, if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/http_poller-v3.3.2.asciidoc b/docs/versioned-plugins/inputs/http_poller-v3.3.2.asciidoc
new file mode 100644
index 000000000..0d7e63035
--- /dev/null
+++ b/docs/versioned-plugins/inputs/http_poller-v3.3.2.asciidoc
@@ -0,0 +1,401 @@
+:plugin: http_poller
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.3.2
+:release_date: 2017-08-18
+:changelog_url: https://github.com/logstash-plugins/logstash-input-http_poller/blob/v3.3.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Http_poller input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This Logstash input plugin allows you to call an HTTP API, decode the output of it into event(s), and
+send them on their merry way. The idea behind this plugin came from a need to read the Spring Boot
+metrics endpoint, instead of configuring JMX to monitor Java application memory, GC, and so on.
+
+==== Example
+Reads from a list of urls and decodes the body of the response with a codec.
+The config should look like this:
+
+[source,ruby]
+----------------------------------
+input {
+  http_poller {
+    urls => {
+      test1 => "http://localhost:9200"
+      test2 => {
+        # Supports all options supported by ruby's Manticore HTTP client
+        method => get
+        user => "AzureDiamond"
+        password => "hunter2"
+        url => "http://localhost:9200/_cluster/health"
+        headers => {
+          Accept => "application/json"
+        }
+      }
+    }
+    request_timeout => 60
+    # Supports "cron", "every", "at" and "in" schedules by rufus scheduler
+    schedule => { cron => "* * * * * UTC"}
+    codec => "json"
+    # A hash of request metadata info (timing, response headers, etc.)
will be sent here
+    metadata_target => "http_poller_metadata"
+  }
+}
+
+output {
+  stdout {
+    codec => rubydebug
+  }
+}
+----------------------------------
+
+Using the HTTP poller with a custom CA or a self-signed cert.
+
+If you have a self-signed cert you will need to convert your server's certificate to a valid `.jks` or `.p12` file. An easy way to do it is to run the following one-liner, substituting your server's URL and port for the placeholders `MYURL` and `MYPORT`.
+
+[source,sh]
+----------------------------------
+openssl s_client -showcerts -connect MYURL:MYPORT </dev/null | openssl x509 -outform PEM > downloaded_cert.pem; keytool -import -alias test -file downloaded_cert.pem -keystore downloaded_truststore.jks
+----------------------------------
+
+The above snippet will create two files, `downloaded_cert.pem` and `downloaded_truststore.jks`. You will be prompted to set a password for the `jks` file during this process. To configure Logstash, use a config like the one that follows.
+
+
+[source,ruby]
+----------------------------------
+ http_poller {
+   urls => {
+     myurl => "https://myhostname:1234"
+   }
+   truststore => "/path/to/downloaded_truststore.jks"
+   truststore_password => "mypassword"
+   interval => 30
+ }
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Http_poller Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-metadata_target>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No
+| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_validation>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-urls>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No
+|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * There is no default value for this setting. + +Username to use with HTTP authentication for ALL requests. Note that you can also set this per-URL. +If you set this you must also set the `password` option. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to be used in conjunction with the username for HTTP authentication. + +[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] +===== `automatic_retries` + + * Value type is <> + * Default value is `1` + +How many times should the client retry a failing URL. We highly recommend NOT setting this value +to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! +Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. + +[id="{version}-plugins-{type}s-{plugin}-cacert"] +===== `cacert` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom X.509 CA (.pem certs) specify the path to that here + +[id="{version}-plugins-{type}s-{plugin}-client_cert"] +===== `client_cert` + + * Value type is <> + * There is no default value for this setting. + +If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here + +[id="{version}-plugins-{type}s-{plugin}-client_key"] +===== `client_key` + + * Value type is <> + * There is no default value for this setting. + +If you're using a client certificate specify the path to the encryption key here + +[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] +===== `connect_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for a connection to be established. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-cookies"] +===== `cookies` + + * Value type is <> + * Default value is `true` + +Enable cookie support. With this enabled the client will persist cookies +across requests as a normal web browser would. Enabled by default + +[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] +===== `follow_redirects` + + * Value type is <> + * Default value is `true` + +Should redirects be followed? Defaults to `true` + +[id="{version}-plugins-{type}s-{plugin}-interval"] +===== `interval` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + +How often (in seconds) the urls will be called +DEPRECATED. Use 'schedule' option instead. +If both interval and schedule options are specified, interval +option takes higher precedence + +[id="{version}-plugins-{type}s-{plugin}-keepalive"] +===== `keepalive` + + * Value type is <> + * Default value is `true` + +Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least +one with this to fix interactions with broken keepalive implementations. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom keystore (`.jks`) specify that here. 
This does not work with .pem keys! + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the keystore password here. +Note, most .jks files created with keytool require a password! + +[id="{version}-plugins-{type}s-{plugin}-keystore_type"] +===== `keystore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS` + +[id="{version}-plugins-{type}s-{plugin}-metadata_target"] +===== `metadata_target` + + * Value type is <> + * Default value is `"@metadata"` + +If you'd like to work with the request/response metadata. +Set this value to the name of the field you'd like to store a nested +hash of metadata. + +[id="{version}-plugins-{type}s-{plugin}-pool_max"] +===== `pool_max` + + * Value type is <> + * Default value is `50` + +Max number of concurrent connections. Defaults to `50` + +[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] +===== `pool_max_per_route` + + * Value type is <> + * Default value is `25` + +Max number of concurrent connections to a single host. Defaults to `25` + +[id="{version}-plugins-{type}s-{plugin}-proxy"] +===== `proxy` + + * Value type is <> + * There is no default value for this setting. + +If you'd like to use an HTTP proxy . This supports multiple configuration syntaxes: + +1. Proxy host in form: `http://proxy.org:1234` +2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}` +3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}` + +[id="{version}-plugins-{type}s-{plugin}-request_timeout"] +===== `request_timeout` + + * Value type is <> + * Default value is `60` + +This module makes it easy to add a very fully configured HTTP client to logstash +based on [Manticore](https://github.com/cheald/manticore). +For an example of its usage see https://github.com/logstash-plugins/logstash-input-http_poller +Timeout (in seconds) for the entire request + +[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"] +===== `retry_non_idempotent` + + * Value type is <> + * Default value is `false` + +If `automatic_retries` is enabled this will cause non-idempotent HTTP verbs (such as POST) to be retried. + +[id="{version}-plugins-{type}s-{plugin}-schedule"] +===== `schedule` + + * Value type is <> + * There is no default value for this setting. + +Schedule of when to periodically poll from the urls +Format: A hash with + + key: "cron" | "every" | "in" | "at" + + value: string +Examples: + a) { "every" => "1h" } + b) { "cron" => "* * * * * UTC" } +See: rufus/scheduler for details about different schedule options and value string format + +[id="{version}-plugins-{type}s-{plugin}-socket_timeout"] +===== `socket_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for data on the socket. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_validation"] +===== `ssl_certificate_validation` + + * Value type is <> + * Default value is `true` + +Set this to false to disable SSL/TLS certificate validation +Note: setting this to false is generally considered insecure! + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +Define the target field for placing the received data. 
If this setting is omitted, the data will be stored at the root (top level) of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs!
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Specify the truststore password here.
+Note, most .jks files created with keytool require a password!
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
+===== `truststore_type`
+
+ * Value type is <>
+ * Default value is `"JKS"`
+
+Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`
+
+[id="{version}-plugins-{type}s-{plugin}-urls"]
+===== `urls`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+A Hash of urls in this format: `"name" => "url"`.
+The name and the url will be passed in the output event.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <>
+ * Default value is `200`
+
+How long to wait before checking if the connection is stale before executing a request on a connection using keepalive.
+You may want to set this lower, possibly to 0, if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/http_poller-v3.3.3.asciidoc b/docs/versioned-plugins/inputs/http_poller-v3.3.3.asciidoc
new file mode 100644
index 000000000..3f638b9d5
--- /dev/null
+++ b/docs/versioned-plugins/inputs/http_poller-v3.3.3.asciidoc
@@ -0,0 +1,401 @@
+:plugin: http_poller
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.3.3
+:release_date: 2017-09-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-http_poller/blob/v3.3.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Http_poller input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This Logstash input plugin allows you to call an HTTP API, decode the output of it into event(s), and
+send them on their merry way. The idea behind this plugin came from a need to read the Spring Boot
+metrics endpoint, instead of configuring JMX to monitor Java application memory, GC, and so on.
+
+==== Example
+Reads from a list of urls and decodes the body of the response with a codec.
+The config should look like this: + +[source,ruby] +---------------------------------- +input { + http_poller { + urls => { + test1 => "http://localhost:9200" + test2 => { + # Supports all options supported by ruby's Manticore HTTP client + method => get + user => "AzureDiamond" + password => "hunter2" + url => "http://localhost:9200/_cluster/health" + headers => { + Accept => "application/json" + } + } + } + request_timeout => 60 + # Supports "cron", "every", "at" and "in" schedules by rufus scheduler + schedule => { cron => "* * * * * UTC"} + codec => "json" + # A hash of request metadata info (timing, response headers, etc.) will be sent here + metadata_target => "http_poller_metadata" + } +} + +output { + stdout { + codec => rubydebug + } +} +---------------------------------- + +Using the HTTP poller with a custom CA or self-signed cert. + +If you have a self-signed cert you will need to convert your server's certificate to a valid `.jks` or `.p12` file. An easy way to do it is to run the following one-liner, substituting your server's URL and port for the placeholders `MYURL` and `MYPORT`. + +[source,ruby] +---------------------------------- +openssl s_client -showcerts -connect MYURL:MYPORT </dev/null | openssl x509 -outform PEM > downloaded_cert.pem; keytool -import -alias test -file downloaded_cert.pem -keystore downloaded_truststore.jks +---------------------------------- + +The above snippet will create two files `downloaded_cert.pem` and `downloaded_truststore.jks`. You will be prompted to set a password for the `jks` file during this process. To configure Logstash, use a config like the one that follows. + + +[source,ruby] +---------------------------------- + http_poller { + urls => { + myurl => "https://myhostname:1234" + } + truststore => "/path/to/downloaded_truststore.jks" + truststore_password => "mypassword" + interval => 30 + } +----------------------------------
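 + +If you want to confirm what was imported, you can list the contents of the generated truststore with the JDK's `keytool` utility. This is only a verification step; the file name and password below are the ones created by the one-liner above: + +[source,ruby] +---------------------------------- +# Lists the certificates stored in the truststore; prompts for the password you set earlier +keytool -list -keystore downloaded_truststore.jks +----------------------------------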
 + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Http_poller Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No +| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_validation>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-urls>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * There is no default value for this setting. + +Username to use with HTTP authentication for ALL requests. Note that you can also set this per-URL. +If you set this you must also set the `password` option. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to be used in conjunction with the username for HTTP authentication. + +[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] +===== `automatic_retries` + + * Value type is <> + * Default value is `1` + +How many times should the client retry a failing URL? We highly recommend NOT setting this value +to zero if keepalive is enabled. Some servers incorrectly end keepalives early, requiring a retry! +Note: if `retry_non_idempotent` is not set, only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. + +[id="{version}-plugins-{type}s-{plugin}-cacert"] +===== `cacert` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom X.509 CA (.pem certs), specify the path to it here. + +[id="{version}-plugins-{type}s-{plugin}-client_cert"] +===== `client_cert` + + * Value type is <> + * There is no default value for this setting.
 + +If you'd like to use a client certificate (note: most people don't want this), set the path to the x509 cert here. + +[id="{version}-plugins-{type}s-{plugin}-client_key"] +===== `client_key` + + * Value type is <> + * There is no default value for this setting. + +If you're using a client certificate, specify the path to the encryption key here. + +[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] +===== `connect_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for a connection to be established. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-cookies"] +===== `cookies` + + * Value type is <> + * Default value is `true` + +Enable cookie support. With this enabled the client will persist cookies +across requests as a normal web browser would. Enabled by default. + +[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] +===== `follow_redirects` + + * Value type is <> + * Default value is `true` + +Should redirects be followed? Defaults to `true` + +[id="{version}-plugins-{type}s-{plugin}-interval"] +===== `interval` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + +How often (in seconds) the urls will be called. +DEPRECATED: use the 'schedule' option instead. +If both the interval and schedule options are specified, the interval +option takes higher precedence. + +[id="{version}-plugins-{type}s-{plugin}-keepalive"] +===== `keepalive` + + * Value type is <> + * Default value is `true` + +Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least +one with this to fix interactions with broken keepalive implementations. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the keystore password here. +Note, most .jks files created with keytool require a password! + +[id="{version}-plugins-{type}s-{plugin}-keystore_type"] +===== `keystore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS` + +[id="{version}-plugins-{type}s-{plugin}-metadata_target"] +===== `metadata_target` + + * Value type is <> + * Default value is `"@metadata"` + +If you'd like to work with the request/response metadata, +set this value to the name of the field in which you'd like to store a nested +hash of metadata. + +[id="{version}-plugins-{type}s-{plugin}-pool_max"] +===== `pool_max` + + * Value type is <> + * Default value is `50` + +Max number of concurrent connections. Defaults to `50` + +[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] +===== `pool_max_per_route` + + * Value type is <> + * Default value is `25` + +Max number of concurrent connections to a single host. Defaults to `25` + +[id="{version}-plugins-{type}s-{plugin}-proxy"] +===== `proxy` + + * Value type is <> + * There is no default value for this setting. + +If you'd like to use an HTTP proxy, this setting supports multiple configuration syntaxes: + +1. Proxy host in form: `http://proxy.org:1234` +2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}` +3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}` + +[id="{version}-plugins-{type}s-{plugin}-request_timeout"] +===== `request_timeout` + + * Value type is <> + * Default value is `60` + +Timeout (in seconds) for the entire request. + +[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"] +===== `retry_non_idempotent` + + * Value type is <> + * Default value is `false` + +If `automatic_retries` is enabled, this will cause non-idempotent HTTP verbs (such as POST) to be retried. + +[id="{version}-plugins-{type}s-{plugin}-schedule"] +===== `schedule` + + * Value type is <> + * There is no default value for this setting. + +Schedule of when to periodically poll from the urls. +Format: A hash with + + key: "cron" | "every" | "in" | "at" + + value: string +Examples: + a) { "every" => "1h" } + b) { "cron" => "* * * * * UTC" } +See the rufus-scheduler documentation for details about the different schedule options and the value string format.
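 + +For instance, a minimal sketch of the "every" style described above, with an illustrative URL name and polling period (the commented line shows an equivalent hourly cron form): + +[source,ruby] +---------------------------------- +input { + http_poller { + urls => { health => "http://localhost:9200/_cluster/health" } + # Poll once per hour + schedule => { "every" => "1h" } + # Or with cron syntax: schedule => { "cron" => "0 * * * * UTC" } + codec => "json" + } +} +----------------------------------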
 + +[id="{version}-plugins-{type}s-{plugin}-socket_timeout"] +===== `socket_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for data on the socket. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_validation"] +===== `ssl_certificate_validation` + + * Value type is <> + * Default value is `true` + +Set this to false to disable SSL/TLS certificate validation. +Note: setting this to false is generally considered insecure! + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +Define the target field for placing the received data. If this setting is omitted, the data will be stored at the root (top level) of the event. + +[id="{version}-plugins-{type}s-{plugin}-truststore"] +===== `truststore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs! + +[id="{version}-plugins-{type}s-{plugin}-truststore_password"] +===== `truststore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the truststore password here. +Note, most .jks files created with keytool require a password! + +[id="{version}-plugins-{type}s-{plugin}-truststore_type"] +===== `truststore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS` + +[id="{version}-plugins-{type}s-{plugin}-urls"] +===== `urls` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +A Hash of urls in this format: `"name" => "url"`. +The name and the url will be passed in the output event. + +[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] +===== `validate_after_inactivity` + + * Value type is <> + * Default value is `200` + +How long to wait before checking whether a keepalive connection is stale prior to executing a request on it. +You may want to set this lower, possibly to `0`, if you get connection errors regularly. +Quoting the Apache Commons docs (this client is based on Apache Commons): +'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.' +See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info] + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/http_poller-v3.3.4.asciidoc b/docs/versioned-plugins/inputs/http_poller-v3.3.4.asciidoc new file mode 100644 index 000000000..7bbad1df5 --- /dev/null +++ b/docs/versioned-plugins/inputs/http_poller-v3.3.4.asciidoc @@ -0,0 +1,391 @@ +:plugin: http_poller +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.3.4 +:release_date: 2017-09-21 +:changelog_url: https://github.com/logstash-plugins/logstash-input-http_poller/blob/v3.3.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Http_poller input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This Logstash input plugin allows you to call an HTTP API, decode the output into event(s), and +send them on their merry way. The idea behind this plugin came from a need to read a Spring Boot +metrics endpoint, instead of configuring JMX to monitor Java application memory, GC, and the like. + +==== Example +Reads from a list of urls and decodes the body of the response with a codec. +The config should look like this: + +[source,ruby] +---------------------------------- +input { + http_poller { + urls => { + test1 => "http://localhost:9200" + test2 => { + # Supports all options supported by ruby's Manticore HTTP client + method => get + user => "AzureDiamond" + password => "hunter2" + url => "http://localhost:9200/_cluster/health" + headers => { + Accept => "application/json" + } + } + } + request_timeout => 60 + # Supports "cron", "every", "at" and "in" schedules by rufus scheduler + schedule => { cron => "* * * * * UTC"} + codec => "json" + # A hash of request metadata info (timing, response headers, etc.) will be sent here + metadata_target => "http_poller_metadata" + } +} + +output { + stdout { + codec => rubydebug + } +} +---------------------------------- + +Using the HTTP poller with a custom CA or self-signed cert. + +If you have a self-signed cert you will need to convert your server's certificate to a valid `.jks` or `.p12` file. An easy way to do it is to run the following one-liner, substituting your server's URL and port for the placeholders `MYURL` and `MYPORT`.
 + +[source,ruby] +---------------------------------- +openssl s_client -showcerts -connect MYURL:MYPORT </dev/null | openssl x509 -outform PEM > downloaded_cert.pem; keytool -import -alias test -file downloaded_cert.pem -keystore downloaded_truststore.jks +---------------------------------- + +The above snippet will create two files `downloaded_cert.pem` and `downloaded_truststore.jks`. You will be prompted to set a password for the `jks` file during this process. To configure Logstash, use a config like the one that follows. + + +[source,ruby] +---------------------------------- + http_poller { + urls => { + myurl => "https://myhostname:1234" + } + truststore => "/path/to/downloaded_truststore.jks" + truststore_password => "mypassword" + interval => 30 + } +---------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Http_poller Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No +| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-urls>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * There is no default value for this setting. + +Username to use with HTTP authentication for ALL requests. Note that you can also set this per-URL. +If you set this you must also set the `password` option.
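 + +As a sketch, global credentials combined with a per-URL override might look like the following; host names and credentials are placeholders, and the per-URL form mirrors the `test2` entry in the example at the top of this page: + +[source,ruby] +---------------------------------- +input { + http_poller { + # Applied to every request unless a URL entry overrides them + user => "stats_reader" + password => "changeme" + urls => { + default_auth => "http://service1.example.org:8080/status" + override_auth => { + url => "http://service2.example.org:8080/status" + user => "other_user" + password => "other_secret" + } + } + schedule => { "every" => "30s" } + } +} +----------------------------------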
 + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to be used in conjunction with the username for HTTP authentication. + +[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] +===== `automatic_retries` + + * Value type is <> + * Default value is `1` + +How many times should the client retry a failing URL? We highly recommend NOT setting this value +to zero if keepalive is enabled. Some servers incorrectly end keepalives early, requiring a retry! +Note: if `retry_non_idempotent` is not set, only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. + +[id="{version}-plugins-{type}s-{plugin}-cacert"] +===== `cacert` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom X.509 CA (.pem certs), specify the path to it here. + +[id="{version}-plugins-{type}s-{plugin}-client_cert"] +===== `client_cert` + + * Value type is <> + * There is no default value for this setting. + +If you'd like to use a client certificate (note: most people don't want this), set the path to the x509 cert here. + +[id="{version}-plugins-{type}s-{plugin}-client_key"] +===== `client_key` + + * Value type is <> + * There is no default value for this setting. + +If you're using a client certificate, specify the path to the encryption key here. + +[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] +===== `connect_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for a connection to be established. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-cookies"] +===== `cookies` + + * Value type is <> + * Default value is `true` + +Enable cookie support. With this enabled the client will persist cookies +across requests as a normal web browser would. Enabled by default. + +[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] +===== `follow_redirects` + + * Value type is <> + * Default value is `true` + +Should redirects be followed? Defaults to `true` + +[id="{version}-plugins-{type}s-{plugin}-interval"] +===== `interval` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + +How often (in seconds) the urls will be called. +DEPRECATED: use the 'schedule' option instead. +If both the interval and schedule options are specified, the interval +option takes higher precedence. + +[id="{version}-plugins-{type}s-{plugin}-keepalive"] +===== `keepalive` + + * Value type is <> + * Default value is `true` + +Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least +one with this to fix interactions with broken keepalive implementations. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the keystore password here. +Note, most .jks files created with keytool require a password! + +[id="{version}-plugins-{type}s-{plugin}-keystore_type"] +===== `keystore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS` + +[id="{version}-plugins-{type}s-{plugin}-metadata_target"] +===== `metadata_target` + + * Value type is <> + * Default value is `"@metadata"` + +If you'd like to work with the request/response metadata, +set this value to the name of the field in which you'd like to store a nested +hash of metadata. + +[id="{version}-plugins-{type}s-{plugin}-pool_max"] +===== `pool_max` + + * Value type is <> + * Default value is `50` + +Max number of concurrent connections. Defaults to `50` + +[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] +===== `pool_max_per_route` + + * Value type is <> + * Default value is `25` + +Max number of concurrent connections to a single host. Defaults to `25` + +[id="{version}-plugins-{type}s-{plugin}-proxy"] +===== `proxy` + + * Value type is <> + * There is no default value for this setting. + +If you'd like to use an HTTP proxy, this setting supports multiple configuration syntaxes: + +1. Proxy host in form: `http://proxy.org:1234` +2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}` +3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}` + +[id="{version}-plugins-{type}s-{plugin}-request_timeout"] +===== `request_timeout` + + * Value type is <> + * Default value is `60` + +Timeout (in seconds) for the entire request. + +[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"] +===== `retry_non_idempotent` + + * Value type is <> + * Default value is `false` + +If `automatic_retries` is enabled, this will cause non-idempotent HTTP verbs (such as POST) to be retried. + +[id="{version}-plugins-{type}s-{plugin}-schedule"] +===== `schedule` + + * Value type is <> + * There is no default value for this setting. + +Schedule of when to periodically poll from the urls. +Format: A hash with + + key: "cron" | "every" | "in" | "at" + + value: string +Examples: + a) { "every" => "1h" } + b) { "cron" => "* * * * * UTC" } +See the rufus-scheduler documentation for details about the different schedule options and the value string format. + +[id="{version}-plugins-{type}s-{plugin}-socket_timeout"] +===== `socket_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for data on the socket. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +Define the target field for placing the received data. If this setting is omitted, the data will be stored at the root (top level) of the event. + +[id="{version}-plugins-{type}s-{plugin}-truststore"] +===== `truststore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs! + +[id="{version}-plugins-{type}s-{plugin}-truststore_password"] +===== `truststore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the truststore password here. +Note, most .jks files created with keytool require a password! + +[id="{version}-plugins-{type}s-{plugin}-truststore_type"] +===== `truststore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`
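 + +If your store is PKCS12 rather than JKS, the type has to be stated explicitly. A minimal sketch, assuming a `.p12` truststore already exists at the given path (path and password are placeholders): + +[source,ruby] +---------------------------------- + http_poller { + urls => { secure => "https://myhostname:1234" } + truststore => "/path/to/truststore.p12" + truststore_password => "mypassword" + # Required because the default type is JKS + truststore_type => "PKCS12" + schedule => { "every" => "1m" } + } +----------------------------------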
 + +[id="{version}-plugins-{type}s-{plugin}-urls"] +===== `urls` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +A Hash of urls in this format: `"name" => "url"`. +The name and the url will be passed in the output event. + +[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] +===== `validate_after_inactivity` + + * Value type is <> + * Default value is `200` + +How long to wait before checking whether a keepalive connection is stale prior to executing a request on it. +You may want to set this lower, possibly to `0`, if you get connection errors regularly. +Quoting the Apache Commons docs (this client is based on Apache Commons): +'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.' +See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info] + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/http_poller-v4.0.0.asciidoc b/docs/versioned-plugins/inputs/http_poller-v4.0.0.asciidoc new file mode 100644 index 000000000..fa381e247 --- /dev/null +++ b/docs/versioned-plugins/inputs/http_poller-v4.0.0.asciidoc @@ -0,0 +1,380 @@ +:plugin: http_poller +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.0.0 +:release_date: 2017-08-02 +:changelog_url: https://github.com/logstash-plugins/logstash-input-http_poller/blob/v4.0.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Http_poller input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This Logstash input plugin allows you to call an HTTP API, decode the output into event(s), and +send them on their merry way. The idea behind this plugin came from a need to read a Spring Boot +metrics endpoint, instead of configuring JMX to monitor Java application memory, GC, and the like. + +==== Example +Reads from a list of urls and decodes the body of the response with a codec. +The config should look like this: + +[source,ruby] +---------------------------------- +input { + http_poller { + urls => { + test1 => "http://localhost:9200" + test2 => { + # Supports all options supported by ruby's Manticore HTTP client + method => get + user => "AzureDiamond" + password => "hunter2" + url => "http://localhost:9200/_cluster/health" + headers => { + Accept => "application/json" + } + } + } + request_timeout => 60 + # Supports "cron", "every", "at" and "in" schedules by rufus scheduler + schedule => { cron => "* * * * * UTC"} + codec => "json" + # A hash of request metadata info (timing, response headers, etc.) will be sent here
 + metadata_target => "http_poller_metadata" + } +} + +output { + stdout { + codec => rubydebug + } +} +---------------------------------- + +Using the HTTP poller with a custom CA or self-signed cert. + +If you have a self-signed cert you will need to convert your server's certificate to a valid `.jks` or `.p12` file. An easy way to do it is to run the following one-liner, substituting your server's URL and port for the placeholders `MYURL` and `MYPORT`. + +[source,ruby] +---------------------------------- +openssl s_client -showcerts -connect MYURL:MYPORT </dev/null | openssl x509 -outform PEM > downloaded_cert.pem; keytool -import -alias test -file downloaded_cert.pem -keystore downloaded_truststore.jks +---------------------------------- + +The above snippet will create two files `downloaded_cert.pem` and `downloaded_truststore.jks`. You will be prompted to set a password for the `jks` file during this process. To configure Logstash, use a config like the one that follows. + + +[source,ruby] +---------------------------------- + http_poller { + urls => { + myurl => "https://myhostname:1234" + } + truststore => "/path/to/downloaded_truststore.jks" + truststore_password => "mypassword" + schedule => { cron => "* * * * * UTC"} + } +---------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Http_poller Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No +| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_validation>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-urls>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins.
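 + +Several of these options interact. For example, the sections below recommend keeping `automatic_retries` at one or more whenever `keepalive` is enabled; a sketch of that pairing, with a placeholder endpoint: + +[source,ruby] +---------------------------------- + http_poller { + urls => { health => "http://localhost:9200/_cluster/health" } + keepalive => true + # With keepalive on, allow at least one retry to paper over + # servers that close idle connections early + automatic_retries => 1 + schedule => { "cron" => "* * * * * UTC" } + } +----------------------------------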
 + +  + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * There is no default value for this setting. + +Username to use with HTTP authentication for ALL requests. Note that you can also set this per-URL. +If you set this you must also set the `password` option. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to be used in conjunction with the username for HTTP authentication. + +[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] +===== `automatic_retries` + + * Value type is <> + * Default value is `1` + +How many times should the client retry a failing URL? We highly recommend NOT setting this value +to zero if keepalive is enabled. Some servers incorrectly end keepalives early, requiring a retry! +Note: if `retry_non_idempotent` is not set, only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. + +[id="{version}-plugins-{type}s-{plugin}-cacert"] +===== `cacert` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom X.509 CA (.pem certs), specify the path to it here. + +[id="{version}-plugins-{type}s-{plugin}-client_cert"] +===== `client_cert` + + * Value type is <> + * There is no default value for this setting. + +If you'd like to use a client certificate (note: most people don't want this), set the path to the x509 cert here. + +[id="{version}-plugins-{type}s-{plugin}-client_key"] +===== `client_key` + + * Value type is <> + * There is no default value for this setting. + +If you're using a client certificate, specify the path to the encryption key here. + +[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] +===== `connect_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for a connection to be established. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-cookies"] +===== `cookies` + + * Value type is <> + * Default value is `true` + +Enable cookie support. With this enabled the client will persist cookies +across requests as a normal web browser would. Enabled by default. + +[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] +===== `follow_redirects` + + * Value type is <> + * Default value is `true` + +Should redirects be followed? Defaults to `true` + +[id="{version}-plugins-{type}s-{plugin}-keepalive"] +===== `keepalive` + + * Value type is <> + * Default value is `true` + +Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least +one with this to fix interactions with broken keepalive implementations. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the keystore password here. +Note, most .jks files created with keytool require a password! + +[id="{version}-plugins-{type}s-{plugin}-keystore_type"] +===== `keystore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS` + +[id="{version}-plugins-{type}s-{plugin}-metadata_target"] +===== `metadata_target` + + * Value type is <> + * Default value is `"@metadata"` + +If you'd like to work with the request/response metadata, +set this value to the name of the field in which you'd like to store a nested +hash of metadata. + +[id="{version}-plugins-{type}s-{plugin}-pool_max"] +===== `pool_max` + + * Value type is <> + * Default value is `50` + +Max number of concurrent connections. Defaults to `50` + +[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] +===== `pool_max_per_route` + + * Value type is <> + * Default value is `25` + +Max number of concurrent connections to a single host. Defaults to `25` + +[id="{version}-plugins-{type}s-{plugin}-proxy"] +===== `proxy` + + * Value type is <> + * There is no default value for this setting. + +If you'd like to use an HTTP proxy, this setting supports multiple configuration syntaxes: + +1. Proxy host in form: `http://proxy.org:1234` +2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}` +3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}` + +[id="{version}-plugins-{type}s-{plugin}-request_timeout"] +===== `request_timeout` + + * Value type is <> + * Default value is `60` + +Timeout (in seconds) for the entire request. + +[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"] +===== `retry_non_idempotent` + + * Value type is <> + * Default value is `false` + +If `automatic_retries` is enabled, this will cause non-idempotent HTTP verbs (such as POST) to be retried. + +[id="{version}-plugins-{type}s-{plugin}-schedule"] +===== `schedule` + + * Value type is <> + * There is no default value for this setting. + +Schedule of when to periodically poll from the urls. +Format: A hash with + + key: "cron" | "every" | "in" | "at" + + value: string +Examples: + a) { "every" => "1h" } + b) { "cron" => "* * * * * UTC" } +See the rufus-scheduler documentation for details about the different schedule options and the value string format. + +[id="{version}-plugins-{type}s-{plugin}-socket_timeout"] +===== `socket_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for data on the socket. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +Define the target field for placing the received data. If this setting is omitted, the data will be stored at the root (top level) of the event. + +[id="{version}-plugins-{type}s-{plugin}-truststore"] +===== `truststore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs! + +[id="{version}-plugins-{type}s-{plugin}-truststore_password"] +===== `truststore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the truststore password here. +Note, most .jks files created with keytool require a password! + +[id="{version}-plugins-{type}s-{plugin}-truststore_type"] +===== `truststore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`
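 + +To keep polled payloads out of the event root, the `target` option described above can wrap the decoded body in a single field. A sketch, with the field name chosen arbitrarily: + +[source,ruby] +---------------------------------- + http_poller { + urls => { health => "http://localhost:9200/_cluster/health" } + codec => "json" + # Decoded response is placed under this field instead of the event root + target => "http_poller_body" + schedule => { "every" => "1m" } + } +----------------------------------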
 + +[id="{version}-plugins-{type}s-{plugin}-urls"] +===== `urls` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +A Hash of urls in this format: `"name" => "url"`. +The name and the url will be passed in the output event. + +[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] +===== `validate_after_inactivity` + + * Value type is <> + * Default value is `200` + +How long to wait before checking whether a keepalive connection is stale prior to executing a request on it. +You may want to set this lower, possibly to `0`, if you get connection errors regularly. +Quoting the Apache Commons docs (this client is based on Apache Commons): +'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.' +See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info] + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/http_poller-v4.0.1.asciidoc b/docs/versioned-plugins/inputs/http_poller-v4.0.1.asciidoc new file mode 100644 index 000000000..2b333402e --- /dev/null +++ b/docs/versioned-plugins/inputs/http_poller-v4.0.1.asciidoc @@ -0,0 +1,380 @@ +:plugin: http_poller +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.0.1 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-input-http_poller/blob/v4.0.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Http_poller input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This Logstash input plugin allows you to call an HTTP API, decode the output into event(s), and +send them on their merry way. The idea behind this plugin came from a need to read a Spring Boot +metrics endpoint, instead of configuring JMX to monitor Java application memory, GC, and the like. + +==== Example +Reads from a list of urls and decodes the body of the response with a codec. +The config should look like this: + +[source,ruby] +---------------------------------- +input { + http_poller { + urls => { + test1 => "http://localhost:9200" + test2 => { + # Supports all options supported by ruby's Manticore HTTP client + method => get + user => "AzureDiamond" + password => "hunter2" + url => "http://localhost:9200/_cluster/health" + headers => { + Accept => "application/json" + } + } + } + request_timeout => 60 + # Supports "cron", "every", "at" and "in" schedules by rufus scheduler + schedule => { cron => "* * * * * UTC"} + codec => "json" + # A hash of request metadata info (timing, response headers, etc.) will be sent here + metadata_target => "http_poller_metadata" + } +} + +output { + stdout { + codec => rubydebug + } +} +---------------------------------- + +Using the HTTP poller with a custom CA or self-signed cert. + +If you have a self-signed cert you will need to convert your server's certificate to a valid `.jks` or `.p12` file. An easy way to do it is to run the following one-liner, substituting your server's URL and port for the placeholders `MYURL` and `MYPORT`. + +[source,ruby] +---------------------------------- +openssl s_client -showcerts -connect MYURL:MYPORT </dev/null | openssl x509 -outform PEM > downloaded_cert.pem; keytool -import -alias test -file downloaded_cert.pem -keystore downloaded_truststore.jks +---------------------------------- + +The above snippet will create two files `downloaded_cert.pem` and `downloaded_truststore.jks`. You will be prompted to set a password for the `jks` file during this process. To configure Logstash, use a config like the one that follows. + + +[source,ruby] +---------------------------------- + http_poller { + urls => { + myurl => "https://myhostname:1234" + } + truststore => "/path/to/downloaded_truststore.jks" + truststore_password => "mypassword" + schedule => { cron => "* * * * * UTC"} + } +---------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Http_poller Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No +| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_validation>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-urls>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins.
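 + +The three timeout settings in the table above cover different phases of a request; a sketch that tightens all of them, with values that are purely illustrative: + +[source,ruby] +---------------------------------- + http_poller { + urls => { status => "http://service.example.org/status" } + connect_timeout => 5 # seconds to establish the connection + socket_timeout => 5 # seconds to wait for data on the socket + request_timeout => 20 # seconds for the entire request + schedule => { "every" => "30s" } + } +----------------------------------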
 + +  + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * There is no default value for this setting. + +Username to use with HTTP authentication for ALL requests. Note that you can also set this per-URL. +If you set this you must also set the `password` option. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to be used in conjunction with the username for HTTP authentication. + +[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] +===== `automatic_retries` + + * Value type is <> + * Default value is `1` + +How many times should the client retry a failing URL? We highly recommend NOT setting this value +to zero if keepalive is enabled. Some servers incorrectly end keepalives early, requiring a retry! +Note: if `retry_non_idempotent` is not set, only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. + +[id="{version}-plugins-{type}s-{plugin}-cacert"] +===== `cacert` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom X.509 CA (.pem certs), specify the path to it here. + +[id="{version}-plugins-{type}s-{plugin}-client_cert"] +===== `client_cert` + + * Value type is <> + * There is no default value for this setting. + +If you'd like to use a client certificate (note: most people don't want this), set the path to the x509 cert here. + +[id="{version}-plugins-{type}s-{plugin}-client_key"] +===== `client_key` + + * Value type is <> + * There is no default value for this setting. + +If you're using a client certificate, specify the path to the encryption key here. + +[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] +===== `connect_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for a connection to be established. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-cookies"] +===== `cookies` + + * Value type is <> + * Default value is `true` + +Enable cookie support. With this enabled the client will persist cookies +across requests as a normal web browser would. Enabled by default. + +[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] +===== `follow_redirects` + + * Value type is <> + * Default value is `true` + +Should redirects be followed? Defaults to `true` + +[id="{version}-plugins-{type}s-{plugin}-keepalive"] +===== `keepalive` + + * Value type is <> + * Default value is `true` + +Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least +one with this to fix interactions with broken keepalive implementations. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the keystore password here. +Note, most .jks files created with keytool require a password! + +[id="{version}-plugins-{type}s-{plugin}-keystore_type"] +===== `keystore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS` + +[id="{version}-plugins-{type}s-{plugin}-metadata_target"] +===== `metadata_target` + + * Value type is <> + * Default value is `"@metadata"` + +If you'd like to work with the request/response metadata, +set this value to the name of the field in which you'd like to store a nested +hash of metadata. + +[id="{version}-plugins-{type}s-{plugin}-pool_max"] +===== `pool_max` + + * Value type is <> + * Default value is `50` + +Max number of concurrent connections. Defaults to `50` + +[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] +===== `pool_max_per_route` + + * Value type is <> + * Default value is `25` + +Max number of concurrent connections to a single host. Defaults to `25` + +[id="{version}-plugins-{type}s-{plugin}-proxy"] +===== `proxy` + + * Value type is <> + * There is no default value for this setting. + +If you'd like to use an HTTP proxy, this setting supports multiple configuration syntaxes: + +1. Proxy host in form: `http://proxy.org:1234` +2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}` +3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}` + +[id="{version}-plugins-{type}s-{plugin}-request_timeout"] +===== `request_timeout` + + * Value type is <> + * Default value is `60` + +Timeout (in seconds) for the entire request. + +[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"] +===== `retry_non_idempotent` + + * Value type is <> + * Default value is `false` + +If `automatic_retries` is enabled, this will cause non-idempotent HTTP verbs (such as POST) to be retried. + +[id="{version}-plugins-{type}s-{plugin}-schedule"] +===== `schedule` + + * Value type is <> + * There is no default value for this setting. + +Schedule of when to periodically poll from the urls. +Format: A hash with + + key: "cron" | "every" | "in" | "at" + + value: string +Examples: + a) { "every" => "1h" } + b) { "cron" => "* * * * * UTC" } +See the rufus-scheduler documentation for details about the different schedule options and the value string format. + +[id="{version}-plugins-{type}s-{plugin}-socket_timeout"] +===== `socket_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for data on the socket. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-target"] +===== `target` + + * Value type is <> + * There is no default value for this setting. + +Define the target field for placing the received data. If this setting is omitted, the data will be stored at the root (top level) of the event. + +[id="{version}-plugins-{type}s-{plugin}-truststore"] +===== `truststore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs! + +[id="{version}-plugins-{type}s-{plugin}-truststore_password"] +===== `truststore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the truststore password here. +Note, most .jks files created with keytool require a password! + +[id="{version}-plugins-{type}s-{plugin}-truststore_type"] +===== `truststore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`
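 + +For the `proxy` option described above, any of the three syntaxes works; a sketch of the hash form, where the proxy host and port are placeholders: + +[source,ruby] +---------------------------------- + http_poller { + urls => { status => "http://service.example.org/status" } + # Syntax 2 from the proxy section above + proxy => { host => "proxy.example.org", port => 3128, scheme => 'http' } + schedule => { "every" => "1m" } + } +----------------------------------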
One of `JKS` or `PKCS12`. Default is `JKS` + +[id="{version}-plugins-{type}s-{plugin}-urls"] +===== `urls` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +A Hash of urls in this format : `"name" => "url"`. +The name and the url will be passed in the outputed event + +[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] +===== `validate_after_inactivity` + + * Value type is <> + * Default value is `200` + +How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. +# You may want to set this lower, possibly to 0 if you get connection errors regularly +Quoting the Apache commons docs (this client is based Apache Commmons): +'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.' +See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info] + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/http_poller-v4.0.2.asciidoc b/docs/versioned-plugins/inputs/http_poller-v4.0.2.asciidoc new file mode 100644 index 000000000..077a47ed0 --- /dev/null +++ b/docs/versioned-plugins/inputs/http_poller-v4.0.2.asciidoc @@ -0,0 +1,380 @@ +:plugin: http_poller +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.0.2 +:release_date: 2017-09-07 +:changelog_url: https://github.com/logstash-plugins/logstash-input-http_poller/blob/v4.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Http_poller input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This Logstash input plugin allows you to call an HTTP API, decode the output of it into event(s), and +send them on their merry way. The idea behind this plugins came from a need to read springboot +metrics endpoint, instead of configuring jmx to monitor my java application memory/gc/ etc. + +==== Example +Reads from a list of urls and decodes the body of the response with a codec. +The config should look like this: + +[source,ruby] +---------------------------------- +input { + http_poller { + urls => { + test1 => "http://localhost:9200" + test2 => { + # Supports all options supported by ruby's Manticore HTTP client + method => get + user => "AzureDiamond" + password => "hunter2" + url => "http://localhost:9200/_cluster/health" + headers => { + Accept => "application/json" + } + } + } + request_timeout => 60 + # Supports "cron", "every", "at" and "in" schedules by rufus scheduler + schedule => { cron => "* * * * * UTC"} + codec => "json" + # A hash of request metadata info (timing, response headers, etc.) 
+[id="{version}-plugins-{type}s-{plugin}-socket_timeout"]
+===== `socket_timeout`
+
+ * Value type is <>
+ * Default value is `10`
+
+Timeout (in seconds) to wait for data on the socket.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Define the target field for placing the received data. If this setting is omitted, the data will be stored at the root (top level) of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs!
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Specify the truststore password here.
+Note, most .jks files created with keytool require a password!
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
+===== `truststore_type`
+
+ * Value type is <>
+ * Default value is `"JKS"`
+
+Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`
+
+[id="{version}-plugins-{type}s-{plugin}-urls"]
+===== `urls`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+A Hash of urls in this format: `"name" => "url"`.
+The name and the url will be passed in the outputted event.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <>
+ * Default value is `200`
+
+How long (in milliseconds) to wait before checking whether a keepalive connection is stale before executing a request on it.
+You may want to set this lower, possibly to 0, if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/http_poller-v4.0.2.asciidoc b/docs/versioned-plugins/inputs/http_poller-v4.0.2.asciidoc
new file mode 100644
index 000000000..077a47ed0
--- /dev/null
+++ b/docs/versioned-plugins/inputs/http_poller-v4.0.2.asciidoc
@@ -0,0 +1,380 @@
+:plugin: http_poller
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.2
+:release_date: 2017-09-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-http_poller/blob/v4.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Http_poller input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This Logstash input plugin allows you to call an HTTP API, decode the output into event(s), and
+send them on their merry way. The idea behind this plugin came from a need to read a Spring Boot
+metrics endpoint instead of configuring JMX to monitor Java application memory, GC, and the like.
+
+==== Example
+Reads from a list of urls and decodes the body of the response with a codec.
+The config should look like this:
+
+[source,ruby]
+----------------------------------
+input {
+  http_poller {
+    urls => {
+      test1 => "http://localhost:9200"
+      test2 => {
+        # Supports all options supported by ruby's Manticore HTTP client
+        method => get
+        user => "AzureDiamond"
+        password => "hunter2"
+        url => "http://localhost:9200/_cluster/health"
+        headers => {
+          Accept => "application/json"
+        }
+      }
+    }
+    request_timeout => 60
+    # Supports "cron", "every", "at" and "in" schedules by rufus scheduler
+    schedule => { cron => "* * * * * UTC"}
+    codec => "json"
+    # A hash of request metadata info (timing, response headers, etc.) will be sent here
+    metadata_target => "http_poller_metadata"
+  }
+}
+
+output {
+  stdout {
+    codec => rubydebug
+  }
+}
+----------------------------------
+
+Using the HTTP poller with a custom CA or a self-signed cert.
+
+If you have a self-signed cert you will need to convert your server's certificate to a valid `.jks` or `.p12` file. An easy way to do it is to run the following one-liner, substituting your server's URL and port for the placeholders `MYURL` and `MYPORT`.
+
+[source,ruby]
+----------------------------------
+openssl s_client -showcerts -connect MYURL:MYPORT </dev/null | openssl x509 -outform PEM > downloaded_cert.pem; keytool -import -alias test -file downloaded_cert.pem -keystore downloaded_truststore.jks
+----------------------------------
+
+The above snippet will create two files, `downloaded_cert.pem` and `downloaded_truststore.jks`. You will be prompted to set a password for the `jks` file during this process. To configure Logstash, use a config like the one that follows.
+
+
+[source,ruby]
+----------------------------------
+ http_poller {
+   urls => {
+     myurl => "https://myhostname:1234"
+   }
+   truststore => "/path/to/downloaded_truststore.jks"
+   truststore_password => "mypassword"
+   schedule => { cron => "* * * * * UTC"}
+ }
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Http_poller Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-metadata_target>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_validation>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-urls>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No
+|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * There is no default value for this setting. + +Username to use with HTTP authentication for ALL requests. Note that you can also set this per-URL. +If you set this you must also set the `password` option. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to be used in conjunction with the username for HTTP authentication. + +[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] +===== `automatic_retries` + + * Value type is <> + * Default value is `1` + +How many times should the client retry a failing URL. We highly recommend NOT setting this value +to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! +Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. + +[id="{version}-plugins-{type}s-{plugin}-cacert"] +===== `cacert` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom X.509 CA (.pem certs) specify the path to that here + +[id="{version}-plugins-{type}s-{plugin}-client_cert"] +===== `client_cert` + + * Value type is <> + * There is no default value for this setting. + +If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here + +[id="{version}-plugins-{type}s-{plugin}-client_key"] +===== `client_key` + + * Value type is <> + * There is no default value for this setting. + +If you're using a client certificate specify the path to the encryption key here + +[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] +===== `connect_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for a connection to be established. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-cookies"] +===== `cookies` + + * Value type is <> + * Default value is `true` + +Enable cookie support. With this enabled the client will persist cookies +across requests as a normal web browser would. Enabled by default + +[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] +===== `follow_redirects` + + * Value type is <> + * Default value is `true` + +Should redirects be followed? Defaults to `true` + +[id="{version}-plugins-{type}s-{plugin}-keepalive"] +===== `keepalive` + + * Value type is <> + * Default value is `true` + +Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least +one with this to fix interactions with broken keepalive implementations. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the keystore password here. +Note, most .jks files created with keytool require a password! + +[id="{version}-plugins-{type}s-{plugin}-keystore_type"] +===== `keystore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the keystore type here. 
One of `JKS` or `PKCS12`. Default is `JKS`
+
+[id="{version}-plugins-{type}s-{plugin}-metadata_target"]
+===== `metadata_target`
+
+ * Value type is <>
+ * Default value is `"@metadata"`
+
+If you'd like to work with the request/response metadata, set this value
+to the name of the field in which you'd like to store a nested
+hash of metadata.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is <>
+ * Default value is `50`
+
+Max number of concurrent connections.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is <>
+ * Default value is `25`
+
+Max number of concurrent connections to a single host.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you'd like to use an HTTP proxy, set it here. This setting supports multiple configuration syntaxes:
+
+1. Proxy host in form: `http://proxy.org:1234`
+2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}`
+3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}`
+
+[id="{version}-plugins-{type}s-{plugin}-request_timeout"]
+===== `request_timeout`
+
+ * Value type is <>
+ * Default value is `60`
+
+Timeout (in seconds) for the entire request.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"]
+===== `retry_non_idempotent`
+
+ * Value type is <>
+ * Default value is `false`
+
+If `automatic_retries` is enabled, this will cause non-idempotent HTTP verbs (such as POST) to be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-schedule"]
+===== `schedule`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Schedule of when to periodically poll from the urls.
+Format: A hash with
+
+ * key: "cron" | "every" | "in" | "at"
+ * value: string
+
+Examples:
+
+ a) `{ "every" => "1h" }`
+ b) `{ "cron" => "* * * * * UTC" }`
+
+See rufus/scheduler for details about the different schedule options and the value string format.
+
+[id="{version}-plugins-{type}s-{plugin}-socket_timeout"]
+===== `socket_timeout`
+
+ * Value type is <>
+ * Default value is `10`
+
+Timeout (in seconds) to wait for data on the socket.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Define the target field for placing the received data. If this setting is omitted, the data will be stored at the root (top level) of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs!
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Specify the truststore password here.
+Note, most .jks files created with keytool require a password!
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
+===== `truststore_type`
+
+ * Value type is <>
+ * Default value is `"JKS"`
+
+Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`
+
+[id="{version}-plugins-{type}s-{plugin}-urls"]
+===== `urls`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+A Hash of urls in this format: `"name" => "url"`.
+The name and the url will be passed in the outputted event.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <>
+ * Default value is `200`
+
+How long (in milliseconds) to wait before checking whether a keepalive connection is stale before executing a request on it.
+You may want to set this lower, possibly to 0, if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/http_poller-v4.0.3.asciidoc b/docs/versioned-plugins/inputs/http_poller-v4.0.3.asciidoc
new file mode 100644
index 000000000..51b6ad95f
--- /dev/null
+++ b/docs/versioned-plugins/inputs/http_poller-v4.0.3.asciidoc
@@ -0,0 +1,379 @@
+:plugin: http_poller
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.3
+:release_date: 2017-09-20
+:changelog_url: https://github.com/logstash-plugins/logstash-input-http_poller/blob/v4.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Http_poller input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This Logstash input plugin allows you to call an HTTP API, decode the output into event(s), and
+send them on their merry way. The idea behind this plugin came from a need to read a Spring Boot
+metrics endpoint instead of configuring JMX to monitor Java application memory, GC, and the like.
+
+==== Example
+Reads from a list of urls and decodes the body of the response with a codec.
+The config should look like this:
+
+[source,ruby]
+----------------------------------
+input {
+  http_poller {
+    urls => {
+      test1 => "http://localhost:9200"
+      test2 => {
+        # Supports all options supported by ruby's Manticore HTTP client
+        method => get
+        user => "AzureDiamond"
+        password => "hunter2"
+        url => "http://localhost:9200/_cluster/health"
+        headers => {
+          Accept => "application/json"
+        }
+      }
+    }
+    request_timeout => 60
+    # Supports "cron", "every", "at" and "in" schedules by rufus scheduler
+    schedule => { cron => "* * * * * UTC"}
+    codec => "json"
+    # A hash of request metadata info (timing, response headers, etc.) will be sent here
+    metadata_target => "http_poller_metadata"
+  }
+}
+
+output {
+  stdout {
+    codec => rubydebug
+  }
+}
+----------------------------------
+
+Using the HTTP poller with a custom CA or a self-signed cert.
+
+If you have a self-signed cert you will need to convert your server's certificate to a valid `.jks` or `.p12` file. An easy way to do it is to run the following one-liner, substituting your server's URL and port for the placeholders `MYURL` and `MYPORT`.
+
+[source,ruby]
+----------------------------------
+openssl s_client -showcerts -connect MYURL:MYPORT </dev/null | openssl x509 -outform PEM > downloaded_cert.pem; keytool -import -alias test -file downloaded_cert.pem -keystore downloaded_truststore.jks
+----------------------------------
+
+The above snippet will create two files, `downloaded_cert.pem` and `downloaded_truststore.jks`. You will be prompted to set a password for the `jks` file during this process. To configure Logstash, use a config like the one that follows.
+
+
+[source,ruby]
+----------------------------------
+ http_poller {
+   urls => {
+     myurl => "https://myhostname:1234"
+   }
+   truststore => "/path/to/downloaded_truststore.jks"
+   truststore_password => "mypassword"
+   schedule => { cron => "* * * * * UTC"}
+ }
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Http_poller Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-metadata_target>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-urls>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No
+|=======================================================================
+
+Also see
<<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * There is no default value for this setting. + +Username to use with HTTP authentication for ALL requests. Note that you can also set this per-URL. +If you set this you must also set the `password` option. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to be used in conjunction with the username for HTTP authentication. + +[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] +===== `automatic_retries` + + * Value type is <> + * Default value is `1` + +How many times should the client retry a failing URL. We highly recommend NOT setting this value +to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! +Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. + +[id="{version}-plugins-{type}s-{plugin}-cacert"] +===== `cacert` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom X.509 CA (.pem certs) specify the path to that here + +[id="{version}-plugins-{type}s-{plugin}-client_cert"] +===== `client_cert` + + * Value type is <> + * There is no default value for this setting. + +If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here + +[id="{version}-plugins-{type}s-{plugin}-client_key"] +===== `client_key` + + * Value type is <> + * There is no default value for this setting. + +If you're using a client certificate specify the path to the encryption key here + +[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] +===== `connect_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for a connection to be established. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-cookies"] +===== `cookies` + + * Value type is <> + * Default value is `true` + +Enable cookie support. With this enabled the client will persist cookies +across requests as a normal web browser would. Enabled by default + +[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] +===== `follow_redirects` + + * Value type is <> + * Default value is `true` + +Should redirects be followed? Defaults to `true` + +[id="{version}-plugins-{type}s-{plugin}-keepalive"] +===== `keepalive` + + * Value type is <> + * Default value is `true` + +Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least +one with this to fix interactions with broken keepalive implementations. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the keystore password here. +Note, most .jks files created with keytool require a password! + +[id="{version}-plugins-{type}s-{plugin}-keystore_type"] +===== `keystore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the keystore type here. One of `JKS` or `PKCS12`. 
Default is `JKS`
+
+[id="{version}-plugins-{type}s-{plugin}-metadata_target"]
+===== `metadata_target`
+
+ * Value type is <>
+ * Default value is `"@metadata"`
+
+If you'd like to work with the request/response metadata, set this value
+to the name of the field in which you'd like to store a nested
+hash of metadata.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is <>
+ * Default value is `50`
+
+Max number of concurrent connections.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is <>
+ * Default value is `25`
+
+Max number of concurrent connections to a single host.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you'd like to use an HTTP proxy, set it here. This setting supports multiple configuration syntaxes:
+
+1. Proxy host in form: `http://proxy.org:1234`
+2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}`
+3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}`
+
+[id="{version}-plugins-{type}s-{plugin}-request_timeout"]
+===== `request_timeout`
+
+ * Value type is <>
+ * Default value is `60`
+
+Timeout (in seconds) for the entire request.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"]
+===== `retry_non_idempotent`
+
+ * Value type is <>
+ * Default value is `false`
+
+If `automatic_retries` is enabled, this will cause non-idempotent HTTP verbs (such as POST) to be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-schedule"]
+===== `schedule`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Schedule of when to periodically poll from the urls.
+Format: A hash with
+
+ * key: "cron" | "every" | "in" | "at"
+ * value: string
+
+Examples:
+
+ a) `{ "every" => "1h" }`
+ b) `{ "cron" => "* * * * * UTC" }`
+
+See rufus/scheduler for details about the different schedule options and the value string format.
+
+[id="{version}-plugins-{type}s-{plugin}-socket_timeout"]
+===== `socket_timeout`
+
+ * Value type is <>
+ * Default value is `10`
+
+Timeout (in seconds) to wait for data on the socket.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Define the target field for placing the received data. If this setting is omitted, the data will be stored at the root (top level) of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs!
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Specify the truststore password here.
+Note, most .jks files created with keytool require a password!
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
+===== `truststore_type`
+
+ * Value type is <>
+ * Default value is `"JKS"`
+
+Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`
+
+[id="{version}-plugins-{type}s-{plugin}-urls"]
+===== `urls`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+A Hash of urls in this format: `"name" => "url"`.
+The name and the url will be passed in the outputted event.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <>
+ * Default value is `200`
+
+How long (in milliseconds) to wait before checking whether a keepalive connection is stale before executing a request on it.
+You may want to set this lower, possibly to 0, if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/http_poller-v4.0.4.asciidoc b/docs/versioned-plugins/inputs/http_poller-v4.0.4.asciidoc
new file mode 100644
index 000000000..8edcf588a
--- /dev/null
+++ b/docs/versioned-plugins/inputs/http_poller-v4.0.4.asciidoc
@@ -0,0 +1,379 @@
+:plugin: http_poller
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.4
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-http_poller/blob/v4.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Http_poller input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This Logstash input plugin allows you to call an HTTP API, decode the output into event(s), and
+send them on their merry way. The idea behind this plugin came from a need to read a Spring Boot
+metrics endpoint instead of configuring JMX to monitor Java application memory, GC, and the like.
+
+==== Example
+Reads from a list of urls and decodes the body of the response with a codec.
+The config should look like this:
+
+[source,ruby]
+----------------------------------
+input {
+  http_poller {
+    urls => {
+      test1 => "http://localhost:9200"
+      test2 => {
+        # Supports all options supported by ruby's Manticore HTTP client
+        method => get
+        user => "AzureDiamond"
+        password => "hunter2"
+        url => "http://localhost:9200/_cluster/health"
+        headers => {
+          Accept => "application/json"
+        }
+      }
+    }
+    request_timeout => 60
+    # Supports "cron", "every", "at" and "in" schedules by rufus scheduler
+    schedule => { cron => "* * * * * UTC"}
+    codec => "json"
+    # A hash of request metadata info (timing, response headers, etc.) will be sent here
+    metadata_target => "http_poller_metadata"
+  }
+}
+
+output {
+  stdout {
+    codec => rubydebug
+  }
+}
+----------------------------------
+
+Using the HTTP poller with a custom CA or a self-signed cert.
+
+If you have a self-signed cert you will need to convert your server's certificate to a valid `.jks` or `.p12` file. An easy way to do it is to run the following one-liner, substituting your server's URL and port for the placeholders `MYURL` and `MYPORT`.
+
+[source,ruby]
+----------------------------------
+openssl s_client -showcerts -connect MYURL:MYPORT </dev/null | openssl x509 -outform PEM > downloaded_cert.pem; keytool -import -alias test -file downloaded_cert.pem -keystore downloaded_truststore.jks
+----------------------------------
+
+The above snippet will create two files, `downloaded_cert.pem` and `downloaded_truststore.jks`. You will be prompted to set a password for the `jks` file during this process. To configure Logstash, use a config like the one that follows.
+
+
+[source,ruby]
+----------------------------------
+ http_poller {
+   urls => {
+     myurl => "https://myhostname:1234"
+   }
+   truststore => "/path/to/downloaded_truststore.jks"
+   truststore_password => "mypassword"
+   schedule => { cron => "* * * * * UTC"}
+ }
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Http_poller Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-metadata_target>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-urls>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No
+|=======================================================================
+
+Also see
<<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * There is no default value for this setting. + +Username to use with HTTP authentication for ALL requests. Note that you can also set this per-URL. +If you set this you must also set the `password` option. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to be used in conjunction with the username for HTTP authentication. + +[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] +===== `automatic_retries` + + * Value type is <> + * Default value is `1` + +How many times should the client retry a failing URL. We highly recommend NOT setting this value +to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! +Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. + +[id="{version}-plugins-{type}s-{plugin}-cacert"] +===== `cacert` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom X.509 CA (.pem certs) specify the path to that here + +[id="{version}-plugins-{type}s-{plugin}-client_cert"] +===== `client_cert` + + * Value type is <> + * There is no default value for this setting. + +If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here + +[id="{version}-plugins-{type}s-{plugin}-client_key"] +===== `client_key` + + * Value type is <> + * There is no default value for this setting. + +If you're using a client certificate specify the path to the encryption key here + +[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] +===== `connect_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for a connection to be established. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-cookies"] +===== `cookies` + + * Value type is <> + * Default value is `true` + +Enable cookie support. With this enabled the client will persist cookies +across requests as a normal web browser would. Enabled by default + +[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] +===== `follow_redirects` + + * Value type is <> + * Default value is `true` + +Should redirects be followed? Defaults to `true` + +[id="{version}-plugins-{type}s-{plugin}-keepalive"] +===== `keepalive` + + * Value type is <> + * Default value is `true` + +Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least +one with this to fix interactions with broken keepalive implementations. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the keystore password here. +Note, most .jks files created with keytool require a password! + +[id="{version}-plugins-{type}s-{plugin}-keystore_type"] +===== `keystore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the keystore type here. One of `JKS` or `PKCS12`. 
Default is `JKS`
+
+[id="{version}-plugins-{type}s-{plugin}-metadata_target"]
+===== `metadata_target`
+
+ * Value type is <>
+ * Default value is `"@metadata"`
+
+If you'd like to work with the request/response metadata, set this value
+to the name of the field in which you'd like to store a nested
+hash of metadata.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is <>
+ * Default value is `50`
+
+Max number of concurrent connections.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is <>
+ * Default value is `25`
+
+Max number of concurrent connections to a single host.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you'd like to use an HTTP proxy, set it here. This setting supports multiple configuration syntaxes:
+
+1. Proxy host in form: `http://proxy.org:1234`
+2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}`
+3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}`
+
+[id="{version}-plugins-{type}s-{plugin}-request_timeout"]
+===== `request_timeout`
+
+ * Value type is <>
+ * Default value is `60`
+
+Timeout (in seconds) for the entire request.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"]
+===== `retry_non_idempotent`
+
+ * Value type is <>
+ * Default value is `false`
+
+If `automatic_retries` is enabled, this will cause non-idempotent HTTP verbs (such as POST) to be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-schedule"]
+===== `schedule`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Schedule of when to periodically poll from the urls.
+Format: A hash with
+
+ * key: "cron" | "every" | "in" | "at"
+ * value: string
+
+Examples:
+
+ a) `{ "every" => "1h" }`
+ b) `{ "cron" => "* * * * * UTC" }`
+
+See rufus/scheduler for details about the different schedule options and the value string format.
+
+[id="{version}-plugins-{type}s-{plugin}-socket_timeout"]
+===== `socket_timeout`
+
+ * Value type is <>
+ * Default value is `10`
+
+Timeout (in seconds) to wait for data on the socket.
+
+[id="{version}-plugins-{type}s-{plugin}-target"]
+===== `target`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Define the target field for placing the received data. If this setting is omitted, the data will be stored at the root (top level) of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs!
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Specify the truststore password here.
+Note, most .jks files created with keytool require a password!
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
+===== `truststore_type`
+
+ * Value type is <>
+ * Default value is `"JKS"`
+
+Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`
+
+[id="{version}-plugins-{type}s-{plugin}-urls"]
+===== `urls`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+A Hash of urls in this format: `"name" => "url"`.
+The name and the url will be passed in the outputted event.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <>
+ * Default value is `200`
+
+How long (in milliseconds) to wait before checking whether a keepalive connection is stale before executing a request on it.
+You may want to set this lower, possibly to 0, if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/imap-index.asciidoc b/docs/versioned-plugins/inputs/imap-index.asciidoc
new file mode 100644
index 000000000..a0487c8a0
--- /dev/null
+++ b/docs/versioned-plugins/inputs/imap-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: imap
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::imap-v3.0.5.asciidoc[]
+include::imap-v3.0.4.asciidoc[]
+include::imap-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/imap-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/imap-v3.0.3.asciidoc
new file mode 100644
index 000000000..3f9c0d935
--- /dev/null
+++ b/docs/versioned-plugins/inputs/imap-v3.0.3.asciidoc
@@ -0,0 +1,176 @@
+:plugin: imap
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-imap/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Imap input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read mail from an IMAP server.
+
+Periodically scans an IMAP folder (`INBOX` by default) and moves any read messages
+to the trash.
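+
+For instance, a minimal configuration that polls a mailbox over SSL might look
+like this (a sketch only; the host and credentials are placeholders):
+
+[source,ruby]
+----------------------------------
+input {
+  imap {
+    host => "imap.example.org"    # placeholder IMAP server
+    port => 993
+    secure => true                # SSL connection; `secure` defaults to true
+    user => "logs@example.org"    # placeholder credentials
+    password => "changeme"
+    folder => "INBOX"             # the default, shown here for clarity
+    check_interval => 300         # seconds between scans (the default)
+  }
+}
+----------------------------------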
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Imap Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-check_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-content_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-delete>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-expunge>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-fetch_count>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-folder>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-lowercase_headers>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-secure>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-strip_attachments>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-verify_cert>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-check_interval"]
+===== `check_interval`
+
+ * Value type is <>
+ * Default value is `300`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-content_type"]
+===== `content_type`
+
+ * Value type is <>
+ * Default value is `"text/plain"`
+
+For multipart messages, use the first part that has this
+content-type as the event message.
+
+[id="{version}-plugins-{type}s-{plugin}-delete"]
+===== `delete`
+
+ * Value type is <>
+ * Default value is `false`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-expunge"]
+===== `expunge`
+
+ * Value type is <>
+ * Default value is `false`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-fetch_count"]
+===== `fetch_count`
+
+ * Value type is <>
+ * Default value is `50`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-folder"]
+===== `folder`
+
+ * Value type is <>
+ * Default value is `"INBOX"`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-lowercase_headers"]
+===== `lowercase_headers`
+
+ * Value type is <>
+ * Default value is `true`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-secure"]
+===== `secure`
+
+ * Value type is <>
+ * Default value is `true`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-strip_attachments"]
+===== `strip_attachments`
+
+ * Value type is <>
+ * Default value is `false`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+ + + +[id="{version}-plugins-{type}s-{plugin}-verify_cert"] +===== `verify_cert` + + * Value type is <> + * Default value is `true` + + + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/imap-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/imap-v3.0.4.asciidoc new file mode 100644 index 000000000..58778284b --- /dev/null +++ b/docs/versioned-plugins/inputs/imap-v3.0.4.asciidoc @@ -0,0 +1,176 @@ +:plugin: imap +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-input-imap/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Imap input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read mails from IMAP server + +Periodically scan an IMAP folder (`INBOX` by default) and move any read messages +to the trash. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Imap Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-check_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-content_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-delete>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-expunge>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fetch_count>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-folder>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-lowercase_headers>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-secure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-strip_attachments>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-verify_cert>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-check_interval"] +===== `check_interval` + + * Value type is <> + * Default value is `300` + + + +[id="{version}-plugins-{type}s-{plugin}-content_type"] +===== `content_type` + + * Value type is <> + * Default value is `"text/plain"` + +For multipart messages, use the first part that has this +content-type as the event message. 
+ +[id="{version}-plugins-{type}s-{plugin}-delete"] +===== `delete` + + * Value type is <> + * Default value is `false` + + + +[id="{version}-plugins-{type}s-{plugin}-expunge"] +===== `expunge` + + * Value type is <> + * Default value is `false` + + + +[id="{version}-plugins-{type}s-{plugin}-fetch_count"] +===== `fetch_count` + + * Value type is <> + * Default value is `50` + + + +[id="{version}-plugins-{type}s-{plugin}-folder"] +===== `folder` + + * Value type is <> + * Default value is `"INBOX"` + + + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + + + +[id="{version}-plugins-{type}s-{plugin}-lowercase_headers"] +===== `lowercase_headers` + + * Value type is <> + * Default value is `true` + + + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + + + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * There is no default value for this setting. + + + +[id="{version}-plugins-{type}s-{plugin}-secure"] +===== `secure` + + * Value type is <> + * Default value is `true` + + + +[id="{version}-plugins-{type}s-{plugin}-strip_attachments"] +===== `strip_attachments` + + * Value type is <> + * Default value is `false` + + + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + + + +[id="{version}-plugins-{type}s-{plugin}-verify_cert"] +===== `verify_cert` + + * Value type is <> + * Default value is `true` + + + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/imap-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/imap-v3.0.5.asciidoc new file mode 100644 index 000000000..d032c9279 --- /dev/null +++ b/docs/versioned-plugins/inputs/imap-v3.0.5.asciidoc @@ -0,0 +1,176 @@ +:plugin: imap +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-input-imap/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Imap input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read mails from IMAP server + +Periodically scan an IMAP folder (`INBOX` by default) and move any read messages +to the trash. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Imap Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-check_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-content_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-delete>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-expunge>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fetch_count>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-folder>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-lowercase_headers>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-secure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-strip_attachments>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-verify_cert>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-check_interval"] +===== `check_interval` + + * Value type is <> + * Default value is `300` + + + +[id="{version}-plugins-{type}s-{plugin}-content_type"] +===== `content_type` + + * Value type is <> + * Default value is `"text/plain"` + +For multipart messages, use the first part that has this +content-type as the event message. + +[id="{version}-plugins-{type}s-{plugin}-delete"] +===== `delete` + + * Value type is <> + * Default value is `false` + + + +[id="{version}-plugins-{type}s-{plugin}-expunge"] +===== `expunge` + + * Value type is <> + * Default value is `false` + + + +[id="{version}-plugins-{type}s-{plugin}-fetch_count"] +===== `fetch_count` + + * Value type is <> + * Default value is `50` + + + +[id="{version}-plugins-{type}s-{plugin}-folder"] +===== `folder` + + * Value type is <> + * Default value is `"INBOX"` + + + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + + + +[id="{version}-plugins-{type}s-{plugin}-lowercase_headers"] +===== `lowercase_headers` + + * Value type is <> + * Default value is `true` + + + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + + + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * There is no default value for this setting. + + + +[id="{version}-plugins-{type}s-{plugin}-secure"] +===== `secure` + + * Value type is <> + * Default value is `true` + + + +[id="{version}-plugins-{type}s-{plugin}-strip_attachments"] +===== `strip_attachments` + + * Value type is <> + * Default value is `false` + + + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. 
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-verify_cert"]
+===== `verify_cert`
+
+ * Value type is <>
+ * Default value is `true`
+
+
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/irc-index.asciidoc b/docs/versioned-plugins/inputs/irc-index.asciidoc
new file mode 100644
index 000000000..0f778357c
--- /dev/null
+++ b/docs/versioned-plugins/inputs/irc-index.asciidoc
@@ -0,0 +1,18 @@
+:plugin: irc
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-12-07
+| <> | 2017-11-14
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::irc-v3.0.6.asciidoc[]
+include::irc-v3.0.5.asciidoc[]
+include::irc-v3.0.4.asciidoc[]
+include::irc-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/irc-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/irc-v3.0.3.asciidoc
new file mode 100644
index 000000000..7cd60f64b
--- /dev/null
+++ b/docs/versioned-plugins/inputs/irc-v3.0.3.asciidoc
@@ -0,0 +1,152 @@
+:plugin: irc
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-irc/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Irc input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events from an IRC Server.
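+
+For instance, a bare-bones configuration that joins a single channel might look
+like this (a sketch only; the server, nick, and channel are placeholders):
+
+[source,ruby]
+----------------------------------
+input {
+  irc {
+    host => "irc.example.org"   # placeholder IRC server
+    port => 6667                # the default plaintext IRC port
+    channels => ["#logstash"]   # full channel names, including the '#'
+    nick => "logstash-bot"      # defaults to "logstash"
+  }
+}
+----------------------------------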
+ +These should be full channel names including the '#' symbol, such as +"#logstash". + +For passworded channels, add a space and the channel password, such as +"#logstash password". + + +[id="{version}-plugins-{type}s-{plugin}-get_stats"] +===== `get_stats` + + * Value type is <> + * Default value is `false` + +Gather and send user counts for channels - this requires catch_all and will force it + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Host of the IRC Server to connect to. + +[id="{version}-plugins-{type}s-{plugin}-nick"] +===== `nick` + + * Value type is <> + * Default value is `"logstash"` + +IRC Nickname + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +IRC Server password + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `6667` + +Port for the IRC Server + +[id="{version}-plugins-{type}s-{plugin}-real"] +===== `real` + + * Value type is <> + * Default value is `"logstash"` + +IRC Real name + +[id="{version}-plugins-{type}s-{plugin}-secure"] +===== `secure` + + * Value type is <> + * Default value is `false` + +Set this to true to enable SSL. + +[id="{version}-plugins-{type}s-{plugin}-stats_interval"] +===== `stats_interval` + + * Value type is <> + * Default value is `5` + +How often in minutes to get the user count stats + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * Default value is `"logstash"` + +IRC Username + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/irc-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/irc-v3.0.4.asciidoc new file mode 100644 index 000000000..7146b93d4 --- /dev/null +++ b/docs/versioned-plugins/inputs/irc-v3.0.4.asciidoc @@ -0,0 +1,152 @@ +:plugin: irc +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-input-irc/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Irc input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read events from an IRC Server. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Irc Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-catch_all>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-channels>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-get_stats>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-nick>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-real>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-secure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-stats_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-catch_all"] +===== `catch_all` + + * Value type is <> + * Default value is `false` + +Catch all IRC channel/user events not just channel messages + +[id="{version}-plugins-{type}s-{plugin}-channels"] +===== `channels` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Channels to join and read messages from. + +These should be full channel names including the '#' symbol, such as +"#logstash". + +For passworded channels, add a space and the channel password, such as +"#logstash password". + + +[id="{version}-plugins-{type}s-{plugin}-get_stats"] +===== `get_stats` + + * Value type is <> + * Default value is `false` + +Gather and send user counts for channels - this requires catch_all and will force it + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Host of the IRC Server to connect to. + +[id="{version}-plugins-{type}s-{plugin}-nick"] +===== `nick` + + * Value type is <> + * Default value is `"logstash"` + +IRC Nickname + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +IRC Server password + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `6667` + +Port for the IRC Server + +[id="{version}-plugins-{type}s-{plugin}-real"] +===== `real` + + * Value type is <> + * Default value is `"logstash"` + +IRC Real name + +[id="{version}-plugins-{type}s-{plugin}-secure"] +===== `secure` + + * Value type is <> + * Default value is `false` + +Set this to true to enable SSL. + +[id="{version}-plugins-{type}s-{plugin}-stats_interval"] +===== `stats_interval` + + * Value type is <> + * Default value is `5` + +How often in minutes to get the user count stats + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * Default value is `"logstash"` + +IRC Username + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/irc-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/irc-v3.0.5.asciidoc new file mode 100644 index 000000000..49a8e3929 --- /dev/null +++ b/docs/versioned-plugins/inputs/irc-v3.0.5.asciidoc @@ -0,0 +1,152 @@ +:plugin: irc +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-11-14 +:changelog_url: https://github.com/logstash-plugins/logstash-input-irc/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Irc input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read events from an IRC Server. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Irc Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-catch_all>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-channels>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-get_stats>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-nick>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-real>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-secure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-stats_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-catch_all"] +===== `catch_all` + + * Value type is <> + * Default value is `false` + +Catch all IRC channel/user events not just channel messages + +[id="{version}-plugins-{type}s-{plugin}-channels"] +===== `channels` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Channels to join and read messages from. + +These should be full channel names including the '#' symbol, such as +"#logstash". + +For passworded channels, add a space and the channel password, such as +"#logstash password". + + +[id="{version}-plugins-{type}s-{plugin}-get_stats"] +===== `get_stats` + + * Value type is <> + * Default value is `false` + +Gather and send user counts for channels - this requires catch_all and will force it + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Host of the IRC Server to connect to. + +[id="{version}-plugins-{type}s-{plugin}-nick"] +===== `nick` + + * Value type is <> + * Default value is `"logstash"` + +IRC Nickname + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +IRC Server password + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `6667` + +Port for the IRC Server + +[id="{version}-plugins-{type}s-{plugin}-real"] +===== `real` + + * Value type is <> + * Default value is `"logstash"` + +IRC Real name + +[id="{version}-plugins-{type}s-{plugin}-secure"] +===== `secure` + + * Value type is <> + * Default value is `false` + +Set this to true to enable SSL. 
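+
+A minimal sketch of a TLS-enabled connection (the server name is hypothetical; 6697 is a
+commonly used port for IRC over SSL/TLS):
+
+[source,ruby]
+-------------------------------------------------------
+input {
+  irc {
+    host => "irc.example.org"
+    port => 6697
+    secure => true
+    channels => ["#logstash"]
+  }
+}
+-------------------------------------------------------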
+ +[id="{version}-plugins-{type}s-{plugin}-stats_interval"] +===== `stats_interval` + + * Value type is <> + * Default value is `5` + +How often in minutes to get the user count stats + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * Default value is `"logstash"` + +IRC Username + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/irc-v3.0.6.asciidoc b/docs/versioned-plugins/inputs/irc-v3.0.6.asciidoc new file mode 100644 index 000000000..e5f3a11f9 --- /dev/null +++ b/docs/versioned-plugins/inputs/irc-v3.0.6.asciidoc @@ -0,0 +1,152 @@ +:plugin: irc +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.6 +:release_date: 2017-12-07 +:changelog_url: https://github.com/logstash-plugins/logstash-input-irc/blob/v3.0.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Irc input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read events from an IRC Server. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Irc Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-catch_all>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-channels>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-get_stats>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-nick>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-real>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-secure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-stats_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-catch_all"] +===== `catch_all` + + * Value type is <> + * Default value is `false` + +Catch all IRC channel/user events not just channel messages + +[id="{version}-plugins-{type}s-{plugin}-channels"] +===== `channels` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Channels to join and read messages from. + +These should be full channel names including the '#' symbol, such as +"#logstash". + +For passworded channels, add a space and the channel password, such as +"#logstash password". + + +[id="{version}-plugins-{type}s-{plugin}-get_stats"] +===== `get_stats` + + * Value type is <> + * Default value is `false` + +Gather and send user counts for channels - this requires catch_all and will force it + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. 
+
+Host of the IRC Server to connect to.
+
+[id="{version}-plugins-{type}s-{plugin}-nick"]
+===== `nick`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+IRC Nickname
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+IRC Server password
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <>
+ * Default value is `6667`
+
+Port for the IRC Server
+
+[id="{version}-plugins-{type}s-{plugin}-real"]
+===== `real`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+IRC Real name
+
+[id="{version}-plugins-{type}s-{plugin}-secure"]
+===== `secure`
+
+ * Value type is <>
+ * Default value is `false`
+
+Set this to true to enable SSL.
+
+[id="{version}-plugins-{type}s-{plugin}-stats_interval"]
+===== `stats_interval`
+
+ * Value type is <>
+ * Default value is `5`
+
+How often (in minutes) to get the user count stats
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+IRC Username
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/jdbc-index.asciidoc b/docs/versioned-plugins/inputs/jdbc-index.asciidoc
new file mode 100644
index 000000000..743f4c280
--- /dev/null
+++ b/docs/versioned-plugins/inputs/jdbc-index.asciidoc
@@ -0,0 +1,26 @@
+:plugin: jdbc
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-12-14 (latest)
+| <> | 2017-12-07
+| <> | 2017-11-07
+| <> | 2017-10-27
+| <> | 2017-08-21
+| <> | 2017-08-15
+| <> | 2017-07-25
+| <> | 2017-06-23
+|=======================================================================
+
+include::jdbc-v4.3.3.asciidoc[]
+include::jdbc-v4.3.2.asciidoc[]
+include::jdbc-v4.3.1.asciidoc[]
+include::jdbc-v4.3.0.asciidoc[]
+include::jdbc-v4.2.4.asciidoc[]
+include::jdbc-v4.2.3.asciidoc[]
+include::jdbc-v4.2.2.asciidoc[]
+include::jdbc-v4.2.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/jdbc-v4.2.1.asciidoc b/docs/versioned-plugins/inputs/jdbc-v4.2.1.asciidoc
new file mode 100644
index 000000000..c71364058
--- /dev/null
+++ b/docs/versioned-plugins/inputs/jdbc-v4.2.1.asciidoc
@@ -0,0 +1,486 @@
+:plugin: jdbc
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.2.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-jdbc/blob/v4.2.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Jdbc input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This plugin was created as a way to ingest data from any database
+with a JDBC interface into Logstash. You can periodically schedule ingestion
+using a cron syntax (see `schedule` setting) or run the query one time to load
+data into Logstash. Each row in the resultset becomes a single event.
+Columns in the resultset are converted into fields in the event.
+
+==== Drivers
+
+This plugin does not come packaged with JDBC driver libraries.
The desired
+JDBC driver library must be explicitly passed in to the plugin using the
+`jdbc_driver_library` configuration option.
+
+==== Scheduling
+
+Input from this plugin can be scheduled to run periodically according to a specific
+schedule. This scheduling syntax is powered by https://github.com/jmettraux/rufus-scheduler[rufus-scheduler].
+The syntax is cron-like with some extensions specific to Rufus (e.g. timezone support).
+
+Examples:
+
+|==========================================================
+| `* 5 * 1-3 *` | will execute every minute of 5am every day of January through March.
+| `0 * * * *` | will execute on the 0th minute of every hour every day.
+| `0 6 * * * America/Chicago` | will execute at 6:00am (UTC/GMT -5) every day.
+|==========================================================
+
+
+Further documentation describing this syntax can be found https://github.com/jmettraux/rufus-scheduler#parsing-cronlines-and-time-strings[here].
+
+==== State
+
+The plugin will persist the `sql_last_value` parameter in the form of a
+metadata file stored in the configured `last_run_metadata_path`. Upon query execution,
+this file will be updated with the current value of `sql_last_value`. Next time
+the pipeline starts up, this value will be updated by reading from the file. If
+`clean_run` is set to true, this value will be ignored and `sql_last_value` will be
+set to Jan 1, 1970, or 0 if `use_column_value` is true, as if no query has ever been executed.
+
+==== Dealing With Large Result-sets
+
+Many JDBC drivers use the `fetch_size` parameter to limit how many
+results are pre-fetched at a time from the cursor into the client's cache
+before retrieving more results from the result-set. This is configured in
+this plugin using the `jdbc_fetch_size` configuration option. No fetch size
+is set by default in this plugin, so the specific driver's default size will
+be used.
+
+==== Usage
+
+Here is an example of setting up the plugin to fetch data from a MySQL database.
+First, we place the appropriate JDBC driver library in our current
+path (this can be placed anywhere on your filesystem). In this example, we connect to
+the 'mydb' database as the user 'mysql' and wish to input all rows in the 'songs'
+table that match a specific artist. The following example demonstrates a possible
+Logstash configuration for this. The `schedule` option in this example will
+instruct the plugin to execute this input statement on the minute, every minute.
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  jdbc {
+    jdbc_driver_library => "mysql-connector-java-5.1.36-bin.jar"
+    jdbc_driver_class => "com.mysql.jdbc.Driver"
+    jdbc_connection_string => "jdbc:mysql://localhost:3306/mydb"
+    jdbc_user => "mysql"
+    parameters => { "favorite_artist" => "Beethoven" }
+    schedule => "* * * * *"
+    statement => "SELECT * from songs where artist = :favorite_artist"
+  }
+}
+------------------------------------------------------------------------------
+
+==== Configuring SQL statement
+
+An SQL statement is required for this input. It can be passed in as a string via the
+`statement` option, or read from a file (`statement_filepath`). The file
+option is typically used when the SQL statement is large or cumbersome to supply in the config.
+The file option only supports one SQL statement. The plugin will only accept one of the options.
+It cannot read a statement from a file as well as from the `statement` configuration parameter.
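+
+For instance, a minimal sketch of the file-based variant (the path here is hypothetical,
+and the connection settings are omitted for brevity):
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  jdbc {
+    # ... connection settings as in the example above ...
+    # Read a single SQL statement from a file instead of passing it
+    # inline via `statement`.
+    statement_filepath => "/etc/logstash/queries/songs.sql"
+  }
+}
+------------------------------------------------------------------------------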
+ +==== Configuring multiple SQL statements + +Configuring multiple SQL statements is useful when there is a need to query and ingest data +from different database tables or views. It is possible to define separate Logstash +configuration files for each statement or to define multiple statements in a single configuration +file. When using multiple statements in a single Logstash configuration file, each statement +has to be defined as a separate jdbc input (including jdbc driver, connection string and other +required parameters). + +Please note that if any of the statements use the `sql_last_value` parameter (e.g. for +ingesting only data changed since last run), each input should define its own +`last_run_metadata_path` parameter. Failure to do so will result in undesired behaviour, as +all inputs will store their state to the same (default) metadata file, effectively +overwriting each other's `sql_last_value`. + +==== Predefined Parameters + +Some parameters are built-in and can be used from within your queries. +Here is the list: + +|========================================================== +|sql_last_value | The value used to calculate which rows to query. Before any query is run, +this is set to Thursday, 1 January 1970, or 0 if `use_column_value` is true and +`tracking_column` is set. It is updated accordingly after subsequent queries are run. +|========================================================== + +Example: +[source,ruby] +--------------------------------------------------------------------------------------------------- +input { + jdbc { + statement => "SELECT id, mycolumn1, mycolumn2 FROM my_table WHERE id > :sql_last_value" + use_column_value => true + tracking_column => "id" + # ... other configuration bits + } +} +--------------------------------------------------------------------------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Jdbc Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-clean_run>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-columns_charset>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_connection_string>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-jdbc_default_timezone>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_class>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_library>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_fetch_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_page_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_password_filepath>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_user>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-jdbc_validate_connection>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-last_run_metadata_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-lowercase_column_names>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-record_last_run>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sequel_opts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sql_log_level>> |<>, one of `["fatal", "error", "warn", "info", "debug"]`|No +| <<{version}-plugins-{type}s-{plugin}-statement>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-statement_filepath>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-tracking_column>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tracking_column_type>> |<>, one of `["numeric", "timestamp"]`|No +| <<{version}-plugins-{type}s-{plugin}-use_column_value>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-clean_run"] +===== `clean_run` + + * Value type is <> + * Default value is `false` + +Whether the previous run state should be preserved + +[id="{version}-plugins-{type}s-{plugin}-columns_charset"] +===== `columns_charset` + + * Value type is <> + * Default value is `{}` + +The character encoding for specific columns. This option will override the `:charset` option +for the specified columns. + +Example: +[source,ruby] +------------------------------------------------------- +input { + jdbc { + ... + columns_charset => { "column0" => "ISO-8859-1" } + ... + } +} +------------------------------------------------------- +this will only convert column0 that has ISO-8859-1 as an original encoding. 
+ +[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts"] +===== `connection_retry_attempts` + + * Value type is <> + * Default value is `1` + +Maximum number of times to try connecting to database + +[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time"] +===== `connection_retry_attempts_wait_time` + + * Value type is <> + * Default value is `0.5` + +Number of seconds to sleep between connection attempts + +[id="{version}-plugins-{type}s-{plugin}-jdbc_connection_string"] +===== `jdbc_connection_string` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +JDBC connection string + +[id="{version}-plugins-{type}s-{plugin}-jdbc_default_timezone"] +===== `jdbc_default_timezone` + + * Value type is <> + * There is no default value for this setting. + +Timezone conversion. +SQL does not allow for timezone data in timestamp fields. This plugin will automatically +convert your SQL timestamp fields to Logstash timestamps, in relative UTC time in ISO8601 format. + +Using this setting will manually assign a specified timezone offset, instead +of using the timezone setting of the local machine. You must use a canonical +timezone, *America/Denver*, for example. + +[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_class"] +===== `jdbc_driver_class` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +JDBC driver class to load, for exmaple, "org.apache.derby.jdbc.ClientDriver" +NB per https://github.com/logstash-plugins/logstash-input-jdbc/issues/43 if you are using +the Oracle JDBC driver (ojdbc6.jar) the correct `jdbc_driver_class` is `"Java::oracle.jdbc.driver.OracleDriver"` + +[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_library"] +===== `jdbc_driver_library` + + * Value type is <> + * There is no default value for this setting. + +Tentative of abstracting JDBC logic to a mixin +for potential reuse in other plugins (input/output) +This method is called when someone includes this module +Add these methods to the 'base' given. +JDBC driver library path to third party driver library. In case of multiple libraries being +required you can pass them separated by a comma. + +If not provided, Plugin will look for the driver class in the Logstash Java classpath. + +[id="{version}-plugins-{type}s-{plugin}-jdbc_fetch_size"] +===== `jdbc_fetch_size` + + * Value type is <> + * There is no default value for this setting. + +JDBC fetch size. if not provided, respective driver's default will be used + +[id="{version}-plugins-{type}s-{plugin}-jdbc_page_size"] +===== `jdbc_page_size` + + * Value type is <> + * Default value is `100000` + +JDBC page size + +[id="{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled"] +===== `jdbc_paging_enabled` + + * Value type is <> + * Default value is `false` + +JDBC enable paging + +This will cause a sql statement to be broken up into multiple queries. +Each query will use limits and offsets to collectively retrieve the full +result-set. The limit size is set with `jdbc_page_size`. + +Be aware that ordering is not guaranteed between queries. + +[id="{version}-plugins-{type}s-{plugin}-jdbc_password"] +===== `jdbc_password` + + * Value type is <> + * There is no default value for this setting. + +JDBC password + +[id="{version}-plugins-{type}s-{plugin}-jdbc_password_filepath"] +===== `jdbc_password_filepath` + + * Value type is <> + * There is no default value for this setting. 
+
+Path to the file containing the JDBC password
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout"]
+===== `jdbc_pool_timeout`
+
+ * Value type is <>
+ * Default value is `5`
+
+Connection pool configuration.
+The number of seconds to wait to acquire a connection before raising a PoolTimeoutError (default 5)
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_user"]
+===== `jdbc_user`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC user
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_validate_connection"]
+===== `jdbc_validate_connection`
+
+ * Value type is <>
+ * Default value is `false`
+
+Connection pool configuration.
+Validate connection before use.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout"]
+===== `jdbc_validation_timeout`
+
+ * Value type is <>
+ * Default value is `3600`
+
+Connection pool configuration.
+How often to validate a connection (in seconds)
+
+[id="{version}-plugins-{type}s-{plugin}-last_run_metadata_path"]
+===== `last_run_metadata_path`
+
+ * Value type is <>
+ * Default value is `"/home/ph/.logstash_jdbc_last_run"`
+
+Path to file with last run time
+
+[id="{version}-plugins-{type}s-{plugin}-lowercase_column_names"]
+===== `lowercase_column_names`
+
+ * Value type is <>
+ * Default value is `true`
+
+Whether to force the lowercasing of identifier fields
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is <>
+ * Default value is `{}`
+
+Hash of query parameters, for example `{ "target_id" => "321" }`
+
+[id="{version}-plugins-{type}s-{plugin}-record_last_run"]
+===== `record_last_run`
+
+ * Value type is <>
+ * Default value is `true`
+
+Whether to save state or not in `last_run_metadata_path`
+
+[id="{version}-plugins-{type}s-{plugin}-schedule"]
+===== `schedule`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Schedule of when to periodically run statement, in Cron format,
+for example: "* * * * *" (execute query every minute, on the minute)
+
+There is no schedule by default. If no schedule is given, then the statement is run
+exactly once.
+
+[id="{version}-plugins-{type}s-{plugin}-sequel_opts"]
+===== `sequel_opts`
+
+ * Value type is <>
+ * Default value is `{}`
+
+General/Vendor-specific Sequel configuration options.
+
+An example of an optional connection pool configuration:
+`max_connections` - the maximum number of connections the connection pool will open.
+
+Examples of vendor-specific options can be found on this
+documentation page: https://github.com/jeremyevans/sequel/blob/master/doc/opening_databases.rdoc
+
+[id="{version}-plugins-{type}s-{plugin}-sql_log_level"]
+===== `sql_log_level`
+
+ * Value can be any of: `fatal`, `error`, `warn`, `info`, `debug`
+ * Default value is `"info"`
+
+Log level at which to log SQL queries. The accepted values are the common ones: fatal, error, warn,
+info, and debug. The default value is info.
+
+[id="{version}-plugins-{type}s-{plugin}-statement"]
+===== `statement`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Statement to execute.
+
+To use parameters, use named parameter syntax.
+For example:
+
+[source, ruby]
+-----------------------------------------------
+"SELECT * FROM MYTABLE WHERE id = :target_id"
+-----------------------------------------------
+
+Here, `:target_id` is a named parameter. You can configure named parameters
+with the `parameters` setting.
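+
+Putting `statement` and `parameters` together, a minimal sketch (connection settings
+omitted for brevity, values hypothetical):
+
+[source,ruby]
+-----------------------------------------------
+input {
+  jdbc {
+    # ... connection settings ...
+    statement => "SELECT * FROM MYTABLE WHERE id = :target_id"
+    parameters => { "target_id" => "321" }
+  }
+}
+-----------------------------------------------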
+ +[id="{version}-plugins-{type}s-{plugin}-statement_filepath"] +===== `statement_filepath` + + * Value type is <> + * There is no default value for this setting. + +Path of file containing statement to execute + +[id="{version}-plugins-{type}s-{plugin}-tracking_column"] +===== `tracking_column` + + * Value type is <> + * There is no default value for this setting. + +If tracking column value rather than timestamp, the column whose value is to be tracked + +[id="{version}-plugins-{type}s-{plugin}-tracking_column_type"] +===== `tracking_column_type` + + * Value can be any of: `numeric`, `timestamp` + * Default value is `"numeric"` + +Type of tracking column. Currently only "numeric" and "timestamp" + +[id="{version}-plugins-{type}s-{plugin}-use_column_value"] +===== `use_column_value` + + * Value type is <> + * Default value is `false` + +Use an incremental column value rather than a timestamp + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/jdbc-v4.2.2.asciidoc b/docs/versioned-plugins/inputs/jdbc-v4.2.2.asciidoc new file mode 100644 index 000000000..597a8b55e --- /dev/null +++ b/docs/versioned-plugins/inputs/jdbc-v4.2.2.asciidoc @@ -0,0 +1,486 @@ +:plugin: jdbc +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.2.2 +:release_date: 2017-07-25 +:changelog_url: https://github.com/logstash-plugins/logstash-input-jdbc/blob/v4.2.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Jdbc input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This plugin was created as a way to ingest data in any database +with a JDBC interface into Logstash. You can periodically schedule ingestion +using a cron syntax (see `schedule` setting) or run the query one time to load +data into Logstash. Each row in the resultset becomes a single event. +Columns in the resultset are converted into fields in the event. + +==== Drivers + +This plugin does not come packaged with JDBC driver libraries. The desired +jdbc driver library must be explicitly passed in to the plugin using the +`jdbc_driver_library` configuration option. + +==== Scheduling + +Input from this plugin can be scheduled to run periodically according to a specific +schedule. This scheduling syntax is powered by https://github.com/jmettraux/rufus-scheduler[rufus-scheduler]. +The syntax is cron-like with some extensions specific to Rufus (e.g. timezone support ). + +Examples: + +|========================================================== +| `* 5 * 1-3 *` | will execute every minute of 5am every day of January through March. +| `0 * * * *` | will execute on the 0th minute of every hour every day. +| `0 6 * * * America/Chicago` | will execute at 6:00am (UTC/GMT -5) every day. +|========================================================== + + +Further documentation describing this syntax can be found https://github.com/jmettraux/rufus-scheduler#parsing-cronlines-and-time-strings[here]. + +==== State + +The plugin will persist the `sql_last_value` parameter in the form of a +metadata file stored in the configured `last_run_metadata_path`. 
Upon query execution,
+this file will be updated with the current value of `sql_last_value`. Next time
+the pipeline starts up, this value will be updated by reading from the file. If
+`clean_run` is set to true, this value will be ignored and `sql_last_value` will be
+set to Jan 1, 1970, or 0 if `use_column_value` is true, as if no query has ever been executed.
+
+==== Dealing With Large Result-sets
+
+Many JDBC drivers use the `fetch_size` parameter to limit how many
+results are pre-fetched at a time from the cursor into the client's cache
+before retrieving more results from the result-set. This is configured in
+this plugin using the `jdbc_fetch_size` configuration option. No fetch size
+is set by default in this plugin, so the specific driver's default size will
+be used.
+
+==== Usage
+
+Here is an example of setting up the plugin to fetch data from a MySQL database.
+First, we place the appropriate JDBC driver library in our current
+path (this can be placed anywhere on your filesystem). In this example, we connect to
+the 'mydb' database as the user 'mysql' and wish to input all rows in the 'songs'
+table that match a specific artist. The following example demonstrates a possible
+Logstash configuration for this. The `schedule` option in this example will
+instruct the plugin to execute this input statement on the minute, every minute.
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  jdbc {
+    jdbc_driver_library => "mysql-connector-java-5.1.36-bin.jar"
+    jdbc_driver_class => "com.mysql.jdbc.Driver"
+    jdbc_connection_string => "jdbc:mysql://localhost:3306/mydb"
+    jdbc_user => "mysql"
+    parameters => { "favorite_artist" => "Beethoven" }
+    schedule => "* * * * *"
+    statement => "SELECT * from songs where artist = :favorite_artist"
+  }
+}
+------------------------------------------------------------------------------
+
+==== Configuring SQL statement
+
+An SQL statement is required for this input. It can be passed in as a string via the
+`statement` option, or read from a file (`statement_filepath`). The file
+option is typically used when the SQL statement is large or cumbersome to supply in the config.
+The file option only supports one SQL statement. The plugin will only accept one of the options.
+It cannot read a statement from a file as well as from the `statement` configuration parameter.
+
+==== Configuring multiple SQL statements
+
+Configuring multiple SQL statements is useful when there is a need to query and ingest data
+from different database tables or views. It is possible to define separate Logstash
+configuration files for each statement or to define multiple statements in a single configuration
+file. When using multiple statements in a single Logstash configuration file, each statement
+has to be defined as a separate jdbc input (including jdbc driver, connection string and other
+required parameters).
+
+Please note that if any of the statements use the `sql_last_value` parameter (e.g. for
+ingesting only data changed since last run), each input should define its own
+`last_run_metadata_path` parameter. Failure to do so will result in undesired behaviour, as
+all inputs will store their state to the same (default) metadata file, effectively
+overwriting each other's `sql_last_value`.
+
+==== Predefined Parameters
+
+Some parameters are built-in and can be used from within your queries.
+Here is the list: + +|========================================================== +|sql_last_value | The value used to calculate which rows to query. Before any query is run, +this is set to Thursday, 1 January 1970, or 0 if `use_column_value` is true and +`tracking_column` is set. It is updated accordingly after subsequent queries are run. +|========================================================== + +Example: +[source,ruby] +--------------------------------------------------------------------------------------------------- +input { + jdbc { + statement => "SELECT id, mycolumn1, mycolumn2 FROM my_table WHERE id > :sql_last_value" + use_column_value => true + tracking_column => "id" + # ... other configuration bits + } +} +--------------------------------------------------------------------------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Jdbc Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-clean_run>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-columns_charset>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_connection_string>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-jdbc_default_timezone>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_class>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_library>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_fetch_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_page_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_password_filepath>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_user>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-jdbc_validate_connection>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-last_run_metadata_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-lowercase_column_names>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-record_last_run>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sequel_opts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sql_log_level>> |<>, one of `["fatal", "error", "warn", "info", "debug"]`|No +| <<{version}-plugins-{type}s-{plugin}-statement>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-statement_filepath>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-tracking_column>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tracking_column_type>> |<>, one of `["numeric", "timestamp"]`|No +| <<{version}-plugins-{type}s-{plugin}-use_column_value>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. 
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-clean_run"]
+===== `clean_run`
+
+ * Value type is <>
+ * Default value is `false`
+
+Whether the previous run state should be preserved
+
+[id="{version}-plugins-{type}s-{plugin}-columns_charset"]
+===== `columns_charset`
+
+ * Value type is <>
+ * Default value is `{}`
+
+The character encoding for specific columns. This option will override the `:charset` option
+for the specified columns.
+
+Example:
+[source,ruby]
+-------------------------------------------------------
+input {
+  jdbc {
+    ...
+    columns_charset => { "column0" => "ISO-8859-1" }
+    ...
+  }
+}
+-------------------------------------------------------
+This will convert only column0, which has ISO-8859-1 as its original encoding.
+
+[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts"]
+===== `connection_retry_attempts`
+
+ * Value type is <>
+ * Default value is `1`
+
+Maximum number of times to try connecting to the database
+
+[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time"]
+===== `connection_retry_attempts_wait_time`
+
+ * Value type is <>
+ * Default value is `0.5`
+
+Number of seconds to sleep between connection attempts
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_connection_string"]
+===== `jdbc_connection_string`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC connection string
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_default_timezone"]
+===== `jdbc_default_timezone`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Timezone conversion.
+SQL does not allow for timezone data in timestamp fields. This plugin will automatically
+convert your SQL timestamp fields to Logstash timestamps, in relative UTC time in ISO8601 format.
+
+Using this setting will manually assign a specified timezone offset, instead
+of using the timezone setting of the local machine. You must use a canonical
+timezone, *America/Denver*, for example.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_class"]
+===== `jdbc_driver_class`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC driver class to load, for example, "org.apache.derby.jdbc.ClientDriver".
+NB: per https://github.com/logstash-plugins/logstash-input-jdbc/issues/43, if you are using
+the Oracle JDBC driver (ojdbc6.jar) the correct `jdbc_driver_class` is `"Java::oracle.jdbc.driver.OracleDriver"`.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_library"]
+===== `jdbc_driver_library`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path to the third-party JDBC driver library. In case of multiple libraries being
+required you can pass them separated by a comma.
+
+If not provided, the plugin will look for the driver class in the Logstash Java classpath.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_fetch_size"]
+===== `jdbc_fetch_size`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC fetch size.
If not provided, the respective driver's default will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_page_size"]
+===== `jdbc_page_size`
+
+ * Value type is <>
+ * Default value is `100000`
+
+JDBC page size
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled"]
+===== `jdbc_paging_enabled`
+
+ * Value type is <>
+ * Default value is `false`
+
+JDBC enable paging
+
+This will cause an SQL statement to be broken up into multiple queries.
+Each query will use limits and offsets to collectively retrieve the full
+result-set. The limit size is set with `jdbc_page_size`.
+
+Be aware that ordering is not guaranteed between queries.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_password"]
+===== `jdbc_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC password
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_password_filepath"]
+===== `jdbc_password_filepath`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path to the file containing the JDBC password
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout"]
+===== `jdbc_pool_timeout`
+
+ * Value type is <>
+ * Default value is `5`
+
+Connection pool configuration.
+The number of seconds to wait to acquire a connection before raising a PoolTimeoutError (default 5)
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_user"]
+===== `jdbc_user`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC user
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_validate_connection"]
+===== `jdbc_validate_connection`
+
+ * Value type is <>
+ * Default value is `false`
+
+Connection pool configuration.
+Validate connection before use.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout"]
+===== `jdbc_validation_timeout`
+
+ * Value type is <>
+ * Default value is `3600`
+
+Connection pool configuration.
+How often to validate a connection (in seconds)
+
+[id="{version}-plugins-{type}s-{plugin}-last_run_metadata_path"]
+===== `last_run_metadata_path`
+
+ * Value type is <>
+ * Default value is `"/home/ph/.logstash_jdbc_last_run"`
+
+Path to file with last run time
+
+[id="{version}-plugins-{type}s-{plugin}-lowercase_column_names"]
+===== `lowercase_column_names`
+
+ * Value type is <>
+ * Default value is `true`
+
+Whether to force the lowercasing of identifier fields
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is <>
+ * Default value is `{}`
+
+Hash of query parameters, for example `{ "target_id" => "321" }`
+
+[id="{version}-plugins-{type}s-{plugin}-record_last_run"]
+===== `record_last_run`
+
+ * Value type is <>
+ * Default value is `true`
+
+Whether to save state or not in `last_run_metadata_path`
+
+[id="{version}-plugins-{type}s-{plugin}-schedule"]
+===== `schedule`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Schedule of when to periodically run statement, in Cron format,
+for example: "* * * * *" (execute query every minute, on the minute)
+
+There is no schedule by default. If no schedule is given, then the statement is run
+exactly once.
+
+[id="{version}-plugins-{type}s-{plugin}-sequel_opts"]
+===== `sequel_opts`
+
+ * Value type is <>
+ * Default value is `{}`
+
+General/Vendor-specific Sequel configuration options.
+
+An example of an optional connection pool configuration:
+`max_connections` - the maximum number of connections the connection pool will open.
+
+Examples of vendor-specific options can be found on this
+documentation page: https://github.com/jeremyevans/sequel/blob/master/doc/opening_databases.rdoc
+
+[id="{version}-plugins-{type}s-{plugin}-sql_log_level"]
+===== `sql_log_level`
+
+ * Value can be any of: `fatal`, `error`, `warn`, `info`, `debug`
+ * Default value is `"info"`
+
+Log level at which to log SQL queries. The accepted values are the common ones: fatal, error, warn,
+info, and debug. The default value is info.
+
+[id="{version}-plugins-{type}s-{plugin}-statement"]
+===== `statement`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Statement to execute.
+
+To use parameters, use named parameter syntax.
+For example:
+
+[source, ruby]
+-----------------------------------------------
+"SELECT * FROM MYTABLE WHERE id = :target_id"
+-----------------------------------------------
+
+Here, `:target_id` is a named parameter. You can configure named parameters
+with the `parameters` setting.
+
+[id="{version}-plugins-{type}s-{plugin}-statement_filepath"]
+===== `statement_filepath`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path of the file containing the statement to execute
+
+[id="{version}-plugins-{type}s-{plugin}-tracking_column"]
+===== `tracking_column`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+When tracking a column value rather than a timestamp, the column whose value is to be tracked
+
+[id="{version}-plugins-{type}s-{plugin}-tracking_column_type"]
+===== `tracking_column_type`
+
+ * Value can be any of: `numeric`, `timestamp`
+ * Default value is `"numeric"`
+
+Type of tracking column. Currently only "numeric" and "timestamp" are supported.
+
+[id="{version}-plugins-{type}s-{plugin}-use_column_value"]
+===== `use_column_value`
+
+ * Value type is <>
+ * Default value is `false`
+
+Use an incremental column value rather than a timestamp
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/jdbc-v4.2.3.asciidoc b/docs/versioned-plugins/inputs/jdbc-v4.2.3.asciidoc
new file mode 100644
index 000000000..867104753
--- /dev/null
+++ b/docs/versioned-plugins/inputs/jdbc-v4.2.3.asciidoc
@@ -0,0 +1,486 @@
+:plugin: jdbc
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.2.3
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-input-jdbc/blob/v4.2.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Jdbc input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This plugin was created as a way to ingest data from any database
+with a JDBC interface into Logstash. You can periodically schedule ingestion
+using a cron syntax (see `schedule` setting) or run the query one time to load
+data into Logstash. Each row in the resultset becomes a single event.
+Columns in the resultset are converted into fields in the event.
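+
+As an illustration, with a hypothetical two-column resultset, each returned row maps to
+one event (the field names and values below are invented for the example):
+
+[source,ruby]
+-------------------------------------------------------
+# Hypothetical row: id=1, artist="Beethoven"
+# Resulting event fields (alongside the usual @timestamp and @version):
+#   "id" => 1
+#   "artist" => "Beethoven"
+-------------------------------------------------------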
+
+==== Drivers
+
+This plugin does not come packaged with JDBC driver libraries. The desired
+JDBC driver library must be explicitly passed in to the plugin using the
+`jdbc_driver_library` configuration option.
+
+==== Scheduling
+
+Input from this plugin can be scheduled to run periodically according to a specific
+schedule. This scheduling syntax is powered by https://github.com/jmettraux/rufus-scheduler[rufus-scheduler].
+The syntax is cron-like with some extensions specific to Rufus (e.g. timezone support).
+
+Examples:
+
+|==========================================================
+| `* 5 * 1-3 *` | will execute every minute of 5am every day of January through March.
+| `0 * * * *` | will execute on the 0th minute of every hour every day.
+| `0 6 * * * America/Chicago` | will execute at 6:00am (UTC/GMT -5) every day.
+|==========================================================
+
+
+Further documentation describing this syntax can be found https://github.com/jmettraux/rufus-scheduler#parsing-cronlines-and-time-strings[here].
+
+==== State
+
+The plugin will persist the `sql_last_value` parameter in the form of a
+metadata file stored in the configured `last_run_metadata_path`. Upon query execution,
+this file will be updated with the current value of `sql_last_value`. Next time
+the pipeline starts up, this value will be updated by reading from the file. If
+`clean_run` is set to true, this value will be ignored and `sql_last_value` will be
+set to Jan 1, 1970, or 0 if `use_column_value` is true, as if no query has ever been executed.
+
+==== Dealing With Large Result-sets
+
+Many JDBC drivers use the `fetch_size` parameter to limit how many
+results are pre-fetched at a time from the cursor into the client's cache
+before retrieving more results from the result-set. This is configured in
+this plugin using the `jdbc_fetch_size` configuration option. No fetch size
+is set by default in this plugin, so the specific driver's default size will
+be used.
+
+==== Usage
+
+Here is an example of setting up the plugin to fetch data from a MySQL database.
+First, we place the appropriate JDBC driver library in our current
+path (this can be placed anywhere on your filesystem). In this example, we connect to
+the 'mydb' database as the user 'mysql' and wish to input all rows in the 'songs'
+table that match a specific artist. The following example demonstrates a possible
+Logstash configuration for this. The `schedule` option in this example will
+instruct the plugin to execute this input statement on the minute, every minute.
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  jdbc {
+    jdbc_driver_library => "mysql-connector-java-5.1.36-bin.jar"
+    jdbc_driver_class => "com.mysql.jdbc.Driver"
+    jdbc_connection_string => "jdbc:mysql://localhost:3306/mydb"
+    jdbc_user => "mysql"
+    parameters => { "favorite_artist" => "Beethoven" }
+    schedule => "* * * * *"
+    statement => "SELECT * from songs where artist = :favorite_artist"
+  }
+}
+------------------------------------------------------------------------------
+
+==== Configuring SQL statement
+
+An SQL statement is required for this input. It can be passed in as a string via the
+`statement` option, or read from a file (`statement_filepath`). The file
+option is typically used when the SQL statement is large or cumbersome to supply in the config.
+The file option only supports one SQL statement. The plugin will only accept one of the options.
+It cannot read a statement from a file as well as from the `statement` configuration parameter. + +==== Configuring multiple SQL statements + +Configuring multiple SQL statements is useful when there is a need to query and ingest data +from different database tables or views. It is possible to define separate Logstash +configuration files for each statement or to define multiple statements in a single configuration +file. When using multiple statements in a single Logstash configuration file, each statement +has to be defined as a separate jdbc input (including jdbc driver, connection string and other +required parameters). + +Please note that if any of the statements use the `sql_last_value` parameter (e.g. for +ingesting only data changed since last run), each input should define its own +`last_run_metadata_path` parameter. Failure to do so will result in undesired behaviour, as +all inputs will store their state to the same (default) metadata file, effectively +overwriting each other's `sql_last_value`. + +==== Predefined Parameters + +Some parameters are built-in and can be used from within your queries. +Here is the list: + +|========================================================== +|sql_last_value | The value used to calculate which rows to query. Before any query is run, +this is set to Thursday, 1 January 1970, or 0 if `use_column_value` is true and +`tracking_column` is set. It is updated accordingly after subsequent queries are run. +|========================================================== + +Example: +[source,ruby] +--------------------------------------------------------------------------------------------------- +input { + jdbc { + statement => "SELECT id, mycolumn1, mycolumn2 FROM my_table WHERE id > :sql_last_value" + use_column_value => true + tracking_column => "id" + # ... other configuration bits + } +} +--------------------------------------------------------------------------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Jdbc Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+
+==== Predefined Parameters
+
+Some parameters are built-in and can be used from within your queries.
+Here is the list:
+
+|==========================================================
+|sql_last_value | The value used to calculate which rows to query. Before any query is run,
+this is set to Thursday, 1 January 1970, or 0 if `use_column_value` is true and
+`tracking_column` is set. It is updated accordingly after subsequent queries are run.
+|==========================================================
+
+Example:
+[source,ruby]
+---------------------------------------------------------------------------------------------------
+input {
+  jdbc {
+    statement => "SELECT id, mycolumn1, mycolumn2 FROM my_table WHERE id > :sql_last_value"
+    use_column_value => true
+    tracking_column => "id"
+    # ... other configuration bits
+  }
+}
+---------------------------------------------------------------------------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Jdbc Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-clean_run>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-columns_charset>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-jdbc_connection_string>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-jdbc_default_timezone>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_class>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_library>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-jdbc_fetch_size>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-jdbc_page_size>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-jdbc_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-jdbc_password_filepath>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-jdbc_user>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-jdbc_validate_connection>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-last_run_metadata_path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-lowercase_column_names>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-record_last_run>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sequel_opts>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sql_log_level>> |<>, one of `["fatal", "error", "warn", "info", "debug"]`|No
+| <<{version}-plugins-{type}s-{plugin}-statement>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-statement_filepath>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-tracking_column>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-tracking_column_type>> |<>, one of `["numeric", "timestamp"]`|No
+| <<{version}-plugins-{type}s-{plugin}-use_column_value>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-clean_run"]
+===== `clean_run`
+
+ * Value type is <>
+ * Default value is `false`
+
+Whether the previous run state should be preserved
+
+[id="{version}-plugins-{type}s-{plugin}-columns_charset"]
+===== `columns_charset`
+
+ * Value type is <>
+ * Default value is `{}`
+
+The character encoding for specific columns. This option will override the `:charset` option
+for the specified columns.
+
+Example:
+[source,ruby]
+-------------------------------------------------------
+input {
+  jdbc {
+    ...
+    columns_charset => { "column0" => "ISO-8859-1" }
+    ...
+  }
+}
+-------------------------------------------------------
+This will convert only `column0`, whose original encoding is ISO-8859-1.
+
+[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts"]
+===== `connection_retry_attempts`
+
+ * Value type is <>
+ * Default value is `1`
+
+Maximum number of times to try connecting to the database
+
+[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time"]
+===== `connection_retry_attempts_wait_time`
+
+ * Value type is <>
+ * Default value is `0.5`
+
+Number of seconds to sleep between connection attempts
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_connection_string"]
+===== `jdbc_connection_string`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC connection string
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_default_timezone"]
+===== `jdbc_default_timezone`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Timezone conversion.
+SQL does not allow for timezone data in timestamp fields. This plugin will automatically
+convert your SQL timestamp fields to Logstash timestamps, in relative UTC time in ISO8601 format.
+
+Using this setting will manually assign a specified timezone offset, instead
+of using the timezone setting of the local machine. You must use a canonical
+timezone, *America/Denver*, for example.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_class"]
+===== `jdbc_driver_class`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC driver class to load, for example, "org.apache.derby.jdbc.ClientDriver".
+NB: per https://github.com/logstash-plugins/logstash-input-jdbc/issues/43, if you are using
+the Oracle JDBC driver (ojdbc6.jar), the correct `jdbc_driver_class` is `"Java::oracle.jdbc.driver.OracleDriver"`.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_library"]
+===== `jdbc_driver_library`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC driver library path to third party driver library. In case of multiple libraries being
+required you can pass them separated by a comma.
+
+If not provided, the plugin will look for the driver class in the Logstash Java classpath.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_fetch_size"]
+===== `jdbc_fetch_size`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC fetch size. If not provided, the driver's default will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_page_size"]
+===== `jdbc_page_size`
+
+ * Value type is <>
+ * Default value is `100000`
+
+JDBC page size
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled"]
+===== `jdbc_paging_enabled`
+
+ * Value type is <>
+ * Default value is `false`
+
+JDBC enable paging
+
+This will cause a SQL statement to be broken up into multiple queries.
+Each query will use limits and offsets to collectively retrieve the full
+result-set. The limit size is set with `jdbc_page_size`.
+
+Be aware that ordering is not guaranteed between queries.
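+
+As an illustration, a sketch of enabling paging (the page size here is arbitrary):
+
+[source,ruby]
+-------------------------------------------------------
+input {
+  jdbc {
+    # ... connection settings as in the Usage example ...
+    statement => "SELECT * from songs"
+    jdbc_paging_enabled => true
+    jdbc_page_size => 25000    # each generated query fetches at most 25000 rows
+  }
+}
+-------------------------------------------------------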
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_password"]
+===== `jdbc_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC password
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_password_filepath"]
+===== `jdbc_password_filepath`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path to a file containing the JDBC password
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout"]
+===== `jdbc_pool_timeout`
+
+ * Value type is <>
+ * Default value is `5`
+
+Connection pool configuration.
+The number of seconds to wait to acquire a connection before raising a PoolTimeoutError (default 5)
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_user"]
+===== `jdbc_user`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC user
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_validate_connection"]
+===== `jdbc_validate_connection`
+
+ * Value type is <>
+ * Default value is `false`
+
+Connection pool configuration.
+Validate connection before use.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout"]
+===== `jdbc_validation_timeout`
+
+ * Value type is <>
+ * Default value is `3600`
+
+Connection pool configuration.
+How often to validate a connection (in seconds)
+
+[id="{version}-plugins-{type}s-{plugin}-last_run_metadata_path"]
+===== `last_run_metadata_path`
+
+ * Value type is <>
+ * Default value is `"/home/ph/.logstash_jdbc_last_run"`
+
+Path to file with last run time
+
+[id="{version}-plugins-{type}s-{plugin}-lowercase_column_names"]
+===== `lowercase_column_names`
+
+ * Value type is <>
+ * Default value is `true`
+
+Whether to force the lowercasing of identifier fields
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is <>
+ * Default value is `{}`
+
+Hash of query parameters, for example `{ "target_id" => "321" }`
+
+[id="{version}-plugins-{type}s-{plugin}-record_last_run"]
+===== `record_last_run`
+
+ * Value type is <>
+ * Default value is `true`
+
+Whether to save state in `last_run_metadata_path`
+
+[id="{version}-plugins-{type}s-{plugin}-schedule"]
+===== `schedule`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Schedule of when to periodically run statement, in Cron format.
+For example: "* * * * *" (execute query every minute, on the minute)
+
+There is no schedule by default. If no schedule is given, then the statement is run
+exactly once.
+
+[id="{version}-plugins-{type}s-{plugin}-sequel_opts"]
+===== `sequel_opts`
+
+ * Value type is <>
+ * Default value is `{}`
+
+General/Vendor-specific Sequel configuration options.
+
+An example of an optional connection pool configuration:
+    max_connections - The maximum number of connections the connection pool will open
+
+Examples of vendor-specific options can be found on this
+documentation page: https://github.com/jeremyevans/sequel/blob/master/doc/opening_databases.rdoc
+
+[id="{version}-plugins-{type}s-{plugin}-sql_log_level"]
+===== `sql_log_level`
+
+ * Value can be any of: `fatal`, `error`, `warn`, `info`, `debug`
+ * Default value is `"info"`
+
+Log level at which to log SQL queries. The accepted values are the common ones: fatal, error, warn,
+info, and debug. The default value is info.
+
+[id="{version}-plugins-{type}s-{plugin}-statement"]
+===== `statement`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Statement to execute
+
+To use parameters, use named parameter syntax.
+For example:
+
+[source, ruby]
+-----------------------------------------------
+"SELECT * FROM MYTABLE WHERE id = :target_id"
+-----------------------------------------------
+
+Here, ":target_id" is a named parameter. You can configure named parameters
+with the `parameters` setting.
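+
+Putting `statement` and `parameters` together, a minimal sketch (the table name and
+parameter value are illustrative) might look like:
+
+[source,ruby]
+-------------------------------------------------------
+input {
+  jdbc {
+    # ... connection settings as in the Usage example ...
+    parameters => { "target_id" => "321" }
+    statement => "SELECT * FROM MYTABLE WHERE id = :target_id"
+  }
+}
+-------------------------------------------------------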
+
+[id="{version}-plugins-{type}s-{plugin}-statement_filepath"]
+===== `statement_filepath`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path of file containing statement to execute
+
+[id="{version}-plugins-{type}s-{plugin}-tracking_column"]
+===== `tracking_column`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The column whose value is to be tracked, if tracking a column value rather than a timestamp
+
+[id="{version}-plugins-{type}s-{plugin}-tracking_column_type"]
+===== `tracking_column_type`
+
+ * Value can be any of: `numeric`, `timestamp`
+ * Default value is `"numeric"`
+
+Type of tracking column. Currently only "numeric" and "timestamp" are supported.
+
+[id="{version}-plugins-{type}s-{plugin}-use_column_value"]
+===== `use_column_value`
+
+ * Value type is <>
+ * Default value is `false`
+
+Use an incremental column value rather than a timestamp
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/jdbc-v4.2.4.asciidoc b/docs/versioned-plugins/inputs/jdbc-v4.2.4.asciidoc
new file mode 100644
index 000000000..b6545a6dc
--- /dev/null
+++ b/docs/versioned-plugins/inputs/jdbc-v4.2.4.asciidoc
@@ -0,0 +1,486 @@
+:plugin: jdbc
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.2.4
+:release_date: 2017-08-21
+:changelog_url: https://github.com/logstash-plugins/logstash-input-jdbc/blob/v4.2.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Jdbc input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This plugin was created as a way to ingest data from any database
+with a JDBC interface into Logstash. You can periodically schedule ingestion
+using a cron syntax (see the `schedule` setting) or run the query one time to load
+data into Logstash. Each row in the resultset becomes a single event.
+Columns in the resultset are converted into fields in the event.
+
+==== Drivers
+
+This plugin does not come packaged with JDBC driver libraries. The desired
+JDBC driver library must be explicitly passed in to the plugin using the
+`jdbc_driver_library` configuration option.
+
+==== Scheduling
+
+Input from this plugin can be scheduled to run periodically according to a specific
+schedule. This scheduling syntax is powered by https://github.com/jmettraux/rufus-scheduler[rufus-scheduler].
+The syntax is cron-like with some extensions specific to Rufus (e.g. timezone support).
+
+Examples:
+
+|==========================================================
+| `* 5 * 1-3 *` | will execute every minute of 5am every day of January through March.
+| `0 * * * *` | will execute on the 0th minute of every hour every day.
+| `0 6 * * * America/Chicago` | will execute at 6:00am (UTC/GMT -5) every day.
+|==========================================================
+
+
+Further documentation describing this syntax can be found https://github.com/jmettraux/rufus-scheduler#parsing-cronlines-and-time-strings[here].
+
+==== State
+
+The plugin will persist the `sql_last_value` parameter in the form of a
+metadata file stored in the configured `last_run_metadata_path`. Upon query execution,
+this file will be updated with the current value of `sql_last_value`. Next time
+the pipeline starts up, this value will be restored by reading it from the file. If
+`clean_run` is set to true, this value will be ignored and `sql_last_value` will be
+set to Jan 1, 1970, or 0 if `use_column_value` is true, as if no query has ever been executed.
+
+==== Dealing With Large Result-sets
+
+Many JDBC drivers use the `fetch_size` parameter to limit how many
+results are pre-fetched at a time from the cursor into the client's cache
+before retrieving more results from the result-set. This is configured in
+this plugin using the `jdbc_fetch_size` configuration option. No fetch size
+is set by default in this plugin, so the specific driver's default size will
+be used.
+
+==== Usage
+
+Here is an example of setting up the plugin to fetch data from a MySQL database.
+First, we place the appropriate JDBC driver library in our current
+path (it can be placed anywhere on your filesystem). In this example, we connect to
+the 'mydb' database using the user 'mysql' and want to input all rows in the 'songs'
+table that match a specific artist. The following example demonstrates a possible
+Logstash configuration for this. The `schedule` option in this example will
+instruct the plugin to execute this input statement on the minute, every minute.
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  jdbc {
+    jdbc_driver_library => "mysql-connector-java-5.1.36-bin.jar"
+    jdbc_driver_class => "com.mysql.jdbc.Driver"
+    jdbc_connection_string => "jdbc:mysql://localhost:3306/mydb"
+    jdbc_user => "mysql"
+    parameters => { "favorite_artist" => "Beethoven" }
+    schedule => "* * * * *"
+    statement => "SELECT * from songs where artist = :favorite_artist"
+  }
+}
+------------------------------------------------------------------------------
+
+==== Configuring SQL statement
+
+A SQL statement is required for this input. It can be passed in as a string via the
+`statement` option, or read from a file (`statement_filepath`). The file
+option is typically used when the SQL statement is large or cumbersome to supply in the config.
+The file option only supports one SQL statement. The plugin will only accept one of the options.
+It cannot read a statement from a file as well as from the `statement` configuration parameter.
+
+==== Configuring multiple SQL statements
+
+Configuring multiple SQL statements is useful when there is a need to query and ingest data
+from different database tables or views. It is possible to define separate Logstash
+configuration files for each statement or to define multiple statements in a single configuration
+file. When using multiple statements in a single Logstash configuration file, each statement
+has to be defined as a separate jdbc input (including the JDBC driver, connection string, and other
+required parameters).
+
+Please note that if any of the statements use the `sql_last_value` parameter (e.g. for
+ingesting only data changed since the last run), each input should define its own
+`last_run_metadata_path` parameter. Failure to do so will result in undesired behaviour, as
+all inputs will store their state in the same (default) metadata file, effectively
+overwriting each other's `sql_last_value`.
+
+==== Predefined Parameters
+
+Some parameters are built-in and can be used from within your queries.
+Here is the list: + +|========================================================== +|sql_last_value | The value used to calculate which rows to query. Before any query is run, +this is set to Thursday, 1 January 1970, or 0 if `use_column_value` is true and +`tracking_column` is set. It is updated accordingly after subsequent queries are run. +|========================================================== + +Example: +[source,ruby] +--------------------------------------------------------------------------------------------------- +input { + jdbc { + statement => "SELECT id, mycolumn1, mycolumn2 FROM my_table WHERE id > :sql_last_value" + use_column_value => true + tracking_column => "id" + # ... other configuration bits + } +} +--------------------------------------------------------------------------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Jdbc Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-clean_run>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-columns_charset>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_connection_string>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-jdbc_default_timezone>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_class>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_library>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_fetch_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_page_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_password_filepath>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_user>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-jdbc_validate_connection>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-last_run_metadata_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-lowercase_column_names>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-record_last_run>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sequel_opts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sql_log_level>> |<>, one of `["fatal", "error", "warn", "info", "debug"]`|No +| <<{version}-plugins-{type}s-{plugin}-statement>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-statement_filepath>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-tracking_column>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tracking_column_type>> |<>, one of `["numeric", "timestamp"]`|No +| <<{version}-plugins-{type}s-{plugin}-use_column_value>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. 
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-clean_run"]
+===== `clean_run`
+
+ * Value type is <>
+ * Default value is `false`
+
+Whether the previous run state should be preserved
+
+[id="{version}-plugins-{type}s-{plugin}-columns_charset"]
+===== `columns_charset`
+
+ * Value type is <>
+ * Default value is `{}`
+
+The character encoding for specific columns. This option will override the `:charset` option
+for the specified columns.
+
+Example:
+[source,ruby]
+-------------------------------------------------------
+input {
+  jdbc {
+    ...
+    columns_charset => { "column0" => "ISO-8859-1" }
+    ...
+  }
+}
+-------------------------------------------------------
+This will convert only `column0`, whose original encoding is ISO-8859-1.
+
+[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts"]
+===== `connection_retry_attempts`
+
+ * Value type is <>
+ * Default value is `1`
+
+Maximum number of times to try connecting to the database
+
+[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time"]
+===== `connection_retry_attempts_wait_time`
+
+ * Value type is <>
+ * Default value is `0.5`
+
+Number of seconds to sleep between connection attempts
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_connection_string"]
+===== `jdbc_connection_string`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC connection string
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_default_timezone"]
+===== `jdbc_default_timezone`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Timezone conversion.
+SQL does not allow for timezone data in timestamp fields. This plugin will automatically
+convert your SQL timestamp fields to Logstash timestamps, in relative UTC time in ISO8601 format.
+
+Using this setting will manually assign a specified timezone offset, instead
+of using the timezone setting of the local machine. You must use a canonical
+timezone, *America/Denver*, for example.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_class"]
+===== `jdbc_driver_class`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC driver class to load, for example, "org.apache.derby.jdbc.ClientDriver".
+NB: per https://github.com/logstash-plugins/logstash-input-jdbc/issues/43, if you are using
+the Oracle JDBC driver (ojdbc6.jar), the correct `jdbc_driver_class` is `"Java::oracle.jdbc.driver.OracleDriver"`.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_library"]
+===== `jdbc_driver_library`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC driver library path to third party driver library. In case of multiple libraries being
+required you can pass them separated by a comma.
+
+If not provided, the plugin will look for the driver class in the Logstash Java classpath.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_fetch_size"]
+===== `jdbc_fetch_size`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC fetch size. If not provided, the driver's default will be used.
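+
+For example, a hedged sketch that caps pre-fetching at 1000 rows per round trip (the
+value is arbitrary and its effect is driver-dependent):
+
+[source,ruby]
+-------------------------------------------------------
+input {
+  jdbc {
+    # ... connection settings as in the Usage example ...
+    statement => "SELECT * from songs"
+    jdbc_fetch_size => 1000    # rows pre-fetched per round trip to the database
+  }
+}
+-------------------------------------------------------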
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_page_size"]
+===== `jdbc_page_size`
+
+ * Value type is <>
+ * Default value is `100000`
+
+JDBC page size
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled"]
+===== `jdbc_paging_enabled`
+
+ * Value type is <>
+ * Default value is `false`
+
+JDBC enable paging
+
+This will cause a SQL statement to be broken up into multiple queries.
+Each query will use limits and offsets to collectively retrieve the full
+result-set. The limit size is set with `jdbc_page_size`.
+
+Be aware that ordering is not guaranteed between queries.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_password"]
+===== `jdbc_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC password
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_password_filepath"]
+===== `jdbc_password_filepath`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path to a file containing the JDBC password
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout"]
+===== `jdbc_pool_timeout`
+
+ * Value type is <>
+ * Default value is `5`
+
+Connection pool configuration.
+The number of seconds to wait to acquire a connection before raising a PoolTimeoutError (default 5)
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_user"]
+===== `jdbc_user`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC user
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_validate_connection"]
+===== `jdbc_validate_connection`
+
+ * Value type is <>
+ * Default value is `false`
+
+Connection pool configuration.
+Validate connection before use.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout"]
+===== `jdbc_validation_timeout`
+
+ * Value type is <>
+ * Default value is `3600`
+
+Connection pool configuration.
+How often to validate a connection (in seconds)
+
+[id="{version}-plugins-{type}s-{plugin}-last_run_metadata_path"]
+===== `last_run_metadata_path`
+
+ * Value type is <>
+ * Default value is `"/home/ph/.logstash_jdbc_last_run"`
+
+Path to file with last run time
+
+[id="{version}-plugins-{type}s-{plugin}-lowercase_column_names"]
+===== `lowercase_column_names`
+
+ * Value type is <>
+ * Default value is `true`
+
+Whether to force the lowercasing of identifier fields
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is <>
+ * Default value is `{}`
+
+Hash of query parameters, for example `{ "target_id" => "321" }`
+
+[id="{version}-plugins-{type}s-{plugin}-record_last_run"]
+===== `record_last_run`
+
+ * Value type is <>
+ * Default value is `true`
+
+Whether to save state in `last_run_metadata_path`
+
+[id="{version}-plugins-{type}s-{plugin}-schedule"]
+===== `schedule`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Schedule of when to periodically run statement, in Cron format.
+For example: "* * * * *" (execute query every minute, on the minute)
+
+There is no schedule by default. If no schedule is given, then the statement is run
+exactly once.
+
+[id="{version}-plugins-{type}s-{plugin}-sequel_opts"]
+===== `sequel_opts`
+
+ * Value type is <>
+ * Default value is `{}`
+
+General/Vendor-specific Sequel configuration options.
+
+An example of an optional connection pool configuration:
+    max_connections - The maximum number of connections the connection pool will open
+
+Examples of vendor-specific options can be found on this
+documentation page: https://github.com/jeremyevans/sequel/blob/master/doc/opening_databases.rdoc
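+
+For instance, passing a pool option through to Sequel might look like this sketch (the
+value is arbitrary):
+
+[source,ruby]
+-------------------------------------------------------
+input {
+  jdbc {
+    # ... connection settings as in the Usage example ...
+    sequel_opts => { "max_connections" => 10 }    # handed through to Sequel
+  }
+}
+-------------------------------------------------------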
+
+[id="{version}-plugins-{type}s-{plugin}-sql_log_level"]
+===== `sql_log_level`
+
+ * Value can be any of: `fatal`, `error`, `warn`, `info`, `debug`
+ * Default value is `"info"`
+
+Log level at which to log SQL queries. The accepted values are the common ones: fatal, error, warn,
+info, and debug. The default value is info.
+
+[id="{version}-plugins-{type}s-{plugin}-statement"]
+===== `statement`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Statement to execute
+
+To use parameters, use named parameter syntax.
+For example:
+
+[source, ruby]
+-----------------------------------------------
+"SELECT * FROM MYTABLE WHERE id = :target_id"
+-----------------------------------------------
+
+Here, ":target_id" is a named parameter. You can configure named parameters
+with the `parameters` setting.
+
+[id="{version}-plugins-{type}s-{plugin}-statement_filepath"]
+===== `statement_filepath`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path of file containing statement to execute
+
+[id="{version}-plugins-{type}s-{plugin}-tracking_column"]
+===== `tracking_column`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The column whose value is to be tracked, if tracking a column value rather than a timestamp
+
+[id="{version}-plugins-{type}s-{plugin}-tracking_column_type"]
+===== `tracking_column_type`
+
+ * Value can be any of: `numeric`, `timestamp`
+ * Default value is `"numeric"`
+
+Type of tracking column. Currently only "numeric" and "timestamp" are supported.
+
+[id="{version}-plugins-{type}s-{plugin}-use_column_value"]
+===== `use_column_value`
+
+ * Value type is <>
+ * Default value is `false`
+
+Use an incremental column value rather than a timestamp
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/jdbc-v4.3.0.asciidoc b/docs/versioned-plugins/inputs/jdbc-v4.3.0.asciidoc
new file mode 100644
index 000000000..1e1eed5aa
--- /dev/null
+++ b/docs/versioned-plugins/inputs/jdbc-v4.3.0.asciidoc
@@ -0,0 +1,486 @@
+:plugin: jdbc
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.3.0
+:release_date: 2017-10-27
+:changelog_url: https://github.com/logstash-plugins/logstash-input-jdbc/blob/v4.3.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Jdbc input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This plugin was created as a way to ingest data from any database
+with a JDBC interface into Logstash. You can periodically schedule ingestion
+using a cron syntax (see the `schedule` setting) or run the query one time to load
+data into Logstash. Each row in the resultset becomes a single event.
+Columns in the resultset are converted into fields in the event.
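+
+For instance, a minimal one-shot configuration (no `schedule`, so the statement runs
+exactly once) might look like this sketch, with illustrative connection values:
+
+[source,ruby]
+-------------------------------------------------------
+input {
+  jdbc {
+    jdbc_driver_library => "mysql-connector-java-5.1.36-bin.jar"
+    jdbc_driver_class => "com.mysql.jdbc.Driver"
+    jdbc_connection_string => "jdbc:mysql://localhost:3306/mydb"
+    jdbc_user => "mysql"
+    statement => "SELECT * from songs"
+  }
+}
+-------------------------------------------------------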
+
+==== Drivers
+
+This plugin does not come packaged with JDBC driver libraries. The desired
+JDBC driver library must be explicitly passed in to the plugin using the
+`jdbc_driver_library` configuration option.
+
+==== Scheduling
+
+Input from this plugin can be scheduled to run periodically according to a specific
+schedule. This scheduling syntax is powered by https://github.com/jmettraux/rufus-scheduler[rufus-scheduler].
+The syntax is cron-like with some extensions specific to Rufus (e.g. timezone support).
+
+Examples:
+
+|==========================================================
+| `* 5 * 1-3 *` | will execute every minute of 5am every day of January through March.
+| `0 * * * *` | will execute on the 0th minute of every hour every day.
+| `0 6 * * * America/Chicago` | will execute at 6:00am (UTC/GMT -5) every day.
+|==========================================================
+
+
+Further documentation describing this syntax can be found https://github.com/jmettraux/rufus-scheduler#parsing-cronlines-and-time-strings[here].
+
+==== State
+
+The plugin will persist the `sql_last_value` parameter in the form of a
+metadata file stored in the configured `last_run_metadata_path`. Upon query execution,
+this file will be updated with the current value of `sql_last_value`. Next time
+the pipeline starts up, this value will be restored by reading it from the file. If
+`clean_run` is set to true, this value will be ignored and `sql_last_value` will be
+set to Jan 1, 1970, or 0 if `use_column_value` is true, as if no query has ever been executed.
+
+==== Dealing With Large Result-sets
+
+Many JDBC drivers use the `fetch_size` parameter to limit how many
+results are pre-fetched at a time from the cursor into the client's cache
+before retrieving more results from the result-set. This is configured in
+this plugin using the `jdbc_fetch_size` configuration option. No fetch size
+is set by default in this plugin, so the specific driver's default size will
+be used.
+
+==== Usage
+
+Here is an example of setting up the plugin to fetch data from a MySQL database.
+First, we place the appropriate JDBC driver library in our current
+path (it can be placed anywhere on your filesystem). In this example, we connect to
+the 'mydb' database using the user 'mysql' and want to input all rows in the 'songs'
+table that match a specific artist. The following example demonstrates a possible
+Logstash configuration for this. The `schedule` option in this example will
+instruct the plugin to execute this input statement on the minute, every minute.
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  jdbc {
+    jdbc_driver_library => "mysql-connector-java-5.1.36-bin.jar"
+    jdbc_driver_class => "com.mysql.jdbc.Driver"
+    jdbc_connection_string => "jdbc:mysql://localhost:3306/mydb"
+    jdbc_user => "mysql"
+    parameters => { "favorite_artist" => "Beethoven" }
+    schedule => "* * * * *"
+    statement => "SELECT * from songs where artist = :favorite_artist"
+  }
+}
+------------------------------------------------------------------------------
+
+==== Configuring SQL statement
+
+A SQL statement is required for this input. It can be passed in as a string via the
+`statement` option, or read from a file (`statement_filepath`). The file
+option is typically used when the SQL statement is large or cumbersome to supply in the config.
+The file option only supports one SQL statement. The plugin will only accept one of the options.
+It cannot read a statement from a file as well as from the `statement` configuration parameter. + +==== Configuring multiple SQL statements + +Configuring multiple SQL statements is useful when there is a need to query and ingest data +from different database tables or views. It is possible to define separate Logstash +configuration files for each statement or to define multiple statements in a single configuration +file. When using multiple statements in a single Logstash configuration file, each statement +has to be defined as a separate jdbc input (including jdbc driver, connection string and other +required parameters). + +Please note that if any of the statements use the `sql_last_value` parameter (e.g. for +ingesting only data changed since last run), each input should define its own +`last_run_metadata_path` parameter. Failure to do so will result in undesired behaviour, as +all inputs will store their state to the same (default) metadata file, effectively +overwriting each other's `sql_last_value`. + +==== Predefined Parameters + +Some parameters are built-in and can be used from within your queries. +Here is the list: + +|========================================================== +|sql_last_value | The value used to calculate which rows to query. Before any query is run, +this is set to Thursday, 1 January 1970, or 0 if `use_column_value` is true and +`tracking_column` is set. It is updated accordingly after subsequent queries are run. +|========================================================== + +Example: +[source,ruby] +--------------------------------------------------------------------------------------------------- +input { + jdbc { + statement => "SELECT id, mycolumn1, mycolumn2 FROM my_table WHERE id > :sql_last_value" + use_column_value => true + tracking_column => "id" + # ... other configuration bits + } +} +--------------------------------------------------------------------------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Jdbc Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-clean_run>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-columns_charset>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_connection_string>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-jdbc_default_timezone>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_class>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_library>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_fetch_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_page_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_password_filepath>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_user>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-jdbc_validate_connection>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-last_run_metadata_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-lowercase_column_names>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-record_last_run>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sequel_opts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sql_log_level>> |<>, one of `["fatal", "error", "warn", "info", "debug"]`|No +| <<{version}-plugins-{type}s-{plugin}-statement>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-statement_filepath>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-tracking_column>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tracking_column_type>> |<>, one of `["numeric", "timestamp"]`|No +| <<{version}-plugins-{type}s-{plugin}-use_column_value>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-clean_run"] +===== `clean_run` + + * Value type is <> + * Default value is `false` + +Whether the previous run state should be preserved + +[id="{version}-plugins-{type}s-{plugin}-columns_charset"] +===== `columns_charset` + + * Value type is <> + * Default value is `{}` + +The character encoding for specific columns. This option will override the `:charset` option +for the specified columns. + +Example: +[source,ruby] +------------------------------------------------------- +input { + jdbc { + ... + columns_charset => { "column0" => "ISO-8859-1" } + ... + } +} +------------------------------------------------------- +this will only convert column0 that has ISO-8859-1 as an original encoding. 
+
+[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts"]
+===== `connection_retry_attempts`
+
+ * Value type is <>
+ * Default value is `1`
+
+Maximum number of times to try connecting to the database
+
+[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time"]
+===== `connection_retry_attempts_wait_time`
+
+ * Value type is <>
+ * Default value is `0.5`
+
+Number of seconds to sleep between connection attempts
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_connection_string"]
+===== `jdbc_connection_string`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC connection string
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_default_timezone"]
+===== `jdbc_default_timezone`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Timezone conversion.
+SQL does not allow for timezone data in timestamp fields. This plugin will automatically
+convert your SQL timestamp fields to Logstash timestamps, in relative UTC time in ISO8601 format.
+
+Using this setting will manually assign a specified timezone offset, instead
+of using the timezone setting of the local machine. You must use a canonical
+timezone, *America/Denver*, for example.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_class"]
+===== `jdbc_driver_class`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC driver class to load, for example, "org.apache.derby.jdbc.ClientDriver".
+NB: per https://github.com/logstash-plugins/logstash-input-jdbc/issues/43, if you are using
+the Oracle JDBC driver (ojdbc6.jar), the correct `jdbc_driver_class` is `"Java::oracle.jdbc.driver.OracleDriver"`.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_library"]
+===== `jdbc_driver_library`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC driver library path to third party driver library. In case of multiple libraries being
+required you can pass them separated by a comma.
+
+If not provided, the plugin will look for the driver class in the Logstash Java classpath.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_fetch_size"]
+===== `jdbc_fetch_size`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC fetch size. If not provided, the driver's default will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_page_size"]
+===== `jdbc_page_size`
+
+ * Value type is <>
+ * Default value is `100000`
+
+JDBC page size
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled"]
+===== `jdbc_paging_enabled`
+
+ * Value type is <>
+ * Default value is `false`
+
+JDBC enable paging
+
+This will cause a SQL statement to be broken up into multiple queries.
+Each query will use limits and offsets to collectively retrieve the full
+result-set. The limit size is set with `jdbc_page_size`.
+
+Be aware that ordering is not guaranteed between queries.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_password"]
+===== `jdbc_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC password
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_password_filepath"]
+===== `jdbc_password_filepath`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path to a file containing the JDBC password
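+
+A sketch of keeping the password out of the config by reading it from a file (the path
+is illustrative, and the file is assumed here to contain just the password):
+
+[source,ruby]
+-------------------------------------------------------
+input {
+  jdbc {
+    # ... connection settings as in the Usage example ...
+    jdbc_user => "mysql"
+    jdbc_password_filepath => "/path/to/secret"
+  }
+}
+-------------------------------------------------------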
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout"]
+===== `jdbc_pool_timeout`
+
+ * Value type is <>
+ * Default value is `5`
+
+Connection pool configuration.
+The number of seconds to wait to acquire a connection before raising a PoolTimeoutError (default 5)
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_user"]
+===== `jdbc_user`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC user
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_validate_connection"]
+===== `jdbc_validate_connection`
+
+ * Value type is <>
+ * Default value is `false`
+
+Connection pool configuration.
+Validate connection before use.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout"]
+===== `jdbc_validation_timeout`
+
+ * Value type is <>
+ * Default value is `3600`
+
+Connection pool configuration.
+How often to validate a connection (in seconds)
+
+[id="{version}-plugins-{type}s-{plugin}-last_run_metadata_path"]
+===== `last_run_metadata_path`
+
+ * Value type is <>
+ * Default value is `"/home/ph/.logstash_jdbc_last_run"`
+
+Path to file with last run time
+
+[id="{version}-plugins-{type}s-{plugin}-lowercase_column_names"]
+===== `lowercase_column_names`
+
+ * Value type is <>
+ * Default value is `true`
+
+Whether to force the lowercasing of identifier fields
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is <>
+ * Default value is `{}`
+
+Hash of query parameters, for example `{ "target_id" => "321" }`
+
+[id="{version}-plugins-{type}s-{plugin}-record_last_run"]
+===== `record_last_run`
+
+ * Value type is <>
+ * Default value is `true`
+
+Whether to save state in `last_run_metadata_path`
+
+[id="{version}-plugins-{type}s-{plugin}-schedule"]
+===== `schedule`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Schedule of when to periodically run statement, in Cron format.
+For example: "* * * * *" (execute query every minute, on the minute)
+
+There is no schedule by default. If no schedule is given, then the statement is run
+exactly once.
+
+[id="{version}-plugins-{type}s-{plugin}-sequel_opts"]
+===== `sequel_opts`
+
+ * Value type is <>
+ * Default value is `{}`
+
+General/Vendor-specific Sequel configuration options.
+
+An example of an optional connection pool configuration:
+    max_connections - The maximum number of connections the connection pool will open
+
+Examples of vendor-specific options can be found on this
+documentation page: https://github.com/jeremyevans/sequel/blob/master/doc/opening_databases.rdoc
+
+[id="{version}-plugins-{type}s-{plugin}-sql_log_level"]
+===== `sql_log_level`
+
+ * Value can be any of: `fatal`, `error`, `warn`, `info`, `debug`
+ * Default value is `"info"`
+
+Log level at which to log SQL queries. The accepted values are the common ones: fatal, error, warn,
+info, and debug. The default value is info.
+
+[id="{version}-plugins-{type}s-{plugin}-statement"]
+===== `statement`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Statement to execute
+
+To use parameters, use named parameter syntax.
+For example:
+
+[source, ruby]
+-----------------------------------------------
+"SELECT * FROM MYTABLE WHERE id = :target_id"
+-----------------------------------------------
+
+Here, ":target_id" is a named parameter. You can configure named parameters
+with the `parameters` setting.
+
+[id="{version}-plugins-{type}s-{plugin}-statement_filepath"]
+===== `statement_filepath`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path of file containing statement to execute
+
+[id="{version}-plugins-{type}s-{plugin}-tracking_column"]
+===== `tracking_column`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The column whose value is to be tracked, if tracking a column value rather than a timestamp
+
+[id="{version}-plugins-{type}s-{plugin}-tracking_column_type"]
+===== `tracking_column_type`
+
+ * Value can be any of: `numeric`, `timestamp`
+ * Default value is `"numeric"`
+
+Type of tracking column. Currently only "numeric" and "timestamp" are supported.
+
+[id="{version}-plugins-{type}s-{plugin}-use_column_value"]
+===== `use_column_value`
+
+ * Value type is <>
+ * Default value is `false`
+
+Use an incremental column value rather than a timestamp
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/jdbc-v4.3.1.asciidoc b/docs/versioned-plugins/inputs/jdbc-v4.3.1.asciidoc
new file mode 100644
index 000000000..d04ebd886
--- /dev/null
+++ b/docs/versioned-plugins/inputs/jdbc-v4.3.1.asciidoc
@@ -0,0 +1,486 @@
+:plugin: jdbc
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.3.1
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-jdbc/blob/v4.3.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Jdbc input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This plugin was created as a way to ingest data from any database
+with a JDBC interface into Logstash. You can periodically schedule ingestion
+using a cron syntax (see the `schedule` setting) or run the query one time to load
+data into Logstash. Each row in the resultset becomes a single event.
+Columns in the resultset are converted into fields in the event.
+
+==== Drivers
+
+This plugin does not come packaged with JDBC driver libraries. The desired
+JDBC driver library must be explicitly passed in to the plugin using the
+`jdbc_driver_library` configuration option.
+
+==== Scheduling
+
+Input from this plugin can be scheduled to run periodically according to a specific
+schedule. This scheduling syntax is powered by https://github.com/jmettraux/rufus-scheduler[rufus-scheduler].
+The syntax is cron-like with some extensions specific to Rufus (e.g. timezone support).
+
+Examples:
+
+|==========================================================
+| `* 5 * 1-3 *` | will execute every minute of 5am every day of January through March.
+| `0 * * * *` | will execute on the 0th minute of every hour every day.
+| `0 6 * * * America/Chicago` | will execute at 6:00am (UTC/GMT -5) every day.
+|==========================================================
+
+
+Further documentation describing this syntax can be found https://github.com/jmettraux/rufus-scheduler#parsing-cronlines-and-time-strings[here].
+
+==== State
+
+The plugin will persist the `sql_last_value` parameter in the form of a
+metadata file stored in the configured `last_run_metadata_path`. Upon query execution,
+this file will be updated with the current value of `sql_last_value`. Next time
+the pipeline starts up, this value will be restored by reading it from the file. If
+`clean_run` is set to true, this value will be ignored and `sql_last_value` will be
+set to Jan 1, 1970, or 0 if `use_column_value` is true, as if no query has ever been executed.
+
+==== Dealing With Large Result-sets
+
+Many JDBC drivers use the `fetch_size` parameter to limit how many
+results are pre-fetched at a time from the cursor into the client's cache
+before retrieving more results from the result-set. This is configured in
+this plugin using the `jdbc_fetch_size` configuration option. No fetch size
+is set by default in this plugin, so the specific driver's default size will
+be used.
+
+==== Usage
+
+Here is an example of setting up the plugin to fetch data from a MySQL database.
+First, we place the appropriate JDBC driver library in our current
+path (it can be placed anywhere on your filesystem). In this example, we connect to
+the 'mydb' database using the user 'mysql' and want to input all rows in the 'songs'
+table that match a specific artist. The following example demonstrates a possible
+Logstash configuration for this. The `schedule` option in this example will
+instruct the plugin to execute this input statement on the minute, every minute.
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  jdbc {
+    jdbc_driver_library => "mysql-connector-java-5.1.36-bin.jar"
+    jdbc_driver_class => "com.mysql.jdbc.Driver"
+    jdbc_connection_string => "jdbc:mysql://localhost:3306/mydb"
+    jdbc_user => "mysql"
+    parameters => { "favorite_artist" => "Beethoven" }
+    schedule => "* * * * *"
+    statement => "SELECT * from songs where artist = :favorite_artist"
+  }
+}
+------------------------------------------------------------------------------
+
+==== Configuring SQL statement
+
+A SQL statement is required for this input. It can be passed in as a string via the
+`statement` option, or read from a file (`statement_filepath`). The file
+option is typically used when the SQL statement is large or cumbersome to supply in the config.
+The file option only supports one SQL statement. The plugin will only accept one of the options.
+It cannot read a statement from a file as well as from the `statement` configuration parameter.
+
+==== Configuring multiple SQL statements
+
+Configuring multiple SQL statements is useful when there is a need to query and ingest data
+from different database tables or views. It is possible to define separate Logstash
+configuration files for each statement or to define multiple statements in a single configuration
+file. When using multiple statements in a single Logstash configuration file, each statement
+has to be defined as a separate jdbc input (including the JDBC driver, connection string, and other
+required parameters).
+
+Please note that if any of the statements use the `sql_last_value` parameter (e.g. for
+ingesting only data changed since the last run), each input should define its own
+`last_run_metadata_path` parameter. Failure to do so will result in undesired behaviour, as
+all inputs will store their state in the same (default) metadata file, effectively
+overwriting each other's `sql_last_value`.
+
+==== Predefined Parameters
+
+Some parameters are built-in and can be used from within your queries.
+Here is the list: + +|========================================================== +|sql_last_value | The value used to calculate which rows to query. Before any query is run, +this is set to Thursday, 1 January 1970, or 0 if `use_column_value` is true and +`tracking_column` is set. It is updated accordingly after subsequent queries are run. +|========================================================== + +Example: +[source,ruby] +--------------------------------------------------------------------------------------------------- +input { + jdbc { + statement => "SELECT id, mycolumn1, mycolumn2 FROM my_table WHERE id > :sql_last_value" + use_column_value => true + tracking_column => "id" + # ... other configuration bits + } +} +--------------------------------------------------------------------------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Jdbc Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-clean_run>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-columns_charset>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_connection_string>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-jdbc_default_timezone>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_class>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_library>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_fetch_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_page_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_password_filepath>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_user>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-jdbc_validate_connection>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-last_run_metadata_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-lowercase_column_names>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-record_last_run>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sequel_opts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sql_log_level>> |<>, one of `["fatal", "error", "warn", "info", "debug"]`|No +| <<{version}-plugins-{type}s-{plugin}-statement>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-statement_filepath>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-tracking_column>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tracking_column_type>> |<>, one of `["numeric", "timestamp"]`|No +| <<{version}-plugins-{type}s-{plugin}-use_column_value>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. 
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-clean_run"]
+===== `clean_run`
+
+ * Value type is <>
+ * Default value is `false`
+
+Whether the previous run state should be ignored. When set to `true`,
+`sql_last_value` is reset as if no query has ever been executed (see the State section above).
+
+[id="{version}-plugins-{type}s-{plugin}-columns_charset"]
+===== `columns_charset`
+
+ * Value type is <>
+ * Default value is `{}`
+
+The character encoding for specific columns. This option will override the `:charset` option
+for the specified columns.
+
+Example:
+[source,ruby]
+-------------------------------------------------------
+input {
+  jdbc {
+    ...
+    columns_charset => { "column0" => "ISO-8859-1" }
+    ...
+  }
+}
+-------------------------------------------------------
+This will convert only column0, which has ISO-8859-1 as its original encoding.
+
+[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts"]
+===== `connection_retry_attempts`
+
+ * Value type is <>
+ * Default value is `1`
+
+Maximum number of times to try connecting to the database
+
+[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time"]
+===== `connection_retry_attempts_wait_time`
+
+ * Value type is <>
+ * Default value is `0.5`
+
+Number of seconds to sleep between connection attempts
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_connection_string"]
+===== `jdbc_connection_string`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC connection string
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_default_timezone"]
+===== `jdbc_default_timezone`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Timezone conversion.
+SQL does not allow for timezone data in timestamp fields. This plugin will automatically
+convert your SQL timestamp fields to Logstash timestamps, in relative UTC time in ISO8601 format.
+
+Using this setting will manually assign a specified timezone offset, instead
+of using the timezone setting of the local machine. You must use a canonical
+timezone, *America/Denver*, for example.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_class"]
+===== `jdbc_driver_class`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC driver class to load, for example, "org.apache.derby.jdbc.ClientDriver".
+NB: per https://github.com/logstash-plugins/logstash-input-jdbc/issues/43, if you are using
+the Oracle JDBC driver (ojdbc6.jar), the correct `jdbc_driver_class` is `"Java::oracle.jdbc.driver.OracleDriver"`.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_library"]
+===== `jdbc_driver_library`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC driver library path to the third-party driver library. If multiple libraries are
+required, you can pass them separated by a comma.
+
+If not provided, the plugin will look for the driver class in the Logstash Java classpath.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_fetch_size"]
+===== `jdbc_fetch_size`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC fetch size. If not provided, the respective driver's default will be used.
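+
+For example, a sketch that overrides the driver's default and pre-fetches rows in
+batches of 1000 (the value is illustrative; tune it to your driver and memory budget):
+
+[source,ruby]
+-------------------------------------------------------
+input {
+  jdbc {
+    # ... driver and connection settings as in the Usage example ...
+    jdbc_fetch_size => 1000
+    statement => "SELECT * FROM big_table"
+  }
+}
+-------------------------------------------------------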
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_page_size"]
+===== `jdbc_page_size`
+
+ * Value type is <>
+ * Default value is `100000`
+
+JDBC page size
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled"]
+===== `jdbc_paging_enabled`
+
+ * Value type is <>
+ * Default value is `false`
+
+JDBC enable paging
+
+This will cause a SQL statement to be broken up into multiple queries.
+Each query will use limits and offsets to collectively retrieve the full
+result-set. The limit size is set with `jdbc_page_size`.
+
+Be aware that ordering is not guaranteed between queries.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_password"]
+===== `jdbc_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC password
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_password_filepath"]
+===== `jdbc_password_filepath`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path to a file containing the JDBC password
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout"]
+===== `jdbc_pool_timeout`
+
+ * Value type is <>
+ * Default value is `5`
+
+Connection pool configuration.
+The number of seconds to wait to acquire a connection before raising a PoolTimeoutError
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_user"]
+===== `jdbc_user`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC user
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_validate_connection"]
+===== `jdbc_validate_connection`
+
+ * Value type is <>
+ * Default value is `false`
+
+Connection pool configuration.
+Validate the connection before use.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout"]
+===== `jdbc_validation_timeout`
+
+ * Value type is <>
+ * Default value is `3600`
+
+Connection pool configuration.
+How often to validate a connection (in seconds)
+
+[id="{version}-plugins-{type}s-{plugin}-last_run_metadata_path"]
+===== `last_run_metadata_path`
+
+ * Value type is <>
+ * Default value is `"/home/ph/.logstash_jdbc_last_run"`
+
+Path to the file with the last run time
+
+[id="{version}-plugins-{type}s-{plugin}-lowercase_column_names"]
+===== `lowercase_column_names`
+
+ * Value type is <>
+ * Default value is `true`
+
+Whether to force the lowercasing of identifier fields
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is <>
+ * Default value is `{}`
+
+Hash of query parameters, for example `{ "target_id" => "321" }`
+
+[id="{version}-plugins-{type}s-{plugin}-record_last_run"]
+===== `record_last_run`
+
+ * Value type is <>
+ * Default value is `true`
+
+Whether to save state in `last_run_metadata_path`
+
+[id="{version}-plugins-{type}s-{plugin}-schedule"]
+===== `schedule`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Schedule for periodically running the statement, in Cron format,
+for example: "* * * * *" (execute the query every minute, on the minute)
+
+There is no schedule by default. If no schedule is given, then the statement is run
+exactly once.
+
+[id="{version}-plugins-{type}s-{plugin}-sequel_opts"]
+===== `sequel_opts`
+
+ * Value type is <>
+ * Default value is `{}`
+
+General/Vendor-specific Sequel configuration options.
+
+An example of an optional connection pool setting is `max_connections`, the
+maximum number of connections in the connection pool.
+
+Examples of vendor-specific options can be found on this
+documentation page: https://github.com/jeremyevans/sequel/blob/master/doc/opening_databases.rdoc
+
+[id="{version}-plugins-{type}s-{plugin}-sql_log_level"]
+===== `sql_log_level`
+
+ * Value can be any of: `fatal`, `error`, `warn`, `info`, `debug`
+ * Default value is `"info"`
+
+Log level at which to log SQL queries. The accepted values are the common ones:
+`fatal`, `error`, `warn`, `info`, and `debug`. The default value is `info`.
+
+[id="{version}-plugins-{type}s-{plugin}-statement"]
+===== `statement`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Statement to execute. If undefined, Logstash will complain at startup.
+
+To use parameters, use named parameter syntax.
+For example:
+
+[source, ruby]
+-----------------------------------------------
+"SELECT * FROM MYTABLE WHERE id = :target_id"
+-----------------------------------------------
+
+Here, `:target_id` is a named parameter. You can configure named parameters
+with the `parameters` setting.
+
+[id="{version}-plugins-{type}s-{plugin}-statement_filepath"]
+===== `statement_filepath`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path of the file containing the statement to execute
+
+[id="{version}-plugins-{type}s-{plugin}-tracking_column"]
+===== `tracking_column`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+When tracking a column value rather than a timestamp, the column whose value is to be tracked
+
+[id="{version}-plugins-{type}s-{plugin}-tracking_column_type"]
+===== `tracking_column_type`
+
+ * Value can be any of: `numeric`, `timestamp`
+ * Default value is `"numeric"`
+
+Type of tracking column. Currently only `numeric` and `timestamp` are supported.
+
+[id="{version}-plugins-{type}s-{plugin}-use_column_value"]
+===== `use_column_value`
+
+ * Value type is <>
+ * Default value is `false`
+
+Use an incremental column value rather than a timestamp
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/jdbc-v4.3.2.asciidoc b/docs/versioned-plugins/inputs/jdbc-v4.3.2.asciidoc
new file mode 100644
index 000000000..2c96afd5c
--- /dev/null
+++ b/docs/versioned-plugins/inputs/jdbc-v4.3.2.asciidoc
@@ -0,0 +1,486 @@
+:plugin: jdbc
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.3.2
+:release_date: 2017-12-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-jdbc/blob/v4.3.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Jdbc input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This plugin was created as a way to ingest data from any database
+with a JDBC interface into Logstash. You can periodically schedule ingestion
+using a cron syntax (see the `schedule` setting) or run the query one time to load
+data into Logstash. Each row in the resultset becomes a single event.
+Columns in the resultset are converted into fields in the event.
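+
+As a hypothetical illustration of that mapping, the statement below yields one event
+per returned row, with one field per selected column (table and column names invented):
+
+[source,ruby]
+------------------------------------------------------------------------------
+input {
+  jdbc {
+    # ... driver and connection settings omitted ...
+    statement => "SELECT name, release_year FROM albums"
+  }
+}
+# Each row becomes one event carrying fields such as
+# "name" => "Ninth Symphony" and "release_year" => 1824.
+------------------------------------------------------------------------------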
+ +==== Drivers + +This plugin does not come packaged with JDBC driver libraries. The desired +jdbc driver library must be explicitly passed in to the plugin using the +`jdbc_driver_library` configuration option. + +==== Scheduling + +Input from this plugin can be scheduled to run periodically according to a specific +schedule. This scheduling syntax is powered by https://github.com/jmettraux/rufus-scheduler[rufus-scheduler]. +The syntax is cron-like with some extensions specific to Rufus (e.g. timezone support ). + +Examples: + +|========================================================== +| `* 5 * 1-3 *` | will execute every minute of 5am every day of January through March. +| `0 * * * *` | will execute on the 0th minute of every hour every day. +| `0 6 * * * America/Chicago` | will execute at 6:00am (UTC/GMT -5) every day. +|========================================================== + + +Further documentation describing this syntax can be found https://github.com/jmettraux/rufus-scheduler#parsing-cronlines-and-time-strings[here]. + +==== State + +The plugin will persist the `sql_last_value` parameter in the form of a +metadata file stored in the configured `last_run_metadata_path`. Upon query execution, +this file will be updated with the current value of `sql_last_value`. Next time +the pipeline starts up, this value will be updated by reading from the file. If +`clean_run` is set to true, this value will be ignored and `sql_last_value` will be +set to Jan 1, 1970, or 0 if `use_column_value` is true, as if no query has ever been executed. + +==== Dealing With Large Result-sets + +Many JDBC drivers use the `fetch_size` parameter to limit how many +results are pre-fetched at a time from the cursor into the client's cache +before retrieving more results from the result-set. This is configured in +this plugin using the `jdbc_fetch_size` configuration option. No fetch size +is set by default in this plugin, so the specific driver's default size will +be used. + +==== Usage: + +Here is an example of setting up the plugin to fetch data from a MySQL database. +First, we place the appropriate JDBC driver library in our current +path (this can be placed anywhere on your filesystem). In this example, we connect to +the 'mydb' database using the user: 'mysql' and wish to input all rows in the 'songs' +table that match a specific artist. The following examples demonstrates a possible +Logstash configuration for this. The `schedule` option in this example will +instruct the plugin to execute this input statement on the minute, every minute. + +[source,ruby] +------------------------------------------------------------------------------ +input { + jdbc { + jdbc_driver_library => "mysql-connector-java-5.1.36-bin.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://localhost:3306/mydb" + jdbc_user => "mysql" + parameters => { "favorite_artist" => "Beethoven" } + schedule => "* * * * *" + statement => "SELECT * from songs where artist = :favorite_artist" + } +} +------------------------------------------------------------------------------ + +==== Configuring SQL statement + +A sql statement is required for this input. This can be passed-in via a +statement option in the form of a string, or read from a file (`statement_filepath`). File +option is typically used when the SQL statement is large or cumbersome to supply in the config. +The file option only supports one SQL statement. The plugin will only accept one of the options. 
+It cannot read a statement from a file as well as from the `statement` configuration parameter. + +==== Configuring multiple SQL statements + +Configuring multiple SQL statements is useful when there is a need to query and ingest data +from different database tables or views. It is possible to define separate Logstash +configuration files for each statement or to define multiple statements in a single configuration +file. When using multiple statements in a single Logstash configuration file, each statement +has to be defined as a separate jdbc input (including jdbc driver, connection string and other +required parameters). + +Please note that if any of the statements use the `sql_last_value` parameter (e.g. for +ingesting only data changed since last run), each input should define its own +`last_run_metadata_path` parameter. Failure to do so will result in undesired behaviour, as +all inputs will store their state to the same (default) metadata file, effectively +overwriting each other's `sql_last_value`. + +==== Predefined Parameters + +Some parameters are built-in and can be used from within your queries. +Here is the list: + +|========================================================== +|sql_last_value | The value used to calculate which rows to query. Before any query is run, +this is set to Thursday, 1 January 1970, or 0 if `use_column_value` is true and +`tracking_column` is set. It is updated accordingly after subsequent queries are run. +|========================================================== + +Example: +[source,ruby] +--------------------------------------------------------------------------------------------------- +input { + jdbc { + statement => "SELECT id, mycolumn1, mycolumn2 FROM my_table WHERE id > :sql_last_value" + use_column_value => true + tracking_column => "id" + # ... other configuration bits + } +} +--------------------------------------------------------------------------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Jdbc Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-clean_run>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-columns_charset>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_connection_string>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-jdbc_default_timezone>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_class>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_library>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_fetch_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_page_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_password_filepath>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_user>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-jdbc_validate_connection>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-last_run_metadata_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-lowercase_column_names>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-record_last_run>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sequel_opts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sql_log_level>> |<>, one of `["fatal", "error", "warn", "info", "debug"]`|No +| <<{version}-plugins-{type}s-{plugin}-statement>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-statement_filepath>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-tracking_column>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tracking_column_type>> |<>, one of `["numeric", "timestamp"]`|No +| <<{version}-plugins-{type}s-{plugin}-use_column_value>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-clean_run"] +===== `clean_run` + + * Value type is <> + * Default value is `false` + +Whether the previous run state should be preserved + +[id="{version}-plugins-{type}s-{plugin}-columns_charset"] +===== `columns_charset` + + * Value type is <> + * Default value is `{}` + +The character encoding for specific columns. This option will override the `:charset` option +for the specified columns. + +Example: +[source,ruby] +------------------------------------------------------- +input { + jdbc { + ... + columns_charset => { "column0" => "ISO-8859-1" } + ... + } +} +------------------------------------------------------- +this will only convert column0 that has ISO-8859-1 as an original encoding. 
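+
+Several columns can be listed in the same hash; the column names and encodings
+here are illustrative:
+
+[source,ruby]
+-------------------------------------------------------
+input {
+  jdbc {
+    # ... other settings ...
+    columns_charset => { "column0" => "ISO-8859-1", "column1" => "CP1252" }
+  }
+}
+-------------------------------------------------------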
+
+[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts"]
+===== `connection_retry_attempts`
+
+ * Value type is <>
+ * Default value is `1`
+
+Maximum number of times to try connecting to the database
+
+[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time"]
+===== `connection_retry_attempts_wait_time`
+
+ * Value type is <>
+ * Default value is `0.5`
+
+Number of seconds to sleep between connection attempts
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_connection_string"]
+===== `jdbc_connection_string`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC connection string
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_default_timezone"]
+===== `jdbc_default_timezone`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Timezone conversion.
+SQL does not allow for timezone data in timestamp fields. This plugin will automatically
+convert your SQL timestamp fields to Logstash timestamps, in relative UTC time in ISO8601 format.
+
+Using this setting will manually assign a specified timezone offset, instead
+of using the timezone setting of the local machine. You must use a canonical
+timezone, *America/Denver*, for example.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_class"]
+===== `jdbc_driver_class`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC driver class to load, for example, "org.apache.derby.jdbc.ClientDriver".
+NB: per https://github.com/logstash-plugins/logstash-input-jdbc/issues/43, if you are using
+the Oracle JDBC driver (ojdbc6.jar), the correct `jdbc_driver_class` is `"Java::oracle.jdbc.driver.OracleDriver"`.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_library"]
+===== `jdbc_driver_library`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC driver library path to the third-party driver library. If multiple libraries are
+required, you can pass them separated by a comma.
+
+If not provided, the plugin will look for the driver class in the Logstash Java classpath.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_fetch_size"]
+===== `jdbc_fetch_size`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC fetch size. If not provided, the respective driver's default will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_page_size"]
+===== `jdbc_page_size`
+
+ * Value type is <>
+ * Default value is `100000`
+
+JDBC page size
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled"]
+===== `jdbc_paging_enabled`
+
+ * Value type is <>
+ * Default value is `false`
+
+JDBC enable paging
+
+This will cause a SQL statement to be broken up into multiple queries.
+Each query will use limits and offsets to collectively retrieve the full
+result-set. The limit size is set with `jdbc_page_size`.
+
+Be aware that ordering is not guaranteed between queries.
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_password"]
+===== `jdbc_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JDBC password
+
+[id="{version}-plugins-{type}s-{plugin}-jdbc_password_filepath"]
+===== `jdbc_password_filepath`
+
+ * Value type is <>
+ * There is no default value for this setting.
+ +JDBC password filename + +[id="{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout"] +===== `jdbc_pool_timeout` + + * Value type is <> + * Default value is `5` + +Connection pool configuration. +The amount of seconds to wait to acquire a connection before raising a PoolTimeoutError (default 5) + +[id="{version}-plugins-{type}s-{plugin}-jdbc_user"] +===== `jdbc_user` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +JDBC user + +[id="{version}-plugins-{type}s-{plugin}-jdbc_validate_connection"] +===== `jdbc_validate_connection` + + * Value type is <> + * Default value is `false` + +Connection pool configuration. +Validate connection before use. + +[id="{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout"] +===== `jdbc_validation_timeout` + + * Value type is <> + * Default value is `3600` + +Connection pool configuration. +How often to validate a connection (in seconds) + +[id="{version}-plugins-{type}s-{plugin}-last_run_metadata_path"] +===== `last_run_metadata_path` + + * Value type is <> + * Default value is `"/home/ph/.logstash_jdbc_last_run"` + +Path to file with last run time + +[id="{version}-plugins-{type}s-{plugin}-lowercase_column_names"] +===== `lowercase_column_names` + + * Value type is <> + * Default value is `true` + +Whether to force the lowercasing of identifier fields + +[id="{version}-plugins-{type}s-{plugin}-parameters"] +===== `parameters` + + * Value type is <> + * Default value is `{}` + +Hash of query parameter, for example `{ "target_id" => "321" }` + +[id="{version}-plugins-{type}s-{plugin}-record_last_run"] +===== `record_last_run` + + * Value type is <> + * Default value is `true` + +Whether to save state or not in last_run_metadata_path + +[id="{version}-plugins-{type}s-{plugin}-schedule"] +===== `schedule` + + * Value type is <> + * There is no default value for this setting. + +Schedule of when to periodically run statement, in Cron format +for example: "* * * * *" (execute query every minute, on the minute) + +There is no schedule by default. If no schedule is given, then the statement is run +exactly once. + +[id="{version}-plugins-{type}s-{plugin}-sequel_opts"] +===== `sequel_opts` + + * Value type is <> + * Default value is `{}` + +General/Vendor-specific Sequel configuration options. + +An example of an optional connection pool configuration + max_connections - The maximum number of connections the connection pool + +examples of vendor-specific options can be found in this +documentation page: https://github.com/jeremyevans/sequel/blob/master/doc/opening_databases.rdoc + +[id="{version}-plugins-{type}s-{plugin}-sql_log_level"] +===== `sql_log_level` + + * Value can be any of: `fatal`, `error`, `warn`, `info`, `debug` + * Default value is `"info"` + +Log level at which to log SQL queries, the accepted values are the common ones fatal, error, warn, +info and debug. The default value is info. + +[id="{version}-plugins-{type}s-{plugin}-statement"] +===== `statement` + + * Value type is <> + * There is no default value for this setting. + +If undefined, Logstash will complain, even if codec is unused. +Statement to execute + +To use parameters, use named parameter syntax. +For example: + +[source, ruby] +----------------------------------------------- +"SELECT * FROM MYTABLE WHERE id = :target_id" +----------------------------------------------- + +here, ":target_id" is a named parameter. You can configure named parameters +with the `parameters` setting. 
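+
+Putting `statement` and `parameters` together, a sketch that binds the named
+parameter from the example above:
+
+[source,ruby]
+-----------------------------------------------
+input {
+  jdbc {
+    # ... driver and connection settings omitted ...
+    parameters => { "target_id" => "321" }
+    statement => "SELECT * FROM MYTABLE WHERE id = :target_id"
+  }
+}
+-----------------------------------------------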
+ +[id="{version}-plugins-{type}s-{plugin}-statement_filepath"] +===== `statement_filepath` + + * Value type is <> + * There is no default value for this setting. + +Path of file containing statement to execute + +[id="{version}-plugins-{type}s-{plugin}-tracking_column"] +===== `tracking_column` + + * Value type is <> + * There is no default value for this setting. + +If tracking column value rather than timestamp, the column whose value is to be tracked + +[id="{version}-plugins-{type}s-{plugin}-tracking_column_type"] +===== `tracking_column_type` + + * Value can be any of: `numeric`, `timestamp` + * Default value is `"numeric"` + +Type of tracking column. Currently only "numeric" and "timestamp" + +[id="{version}-plugins-{type}s-{plugin}-use_column_value"] +===== `use_column_value` + + * Value type is <> + * Default value is `false` + +Use an incremental column value rather than a timestamp + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/jdbc-v4.3.3.asciidoc b/docs/versioned-plugins/inputs/jdbc-v4.3.3.asciidoc new file mode 100644 index 000000000..78dafe00a --- /dev/null +++ b/docs/versioned-plugins/inputs/jdbc-v4.3.3.asciidoc @@ -0,0 +1,486 @@ +:plugin: jdbc +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.3.3 +:release_date: 2017-12-14 +:changelog_url: https://github.com/logstash-plugins/logstash-input-jdbc/blob/v4.3.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Jdbc input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This plugin was created as a way to ingest data in any database +with a JDBC interface into Logstash. You can periodically schedule ingestion +using a cron syntax (see `schedule` setting) or run the query one time to load +data into Logstash. Each row in the resultset becomes a single event. +Columns in the resultset are converted into fields in the event. + +==== Drivers + +This plugin does not come packaged with JDBC driver libraries. The desired +jdbc driver library must be explicitly passed in to the plugin using the +`jdbc_driver_library` configuration option. + +==== Scheduling + +Input from this plugin can be scheduled to run periodically according to a specific +schedule. This scheduling syntax is powered by https://github.com/jmettraux/rufus-scheduler[rufus-scheduler]. +The syntax is cron-like with some extensions specific to Rufus (e.g. timezone support ). + +Examples: + +|========================================================== +| `* 5 * 1-3 *` | will execute every minute of 5am every day of January through March. +| `0 * * * *` | will execute on the 0th minute of every hour every day. +| `0 6 * * * America/Chicago` | will execute at 6:00am (UTC/GMT -5) every day. +|========================================================== + + +Further documentation describing this syntax can be found https://github.com/jmettraux/rufus-scheduler#parsing-cronlines-and-time-strings[here]. + +==== State + +The plugin will persist the `sql_last_value` parameter in the form of a +metadata file stored in the configured `last_run_metadata_path`. 
Upon query execution, +this file will be updated with the current value of `sql_last_value`. Next time +the pipeline starts up, this value will be updated by reading from the file. If +`clean_run` is set to true, this value will be ignored and `sql_last_value` will be +set to Jan 1, 1970, or 0 if `use_column_value` is true, as if no query has ever been executed. + +==== Dealing With Large Result-sets + +Many JDBC drivers use the `fetch_size` parameter to limit how many +results are pre-fetched at a time from the cursor into the client's cache +before retrieving more results from the result-set. This is configured in +this plugin using the `jdbc_fetch_size` configuration option. No fetch size +is set by default in this plugin, so the specific driver's default size will +be used. + +==== Usage: + +Here is an example of setting up the plugin to fetch data from a MySQL database. +First, we place the appropriate JDBC driver library in our current +path (this can be placed anywhere on your filesystem). In this example, we connect to +the 'mydb' database using the user: 'mysql' and wish to input all rows in the 'songs' +table that match a specific artist. The following examples demonstrates a possible +Logstash configuration for this. The `schedule` option in this example will +instruct the plugin to execute this input statement on the minute, every minute. + +[source,ruby] +------------------------------------------------------------------------------ +input { + jdbc { + jdbc_driver_library => "mysql-connector-java-5.1.36-bin.jar" + jdbc_driver_class => "com.mysql.jdbc.Driver" + jdbc_connection_string => "jdbc:mysql://localhost:3306/mydb" + jdbc_user => "mysql" + parameters => { "favorite_artist" => "Beethoven" } + schedule => "* * * * *" + statement => "SELECT * from songs where artist = :favorite_artist" + } +} +------------------------------------------------------------------------------ + +==== Configuring SQL statement + +A sql statement is required for this input. This can be passed-in via a +statement option in the form of a string, or read from a file (`statement_filepath`). File +option is typically used when the SQL statement is large or cumbersome to supply in the config. +The file option only supports one SQL statement. The plugin will only accept one of the options. +It cannot read a statement from a file as well as from the `statement` configuration parameter. + +==== Configuring multiple SQL statements + +Configuring multiple SQL statements is useful when there is a need to query and ingest data +from different database tables or views. It is possible to define separate Logstash +configuration files for each statement or to define multiple statements in a single configuration +file. When using multiple statements in a single Logstash configuration file, each statement +has to be defined as a separate jdbc input (including jdbc driver, connection string and other +required parameters). + +Please note that if any of the statements use the `sql_last_value` parameter (e.g. for +ingesting only data changed since last run), each input should define its own +`last_run_metadata_path` parameter. Failure to do so will result in undesired behaviour, as +all inputs will store their state to the same (default) metadata file, effectively +overwriting each other's `sql_last_value`. + +==== Predefined Parameters + +Some parameters are built-in and can be used from within your queries. 
+Here is the list: + +|========================================================== +|sql_last_value | The value used to calculate which rows to query. Before any query is run, +this is set to Thursday, 1 January 1970, or 0 if `use_column_value` is true and +`tracking_column` is set. It is updated accordingly after subsequent queries are run. +|========================================================== + +Example: +[source,ruby] +--------------------------------------------------------------------------------------------------- +input { + jdbc { + statement => "SELECT id, mycolumn1, mycolumn2 FROM my_table WHERE id > :sql_last_value" + use_column_value => true + tracking_column => "id" + # ... other configuration bits + } +} +--------------------------------------------------------------------------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Jdbc Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-clean_run>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-columns_charset>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_connection_string>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-jdbc_default_timezone>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_class>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_library>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_fetch_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_page_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_password_filepath>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_user>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-jdbc_validate_connection>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-last_run_metadata_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-lowercase_column_names>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-record_last_run>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sequel_opts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sql_log_level>> |<>, one of `["fatal", "error", "warn", "info", "debug"]`|No +| <<{version}-plugins-{type}s-{plugin}-statement>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-statement_filepath>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-tracking_column>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tracking_column_type>> |<>, one of `["numeric", "timestamp"]`|No +| <<{version}-plugins-{type}s-{plugin}-use_column_value>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. 
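+
+For large result-sets it can also help to enable paging together with a page size
+(both described under `jdbc_paging_enabled` and `jdbc_page_size` below); a sketch
+with illustrative values:
+
+[source,ruby]
+-------------------------------------------------------
+input {
+  jdbc {
+    # ... driver and connection settings omitted ...
+    jdbc_paging_enabled => true
+    jdbc_page_size => 50000
+    statement => "SELECT * FROM big_table"
+  }
+}
+-------------------------------------------------------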
+ +  + +[id="{version}-plugins-{type}s-{plugin}-clean_run"] +===== `clean_run` + + * Value type is <> + * Default value is `false` + +Whether the previous run state should be preserved + +[id="{version}-plugins-{type}s-{plugin}-columns_charset"] +===== `columns_charset` + + * Value type is <> + * Default value is `{}` + +The character encoding for specific columns. This option will override the `:charset` option +for the specified columns. + +Example: +[source,ruby] +------------------------------------------------------- +input { + jdbc { + ... + columns_charset => { "column0" => "ISO-8859-1" } + ... + } +} +------------------------------------------------------- +this will only convert column0 that has ISO-8859-1 as an original encoding. + +[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts"] +===== `connection_retry_attempts` + + * Value type is <> + * Default value is `1` + +Maximum number of times to try connecting to database + +[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time"] +===== `connection_retry_attempts_wait_time` + + * Value type is <> + * Default value is `0.5` + +Number of seconds to sleep between connection attempts + +[id="{version}-plugins-{type}s-{plugin}-jdbc_connection_string"] +===== `jdbc_connection_string` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +JDBC connection string + +[id="{version}-plugins-{type}s-{plugin}-jdbc_default_timezone"] +===== `jdbc_default_timezone` + + * Value type is <> + * There is no default value for this setting. + +Timezone conversion. +SQL does not allow for timezone data in timestamp fields. This plugin will automatically +convert your SQL timestamp fields to Logstash timestamps, in relative UTC time in ISO8601 format. + +Using this setting will manually assign a specified timezone offset, instead +of using the timezone setting of the local machine. You must use a canonical +timezone, *America/Denver*, for example. + +[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_class"] +===== `jdbc_driver_class` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +JDBC driver class to load, for exmaple, "org.apache.derby.jdbc.ClientDriver" +NB per https://github.com/logstash-plugins/logstash-input-jdbc/issues/43 if you are using +the Oracle JDBC driver (ojdbc6.jar) the correct `jdbc_driver_class` is `"Java::oracle.jdbc.driver.OracleDriver"` + +[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_library"] +===== `jdbc_driver_library` + + * Value type is <> + * There is no default value for this setting. + +Tentative of abstracting JDBC logic to a mixin +for potential reuse in other plugins (input/output) +This method is called when someone includes this module +Add these methods to the 'base' given. +JDBC driver library path to third party driver library. In case of multiple libraries being +required you can pass them separated by a comma. + +If not provided, Plugin will look for the driver class in the Logstash Java classpath. + +[id="{version}-plugins-{type}s-{plugin}-jdbc_fetch_size"] +===== `jdbc_fetch_size` + + * Value type is <> + * There is no default value for this setting. + +JDBC fetch size. 
if not provided, respective driver's default will be used + +[id="{version}-plugins-{type}s-{plugin}-jdbc_page_size"] +===== `jdbc_page_size` + + * Value type is <> + * Default value is `100000` + +JDBC page size + +[id="{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled"] +===== `jdbc_paging_enabled` + + * Value type is <> + * Default value is `false` + +JDBC enable paging + +This will cause a sql statement to be broken up into multiple queries. +Each query will use limits and offsets to collectively retrieve the full +result-set. The limit size is set with `jdbc_page_size`. + +Be aware that ordering is not guaranteed between queries. + +[id="{version}-plugins-{type}s-{plugin}-jdbc_password"] +===== `jdbc_password` + + * Value type is <> + * There is no default value for this setting. + +JDBC password + +[id="{version}-plugins-{type}s-{plugin}-jdbc_password_filepath"] +===== `jdbc_password_filepath` + + * Value type is <> + * There is no default value for this setting. + +JDBC password filename + +[id="{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout"] +===== `jdbc_pool_timeout` + + * Value type is <> + * Default value is `5` + +Connection pool configuration. +The amount of seconds to wait to acquire a connection before raising a PoolTimeoutError (default 5) + +[id="{version}-plugins-{type}s-{plugin}-jdbc_user"] +===== `jdbc_user` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +JDBC user + +[id="{version}-plugins-{type}s-{plugin}-jdbc_validate_connection"] +===== `jdbc_validate_connection` + + * Value type is <> + * Default value is `false` + +Connection pool configuration. +Validate connection before use. + +[id="{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout"] +===== `jdbc_validation_timeout` + + * Value type is <> + * Default value is `3600` + +Connection pool configuration. +How often to validate a connection (in seconds) + +[id="{version}-plugins-{type}s-{plugin}-last_run_metadata_path"] +===== `last_run_metadata_path` + + * Value type is <> + * Default value is `"/home/ph/.logstash_jdbc_last_run"` + +Path to file with last run time + +[id="{version}-plugins-{type}s-{plugin}-lowercase_column_names"] +===== `lowercase_column_names` + + * Value type is <> + * Default value is `true` + +Whether to force the lowercasing of identifier fields + +[id="{version}-plugins-{type}s-{plugin}-parameters"] +===== `parameters` + + * Value type is <> + * Default value is `{}` + +Hash of query parameter, for example `{ "target_id" => "321" }` + +[id="{version}-plugins-{type}s-{plugin}-record_last_run"] +===== `record_last_run` + + * Value type is <> + * Default value is `true` + +Whether to save state or not in last_run_metadata_path + +[id="{version}-plugins-{type}s-{plugin}-schedule"] +===== `schedule` + + * Value type is <> + * There is no default value for this setting. + +Schedule of when to periodically run statement, in Cron format +for example: "* * * * *" (execute query every minute, on the minute) + +There is no schedule by default. If no schedule is given, then the statement is run +exactly once. + +[id="{version}-plugins-{type}s-{plugin}-sequel_opts"] +===== `sequel_opts` + + * Value type is <> + * Default value is `{}` + +General/Vendor-specific Sequel configuration options. 
+ +An example of an optional connection pool configuration + max_connections - The maximum number of connections the connection pool + +examples of vendor-specific options can be found in this +documentation page: https://github.com/jeremyevans/sequel/blob/master/doc/opening_databases.rdoc + +[id="{version}-plugins-{type}s-{plugin}-sql_log_level"] +===== `sql_log_level` + + * Value can be any of: `fatal`, `error`, `warn`, `info`, `debug` + * Default value is `"info"` + +Log level at which to log SQL queries, the accepted values are the common ones fatal, error, warn, +info and debug. The default value is info. + +[id="{version}-plugins-{type}s-{plugin}-statement"] +===== `statement` + + * Value type is <> + * There is no default value for this setting. + +If undefined, Logstash will complain, even if codec is unused. +Statement to execute + +To use parameters, use named parameter syntax. +For example: + +[source, ruby] +----------------------------------------------- +"SELECT * FROM MYTABLE WHERE id = :target_id" +----------------------------------------------- + +here, ":target_id" is a named parameter. You can configure named parameters +with the `parameters` setting. + +[id="{version}-plugins-{type}s-{plugin}-statement_filepath"] +===== `statement_filepath` + + * Value type is <> + * There is no default value for this setting. + +Path of file containing statement to execute + +[id="{version}-plugins-{type}s-{plugin}-tracking_column"] +===== `tracking_column` + + * Value type is <> + * There is no default value for this setting. + +If tracking column value rather than timestamp, the column whose value is to be tracked + +[id="{version}-plugins-{type}s-{plugin}-tracking_column_type"] +===== `tracking_column_type` + + * Value can be any of: `numeric`, `timestamp` + * Default value is `"numeric"` + +Type of tracking column. Currently only "numeric" and "timestamp" + +[id="{version}-plugins-{type}s-{plugin}-use_column_value"] +===== `use_column_value` + + * Value type is <> + * Default value is `false` + +Use an incremental column value rather than a timestamp + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/jms-index.asciidoc b/docs/versioned-plugins/inputs/jms-index.asciidoc new file mode 100644 index 000000000..d968410fa --- /dev/null +++ b/docs/versioned-plugins/inputs/jms-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: jms +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::jms-v3.0.4.asciidoc[] +include::jms-v3.0.3.asciidoc[] +include::jms-v3.0.2.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/jms-v3.0.2.asciidoc b/docs/versioned-plugins/inputs/jms-v3.0.2.asciidoc new file mode 100644 index 000000000..f373a3951 --- /dev/null +++ b/docs/versioned-plugins/inputs/jms-v3.0.2.asciidoc @@ -0,0 +1,259 @@ +:plugin: jms +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+:version: v3.0.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-jms/blob/v3.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Jms input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events from a JMS Broker. Supports both JMS Queues and Topics.
+
+For more information about JMS, see the Java Message Service specification.
+For more information about the Ruby Gem used, see https://github.com/reidmorrison/jruby-jms.
+
+Here is a config example to pull from a queue:
+
+[source,ruby]
+-----------------------------------------------
+  jms {
+    include_header => false
+    include_properties => false
+    include_body => true
+    use_jms_timestamp => false
+    interval => 10
+    destination => "myqueue"
+    pub_sub => false
+    yaml_file => "~/jms.yml"
+    yaml_section => "mybroker"
+  }
+-----------------------------------------------
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Jms Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-broker_url>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-factory>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_body>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_header>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_properties>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-jndi_context>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-jndi_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pub_sub>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-require_jars>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-runner>> |<>, one of `["consumer", "async", "thread"]`|No
+| <<{version}-plugins-{type}s-{plugin}-selector>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-use_jms_timestamp>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-username>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-yaml_file>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-yaml_section>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-broker_url"]
+===== `broker_url`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+URL to use when connecting to the JMS provider
+
+[id="{version}-plugins-{type}s-{plugin}-destination"]
+===== `destination`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Name of the destination queue or topic to use.
+
+[id="{version}-plugins-{type}s-{plugin}-factory"]
+===== `factory`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Name of the JMS Provider Factory class
+
+[id="{version}-plugins-{type}s-{plugin}-include_body"]
+===== `include_body`
+
+ * Value type is <>
+ * Default value is `true`
+
+Include the JMS Message Body in the event.
+Supports TextMessage, MapMessage, and BytesMessage.
+If the JMS Message is a TextMessage or BytesMessage, then the value will be in the "message" field of the event.
+If the JMS Message is a MapMessage, then all the key/value pairs will be added to the event as fields.
+StreamMessage and ObjectMessage are not supported.
+
+[id="{version}-plugins-{type}s-{plugin}-include_header"]
+===== `include_header`
+
+ * Value type is <>
+ * Default value is `true`
+
+A JMS message has three parts:
+
+ * Message Headers (required)
+ * Message Properties (optional)
+ * Message Bodies (optional)
+
+You can tell the input plugin which parts should be included in the event produced by Logstash.
+
+This setting includes the JMS Message Header field values in the event.
+
+[id="{version}-plugins-{type}s-{plugin}-include_properties"]
+===== `include_properties`
+
+ * Value type is <>
+ * Default value is `true`
+
+Include JMS Message Properties field values in the event
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+ * Value type is <>
+ * Default value is `10`
+
+Polling interval in seconds.
+This is the time to sleep between polls of a consumed Queue.
+This parameter has no influence in the case of a subscribed Topic.
+
+[id="{version}-plugins-{type}s-{plugin}-jndi_context"]
+===== `jndi_context`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Mandatory if a JNDI lookup is being used;
+contains details on how to connect to the JNDI server.
+
+[id="{version}-plugins-{type}s-{plugin}-jndi_name"]
+===== `jndi_name`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Name of the JNDI entry at which the Factory can be found
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Password to use when connecting to the JMS provider
+
+[id="{version}-plugins-{type}s-{plugin}-pub_sub"]
+===== `pub_sub`
+
+ * Value type is <>
+ * Default value is `false`
+
+Whether pub-sub (topic) style should be used.
+
+[id="{version}-plugins-{type}s-{plugin}-require_jars"]
+===== `require_jars`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you do not use a yaml configuration, use either `factory` or `jndi_name`.
+An optional array of Jar file names to load for the specified
+JMS provider. By using this option it is not necessary
+to put all the JMS Provider-specific jar files into the
+java CLASSPATH prior to starting Logstash.
+
+[id="{version}-plugins-{type}s-{plugin}-runner"]
+===== `runner`
+
+ * Value can be any of: `consumer`, `async`, `thread`
+ * Default value is `"consumer"`
+
+Choose an implementation of the run block. The value can be either consumer, async, or thread.
+
+[id="{version}-plugins-{type}s-{plugin}-selector"]
+===== `selector`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the selector to use to get messages off the queue or topic
+
+[id="{version}-plugins-{type}s-{plugin}-threads"]
+===== `threads`
+
+ * Value type is <>
+ * Default value is `1`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is <>
+ * Default value is `60`
+
+Initial connection timeout in seconds.
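+
+To consume from a topic rather than a queue, set `pub_sub`; a sketch with an
+illustrative topic name, assuming broker connection details come from the yaml file:
+
+[source,ruby]
+-----------------------------------------------
+input {
+  jms {
+    pub_sub => true
+    destination => "mytopic"
+    yaml_file => "~/jms.yml"
+    yaml_section => "mybroker"
+  }
+}
+-----------------------------------------------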
+ +[id="{version}-plugins-{type}s-{plugin}-use_jms_timestamp"] +===== `use_jms_timestamp` + + * Value type is <> + * Default value is `false` + +Convert the JMSTimestamp header field to the @timestamp value of the event + +[id="{version}-plugins-{type}s-{plugin}-username"] +===== `username` + + * Value type is <> + * There is no default value for this setting. + +Username to connect to JMS provider with + +[id="{version}-plugins-{type}s-{plugin}-yaml_file"] +===== `yaml_file` + + * Value type is <> + * There is no default value for this setting. + +Yaml config file + +[id="{version}-plugins-{type}s-{plugin}-yaml_section"] +===== `yaml_section` + + * Value type is <> + * There is no default value for this setting. + +Yaml config file section name +For some known examples, see: [Example jms.yml](https://github.com/reidmorrison/jruby-jms/blob/master/examples/jms.yml) + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/jms-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/jms-v3.0.3.asciidoc new file mode 100644 index 000000000..9a674a00f --- /dev/null +++ b/docs/versioned-plugins/inputs/jms-v3.0.3.asciidoc @@ -0,0 +1,259 @@ +:plugin: jms +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-input-jms/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Jms input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read events from a Jms Broker. Supports both Jms Queues and Topics. + +For more information about Jms, see +For more information about the Ruby Gem used, see +Here is a config example to pull from a queue: + jms { + include_header => false + include_properties => false + include_body => true + use_jms_timestamp => false + interval => 10 + destination => "myqueue" + pub-sub => false + yaml_file => "~/jms.yml" + yaml_section => "mybroker" + } + + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Jms Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-broker_url>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-factory>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-include_body>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-include_header>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-include_properties>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jndi_context>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jndi_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pub_sub>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-require_jars>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-runner>> |<>, one of `["consumer", "async", "thread"]`|No +| <<{version}-plugins-{type}s-{plugin}-selector>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-use_jms_timestamp>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-username>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-yaml_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-yaml_section>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-broker_url"] +===== `broker_url` + + * Value type is <> + * There is no default value for this setting. + +Url to use when connecting to the JMS provider + +[id="{version}-plugins-{type}s-{plugin}-destination"] +===== `destination` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Name of the destination queue or topic to use. + +[id="{version}-plugins-{type}s-{plugin}-factory"] +===== `factory` + + * Value type is <> + * There is no default value for this setting. + +Name of JMS Provider Factory class + +[id="{version}-plugins-{type}s-{plugin}-include_body"] +===== `include_body` + + * Value type is <> + * Default value is `true` + +Include JMS Message Body in the event +Supports TextMessage, MapMessage and ByteMessage +If the JMS Message is a TextMessage or ByteMessage, then the value will be in the "message" field of the event +If the JMS Message is a MapMessage, then all the key/value pairs will be added in the Hashmap of the event +StreamMessage and ObjectMessage are not supported + +[id="{version}-plugins-{type}s-{plugin}-include_header"] +===== `include_header` + + * Value type is <> + * Default value is `true` + +A JMS message has three parts : + Message Headers (required) + Message Properties (optional) + Message Bodies (optional) +You can tell the input plugin which parts should be included in the event produced by Logstash + +Include JMS Message Header Field values in the event + +[id="{version}-plugins-{type}s-{plugin}-include_properties"] +===== `include_properties` + + * Value type is <> + * Default value is `true` + +Include JMS Message Properties Field values in the event + +[id="{version}-plugins-{type}s-{plugin}-interval"] +===== `interval` + + * Value type is <> + * Default value is `10` + +Polling interval in seconds. +This is the time sleeping between asks to a consumed Queue. 
+This parameter has no influence in the case of a subscribed topic.
+
+[id="{version}-plugins-{type}s-{plugin}-jndi_context"]
+===== `jndi_context`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Mandatory if JNDI lookup is being used;
+contains details on how to connect to the JNDI server.
+
+[id="{version}-plugins-{type}s-{plugin}-jndi_name"]
+===== `jndi_name`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Name of the JNDI entry at which the factory can be found.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Password to use when connecting to the JMS provider.
+
+[id="{version}-plugins-{type}s-{plugin}-pub_sub"]
+===== `pub_sub`
+
+  * Value type is <>
+  * Default value is `false`
+
+Whether pub-sub (topic) style should be used.
+
+[id="{version}-plugins-{type}s-{plugin}-require_jars"]
+===== `require_jars`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+If you do not use a YAML configuration, use either `factory` or `jndi_name`.
+An optional array of Jar file names to load for the specified
+JMS provider. By using this option it is not necessary
+to put all the JMS provider-specific jar files into the
+java CLASSPATH prior to starting Logstash.
+
+[id="{version}-plugins-{type}s-{plugin}-runner"]
+===== `runner`
+
+  * Value can be any of: `consumer`, `async`, `thread`
+  * Default value is `"consumer"`
+
+Choose an implementation of the run block. Value can be either consumer, async, or thread.
+
+[id="{version}-plugins-{type}s-{plugin}-selector"]
+===== `selector`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Set the selector to use to get messages off the queue or topic.
+
+[id="{version}-plugins-{type}s-{plugin}-threads"]
+===== `threads`
+
+  * Value type is <>
+  * Default value is `1`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+  * Value type is <>
+  * Default value is `60`
+
+Initial connection timeout in seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-use_jms_timestamp"]
+===== `use_jms_timestamp`
+
+  * Value type is <>
+  * Default value is `false`
+
+Convert the JMSTimestamp header field to the @timestamp value of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-username"]
+===== `username`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Username to use when connecting to the JMS provider.
+
+[id="{version}-plugins-{type}s-{plugin}-yaml_file"]
+===== `yaml_file`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Path to the YAML configuration file.
+
+[id="{version}-plugins-{type}s-{plugin}-yaml_section"]
+===== `yaml_section`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Name of the YAML configuration file section to use.
+For some known examples, see the
+https://github.com/reidmorrison/jruby-jms/blob/master/examples/jms.yml[example jms.yml].
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/jms-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/jms-v3.0.4.asciidoc
new file mode 100644
index 000000000..232dcdadc
--- /dev/null
+++ b/docs/versioned-plugins/inputs/jms-v3.0.4.asciidoc
@@ -0,0 +1,259 @@
+:plugin: jms
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-jms/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Jms input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events from a JMS broker. Supports both JMS queues and topics.
+
+For more information about JMS, see the Java Message Service documentation.
+For more information about the Ruby Gem used, see https://github.com/reidmorrison/jruby-jms.
+Here is a config example to pull from a queue:
+
+[source,ruby]
+    jms {
+        include_header => false
+        include_properties => false
+        include_body => true
+        use_jms_timestamp => false
+        interval => 10
+        destination => "myqueue"
+        pub_sub => false
+        yaml_file => "~/jms.yml"
+        yaml_section => "mybroker"
+    }
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Jms Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-broker_url>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-factory>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_body>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_header>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_properties>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-jndi_context>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-jndi_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pub_sub>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-require_jars>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-runner>> |<>, one of `["consumer", "async", "thread"]`|No
+| <<{version}-plugins-{type}s-{plugin}-selector>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-use_jms_timestamp>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-username>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-yaml_file>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-yaml_section>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-broker_url"]
+===== `broker_url`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+URL to use when connecting to the JMS provider.
+
+[id="{version}-plugins-{type}s-{plugin}-destination"]
+===== `destination`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+Name of the destination queue or topic to use.
+
+[id="{version}-plugins-{type}s-{plugin}-factory"]
+===== `factory`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Name of the JMS provider factory class.
+
+[id="{version}-plugins-{type}s-{plugin}-include_body"]
+===== `include_body`
+
+  * Value type is <>
+  * Default value is `true`
+
+Include the JMS message body in the event.
+Supports TextMessage, MapMessage, and BytesMessage.
+If the JMS message is a TextMessage or BytesMessage, the value is placed in the `message` field of the event.
+If the JMS message is a MapMessage, all of its key/value pairs are added to the event.
+StreamMessage and ObjectMessage are not supported.
+
+[id="{version}-plugins-{type}s-{plugin}-include_header"]
+===== `include_header`
+
+  * Value type is <>
+  * Default value is `true`
+
+A JMS message has three parts:
+
+  * Message Headers (required)
+  * Message Properties (optional)
+  * Message Bodies (optional)
+
+You can tell the input plugin which parts should be included in the event produced by Logstash.
+
+Include JMS message header field values in the event.
+
+[id="{version}-plugins-{type}s-{plugin}-include_properties"]
+===== `include_properties`
+
+  * Value type is <>
+  * Default value is `true`
+
+Include JMS message property field values in the event.
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+  * Value type is <>
+  * Default value is `10`
+
+Polling interval in seconds.
+This is the sleep time between polls of a consumed queue.
+This parameter has no influence in the case of a subscribed topic.
+
+[id="{version}-plugins-{type}s-{plugin}-jndi_context"]
+===== `jndi_context`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Mandatory if JNDI lookup is being used;
+contains details on how to connect to the JNDI server.
+
+[id="{version}-plugins-{type}s-{plugin}-jndi_name"]
+===== `jndi_name`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Name of the JNDI entry at which the factory can be found.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Password to use when connecting to the JMS provider.
+
+[id="{version}-plugins-{type}s-{plugin}-pub_sub"]
+===== `pub_sub`
+
+  * Value type is <>
+  * Default value is `false`
+
+Whether pub-sub (topic) style should be used.
+
+[id="{version}-plugins-{type}s-{plugin}-require_jars"]
+===== `require_jars`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+If you do not use a YAML configuration, use either `factory` or `jndi_name`.
+An optional array of Jar file names to load for the specified
+JMS provider. By using this option it is not necessary
+to put all the JMS provider-specific jar files into the
+java CLASSPATH prior to starting Logstash.
+
+[id="{version}-plugins-{type}s-{plugin}-runner"]
+===== `runner`
+
+  * Value can be any of: `consumer`, `async`, `thread`
+  * Default value is `"consumer"`
+
+Choose an implementation of the run block. Value can be either consumer, async, or thread.
+
+[id="{version}-plugins-{type}s-{plugin}-selector"]
+===== `selector`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Set the selector to use to get messages off the queue or topic.
+
+[id="{version}-plugins-{type}s-{plugin}-threads"]
+===== `threads`
+
+  * Value type is <>
+  * Default value is `1`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+  * Value type is <>
+  * Default value is `60`
+
+Initial connection timeout in seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-use_jms_timestamp"]
+===== `use_jms_timestamp`
+
+  * Value type is <>
+  * Default value is `false`
+
+Convert the JMSTimestamp header field to the @timestamp value of the event.
+
+[id="{version}-plugins-{type}s-{plugin}-username"]
+===== `username`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Username to use when connecting to the JMS provider.
+
+[id="{version}-plugins-{type}s-{plugin}-yaml_file"]
+===== `yaml_file`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Path to the YAML configuration file.
+
+[id="{version}-plugins-{type}s-{plugin}-yaml_section"]
+===== `yaml_section`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Name of the YAML configuration file section to use.
+For some known examples, see the
+https://github.com/reidmorrison/jruby-jms/blob/master/examples/jms.yml[example jms.yml].
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/jmx-index.asciidoc b/docs/versioned-plugins/inputs/jmx-index.asciidoc
new file mode 100644
index 000000000..9c7ee4697
--- /dev/null
+++ b/docs/versioned-plugins/inputs/jmx-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: jmx
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-14
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::jmx-v3.0.4.asciidoc[]
+include::jmx-v3.0.3.asciidoc[]
+include::jmx-v3.0.2.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/jmx-pipe-index.asciidoc b/docs/versioned-plugins/inputs/jmx-pipe-index.asciidoc
new file mode 100644
index 000000000..0bccdd2a5
--- /dev/null
+++ b/docs/versioned-plugins/inputs/jmx-pipe-index.asciidoc
@@ -0,0 +1,10 @@
+:plugin: jmx-pipe
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+|=======================================================================
+
+
diff --git a/docs/versioned-plugins/inputs/jmx-v3.0.2.asciidoc b/docs/versioned-plugins/inputs/jmx-v3.0.2.asciidoc
new file mode 100644
index 000000000..213198633
--- /dev/null
+++ b/docs/versioned-plugins/inputs/jmx-v3.0.2.asciidoc
@@ -0,0 +1,157 @@
+:plugin: jmx
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-jmx/blob/v3.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Jmx input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input plugin lets you retrieve metrics from remote Java applications using JMX.
+Every `polling_frequency` seconds, it scans a folder containing JSON configuration
+files that describe the JVMs to monitor and the metrics to retrieve.
+Then a pool of threads retrieves the metrics and creates events.
+
+===== The configuration
+
+In the Logstash configuration, you must set the polling frequency,
+the number of threads used to poll metrics, and the absolute path of a directory
+containing JSON files that describe, per JVM, the metrics to retrieve.
+Logstash input configuration example:
+[source,ruby]
+    jmx {
+        # Required
+        path => "/apps/logstash_conf/jmxconf"
+        # Optional, default 60s
+        polling_frequency => 15
+        type => "jmx"
+        # Optional, default 4
+        nb_thread => 4
+    }
+
+JSON JMX configuration example:
+[source,js]
+    {
+      //Required, JMX listening host/ip
+      "host" : "192.168.1.2",
+      //Required, JMX listening port
+      "port" : 1335,
+      //Optional, the username to connect to JMX
+      "username" : "user",
+      //Optional, the password to connect to JMX
+      "password": "pass",
+      //Optional, use this alias as a prefix in the metric name. If not set use <host>_<port>
+      "alias" : "test.homeserver.elasticsearch",
+      //Required, list of JMX metrics to retrieve
+      "queries" : [
+      {
+        //Required, the object name of Mbean to request
+        "object_name" : "java.lang:type=Memory",
+        //Optional, use this alias in the metrics value instead of the object_name
+        "object_alias" : "Memory"
+      }, {
+        "object_name" : "java.lang:type=Runtime",
+        //Optional, set of attributes to retrieve. If not set retrieve
+        //all metrics available on the configured object_name.
+        "attributes" : [ "Uptime", "StartTime" ],
+        "object_alias" : "Runtime"
+      }, {
+        //object_name can be configured with * to retrieve all matching Mbeans
+        "object_name" : "java.lang:type=GarbageCollector,name=*",
+        "attributes" : [ "CollectionCount", "CollectionTime" ],
+        //object_alias can be based on specific value from the object_name thanks to ${}.
+        //In this case ${type} will be replaced by GarbageCollector...
+        "object_alias" : "${type}.${name}"
+      }, {
+        "object_name" : "java.nio:type=BufferPool,name=*",
+        "object_alias" : "${type}.${name}"
+      } ]
+    }
+
+Here are examples of generated events. When the returned metric value is a
+number or boolean, it is stored in the `metric_value_number` event field;
+otherwise it is stored in the `metric_value_string` event field.
+[source,ruby]
+    {
+      "@version" => "1",
+      "@timestamp" => "2014-02-18T20:57:27.688Z",
+      "host" => "192.168.1.2",
+      "path" => "/apps/logstash_conf/jmxconf",
+      "type" => "jmx",
+      "metric_path" => "test.homeserver.elasticsearch.GarbageCollector.ParNew.CollectionCount",
+      "metric_value_number" => 2212
+    }
+
+[source,ruby]
+    {
+      "@version" => "1",
+      "@timestamp" => "2014-02-18T20:58:06.376Z",
+      "host" => "localhost",
+      "path" => "/apps/logstash_conf/jmxconf",
+      "type" => "jmx",
+      "metric_path" => "test.homeserver.elasticsearch.BufferPool.mapped.ObjectName",
+      "metric_value_string" => "java.nio:type=BufferPool,name=mapped"
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Jmx Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-nb_thread>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-polling_frequency>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
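+
+For orientation only (not part of the generated reference), here is a sketch of
+how the two value fields described above might be routed downstream; the
+`stdout` output and the tag names are illustrative assumptions:
+
+[source,ruby]
+    input {
+      jmx {
+        path => "/apps/logstash_conf/jmxconf"
+        polling_frequency => 15
+        nb_thread => 4
+      }
+    }
+    filter {
+      # Numeric/boolean attribute values arrive in metric_value_number,
+      # everything else in metric_value_string (see the description above).
+      if [metric_value_number] {
+        mutate { add_tag => ["jmx_numeric"] }
+      } else {
+        mutate { add_tag => ["jmx_text"] }
+      }
+    }
+    output {
+      stdout { codec => rubydebug }
+    }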
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-nb_thread"]
+===== `nb_thread`
+
+  * Value type is <>
+  * Default value is `4`
+
+Number of threads launched to retrieve metrics.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+Path where the JSON configuration files are stored.
+
+[id="{version}-plugins-{type}s-{plugin}-polling_frequency"]
+===== `polling_frequency`
+
+  * Value type is <>
+  * Default value is `60`
+
+Interval in seconds between two JMX metrics retrievals.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/jmx-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/jmx-v3.0.3.asciidoc
new file mode 100644
index 000000000..cad0a65b4
--- /dev/null
+++ b/docs/versioned-plugins/inputs/jmx-v3.0.3.asciidoc
@@ -0,0 +1,157 @@
+:plugin: jmx
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-input-jmx/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Jmx input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input plugin lets you retrieve metrics from remote Java applications using JMX.
+Every `polling_frequency` seconds, it scans a folder containing JSON configuration
+files that describe the JVMs to monitor and the metrics to retrieve.
+Then a pool of threads retrieves the metrics and creates events.
+
+===== The configuration
+
+In the Logstash configuration, you must set the polling frequency,
+the number of threads used to poll metrics, and the absolute path of a directory
+containing JSON files that describe, per JVM, the metrics to retrieve.
+Logstash input configuration example:
+[source,ruby]
+    jmx {
+        # Required
+        path => "/apps/logstash_conf/jmxconf"
+        # Optional, default 60s
+        polling_frequency => 15
+        type => "jmx"
+        # Optional, default 4
+        nb_thread => 4
+    }
+
+JSON JMX configuration example:
+[source,js]
+    {
+      //Required, JMX listening host/ip
+      "host" : "192.168.1.2",
+      //Required, JMX listening port
+      "port" : 1335,
+      //Optional, the username to connect to JMX
+      "username" : "user",
+      //Optional, the password to connect to JMX
+      "password": "pass",
+      //Optional, use this alias as a prefix in the metric name. If not set use <host>_<port>
+      "alias" : "test.homeserver.elasticsearch",
+      //Required, list of JMX metrics to retrieve
+      "queries" : [
+      {
+        //Required, the object name of Mbean to request
+        "object_name" : "java.lang:type=Memory",
+        //Optional, use this alias in the metrics value instead of the object_name
+        "object_alias" : "Memory"
+      }, {
+        "object_name" : "java.lang:type=Runtime",
+        //Optional, set of attributes to retrieve. If not set retrieve
+        //all metrics available on the configured object_name.
+ "attributes" : [ "Uptime", "StartTime" ], + "object_alias" : "Runtime" + }, { + //object_name can be configured with * to retrieve all matching Mbeans + "object_name" : "java.lang:type=GarbageCollector,name=*", + "attributes" : [ "CollectionCount", "CollectionTime" ], + //object_alias can be based on specific value from the object_name thanks to ${}. + //In this case ${type} will be replaced by GarbageCollector... + "object_alias" : "${type}.${name}" + }, { + "object_name" : "java.nio:type=BufferPool,name=*", + "object_alias" : "${type}.${name}" + } ] + } + +Here are examples of generated events. When returned metrics value type is +number/boolean it is stored in `metric_value_number` event field +otherwise it is stored in `metric_value_string` event field. +[source,ruby] + { + "@version" => "1", + "@timestamp" => "2014-02-18T20:57:27.688Z", + "host" => "192.168.1.2", + "path" => "/apps/logstash_conf/jmxconf", + "type" => "jmx", + "metric_path" => "test.homeserver.elasticsearch.GarbageCollector.ParNew.CollectionCount", + "metric_value_number" => 2212 + } + +[source,ruby] + { + "@version" => "1", + "@timestamp" => "2014-02-18T20:58:06.376Z", + "host" => "localhost", + "path" => "/apps/logstash_conf/jmxconf", + "type" => "jmx", + "metric_path" => "test.homeserver.elasticsearch.BufferPool.mapped.ObjectName", + "metric_value_string" => "java.nio:type=BufferPool,name=mapped" + } + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Jmx Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-nb_thread>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-polling_frequency>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-nb_thread"] +===== `nb_thread` + + * Value type is <> + * Default value is `4` + +Indicate number of thread launched to retrieve metrics + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Path where json conf files are stored + +[id="{version}-plugins-{type}s-{plugin}-polling_frequency"] +===== `polling_frequency` + + * Value type is <> + * Default value is `60` + +Indicate interval between two jmx metrics retrieval +(in s) + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/jmx-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/jmx-v3.0.4.asciidoc new file mode 100644 index 000000000..5a8892425 --- /dev/null +++ b/docs/versioned-plugins/inputs/jmx-v3.0.4.asciidoc @@ -0,0 +1,157 @@ +:plugin: jmx +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-11-14
+:changelog_url: https://github.com/logstash-plugins/logstash-input-jmx/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Jmx input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input plugin lets you retrieve metrics from remote Java applications using JMX.
+Every `polling_frequency` seconds, it scans a folder containing JSON configuration
+files that describe the JVMs to monitor and the metrics to retrieve.
+Then a pool of threads retrieves the metrics and creates events.
+
+===== The configuration
+
+In the Logstash configuration, you must set the polling frequency,
+the number of threads used to poll metrics, and the absolute path of a directory
+containing JSON files that describe, per JVM, the metrics to retrieve.
+Logstash input configuration example:
+[source,ruby]
+    jmx {
+        # Required
+        path => "/apps/logstash_conf/jmxconf"
+        # Optional, default 60s
+        polling_frequency => 15
+        type => "jmx"
+        # Optional, default 4
+        nb_thread => 4
+    }
+
+JSON JMX configuration example:
+[source,js]
+    {
+      //Required, JMX listening host/ip
+      "host" : "192.168.1.2",
+      //Required, JMX listening port
+      "port" : 1335,
+      //Optional, the username to connect to JMX
+      "username" : "user",
+      //Optional, the password to connect to JMX
+      "password": "pass",
+      //Optional, use this alias as a prefix in the metric name. If not set use <host>_<port>
+      "alias" : "test.homeserver.elasticsearch",
+      //Required, list of JMX metrics to retrieve
+      "queries" : [
+      {
+        //Required, the object name of Mbean to request
+        "object_name" : "java.lang:type=Memory",
+        //Optional, use this alias in the metrics value instead of the object_name
+        "object_alias" : "Memory"
+      }, {
+        "object_name" : "java.lang:type=Runtime",
+        //Optional, set of attributes to retrieve. If not set retrieve
+        //all metrics available on the configured object_name.
+        "attributes" : [ "Uptime", "StartTime" ],
+        "object_alias" : "Runtime"
+      }, {
+        //object_name can be configured with * to retrieve all matching Mbeans
+        "object_name" : "java.lang:type=GarbageCollector,name=*",
+        "attributes" : [ "CollectionCount", "CollectionTime" ],
+        //object_alias can be based on specific value from the object_name thanks to ${}.
+        //In this case ${type} will be replaced by GarbageCollector...
+        "object_alias" : "${type}.${name}"
+      }, {
+        "object_name" : "java.nio:type=BufferPool,name=*",
+        "object_alias" : "${type}.${name}"
+      } ]
+    }
+
+Here are examples of generated events. When the returned metric value is a
+number or boolean, it is stored in the `metric_value_number` event field;
+otherwise it is stored in the `metric_value_string` event field.
+[source,ruby]
+    {
+      "@version" => "1",
+      "@timestamp" => "2014-02-18T20:57:27.688Z",
+      "host" => "192.168.1.2",
+      "path" => "/apps/logstash_conf/jmxconf",
+      "type" => "jmx",
+      "metric_path" => "test.homeserver.elasticsearch.GarbageCollector.ParNew.CollectionCount",
+      "metric_value_number" => 2212
+    }
+
+[source,ruby]
+    {
+      "@version" => "1",
+      "@timestamp" => "2014-02-18T20:58:06.376Z",
+      "host" => "localhost",
+      "path" => "/apps/logstash_conf/jmxconf",
+      "type" => "jmx",
+      "metric_path" => "test.homeserver.elasticsearch.BufferPool.mapped.ObjectName",
+      "metric_value_string" => "java.nio:type=BufferPool,name=mapped"
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Jmx Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-nb_thread>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-polling_frequency>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-nb_thread"]
+===== `nb_thread`
+
+  * Value type is <>
+  * Default value is `4`
+
+Number of threads launched to retrieve metrics.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+Path where the JSON configuration files are stored.
+
+[id="{version}-plugins-{type}s-{plugin}-polling_frequency"]
+===== `polling_frequency`
+
+  * Value type is <>
+  * Default value is `60`
+
+Interval in seconds between two JMX metrics retrievals.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/journald-index.asciidoc b/docs/versioned-plugins/inputs/journald-index.asciidoc
new file mode 100644
index 000000000..80cd8207f
--- /dev/null
+++ b/docs/versioned-plugins/inputs/journald-index.asciidoc
@@ -0,0 +1,12 @@
+:plugin: journald
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-06-23
+|=======================================================================
+
+include::journald-v2.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/journald-v2.0.1.asciidoc b/docs/versioned-plugins/inputs/journald-v2.0.1.asciidoc
new file mode 100644
index 000000000..957673266
--- /dev/null
+++ b/docs/versioned-plugins/inputs/journald-v2.0.1.asciidoc
@@ -0,0 +1,152 @@
+:plugin: journald
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-journald/blob/v2.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Journald input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Pull events from a local systemd journal.
+
+See https://github.com/ledbettj/systemd-journal for requirements.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Journald Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-filter>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-flags>> |<>, one of `[0, 1, 2, 4]`|No
+| <<{version}-plugins-{type}s-{plugin}-lowercase>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-seekto>> |<>, one of `["head", "tail"]`|No
+| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sincedb_write_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-thisboot>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-wait_timeout>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-filter"]
+===== `filter`
+
+  * Value type is <>
+  * Default value is `{}`
+
+Filter on events. Not heavily tested.
+
+
+[id="{version}-plugins-{type}s-{plugin}-flags"]
+===== `flags`
+
+  * Value can be any of: `0`, `1`, `2`, `4`
+  * Default value is `0`
+
+System journal flags:
+
+  * 0 = all available
+  * 1 = local only
+  * 2 = runtime only
+  * 4 = system only
+
+
+[id="{version}-plugins-{type}s-{plugin}-lowercase"]
+===== `lowercase`
+
+  * Value type is <>
+  * Default value is `false`
+
+Lowercase annoying UPPERCASE fieldnames. (May clobber existing fields.)
+
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+  * Value type is <>
+  * Default value is `"/var/log/journal"`
+
+Path to read journal files from.
+
+
+[id="{version}-plugins-{type}s-{plugin}-seekto"]
+===== `seekto`
+
+  * Value can be any of: `head`, `tail`
+  * Default value is `"tail"`
+
+Where in the journal to start capturing logs: `head` or `tail`.
+
+[id="{version}-plugins-{type}s-{plugin}-sincedb_path"]
+===== `sincedb_path`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Where to write the sincedb database (keeps track of the current
+position of the journal). The default writes
+the sincedb file to `$HOME/.sincedb_journal`.
+
+
+[id="{version}-plugins-{type}s-{plugin}-sincedb_write_interval"]
+===== `sincedb_write_interval`
+
+  * Value type is <>
+  * Default value is `15`
+
+How often (in seconds) to write the sincedb database with the current position of
+the journal.
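+
+For orientation only (not part of the generated reference), here is a minimal
+configuration sketch that exercises the options described in this section; the
+values, including the sincedb path, are illustrative assumptions rather than
+recommendations:
+
+[source,ruby]
+    input {
+      journald {
+        # Start at the head of the journal and keep only entries from this boot
+        seekto    => "head"
+        thisboot  => true
+        # Normalize the journal's UPPERCASE field names
+        lowercase => true
+        # Remember the journal position across restarts (illustrative path)
+        sincedb_path => "/var/lib/logstash/.sincedb_journal"
+      }
+    }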
+
+
+[id="{version}-plugins-{type}s-{plugin}-thisboot"]
+===== `thisboot`
+
+  * Value type is <>
+  * Default value is `true`
+
+Filter logs since the system booted (only relevant with `seekto => "head"`).
+
+
+[id="{version}-plugins-{type}s-{plugin}-threads"]
+===== `threads`
+
+  * Value type is <>
+  * Default value is `1`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-wait_timeout"]
+===== `wait_timeout`
+
+  * Value type is <>
+  * Default value is `3000000`
+
+The maximum timeout in microseconds to wait for new events from the journal.
+Set to -1 to wait indefinitely. Setting this to a large value will
+result in delayed shutdown of the plugin.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/kafka-index.asciidoc b/docs/versioned-plugins/inputs/kafka-index.asciidoc
new file mode 100644
index 000000000..8c58188e6
--- /dev/null
+++ b/docs/versioned-plugins/inputs/kafka-index.asciidoc
@@ -0,0 +1,26 @@
+:plugin: kafka
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2018-01-05
+| <> | 2017-08-15
+| <> | 2017-08-01
+| <> | 2017-07-18
+| <> | 2017-07-13
+| <> | 2017-07-11
+| <> | 2017-06-23
+| <> | 2017-05-11
+|=======================================================================
+
+include::kafka-v8.0.4.asciidoc[]
+include::kafka-v8.0.2.asciidoc[]
+include::kafka-v8.0.0.asciidoc[]
+include::kafka-v7.0.0.asciidoc[]
+include::kafka-v6.3.4.asciidoc[]
+include::kafka-v6.3.3.asciidoc[]
+include::kafka-v6.3.2.asciidoc[]
+include::kafka-v6.3.0.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/kafka-v6.3.0.asciidoc b/docs/versioned-plugins/inputs/kafka-v6.3.0.asciidoc
new file mode 100644
index 000000000..944d1c737
--- /dev/null
+++ b/docs/versioned-plugins/inputs/kafka-v6.3.0.asciidoc
@@ -0,0 +1,551 @@
+:plugin: kafka
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v6.3.0
+:release_date: 2017-05-11
+:changelog_url: https://github.com/logstash-plugins/logstash-input-kafka/blob/v6.3.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Kafka input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input will read events from a Kafka topic. It uses the 0.10 version of
+the consumer API provided by Kafka to read messages from the broker.
+
+Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination
+of Logstash and the Kafka input plugin:
+
+[options="header"]
+|==========================================================
+|Kafka Client Version |Logstash Version |Plugin Version |Why?
+|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular
+|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`)
+|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`)
+|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker
+|0.10.1.x |2.4.x - 5.x.x | 6.x.x |
+|==========================================================
+
+NOTE: We recommend that you use matching Kafka client and broker versions. During upgrades, you should
+upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker
+is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around.
+
+This input supports connecting to Kafka over:
+
+* SSL (requires plugin version 3.0.0 or later)
+* Kerberos SASL (requires plugin version 5.1.0 or later)
+
+By default security is disabled but can be turned on as needed.
+
+The Logstash Kafka consumer handles group management and uses the default offset management
+strategy using Kafka topics.
+
+Logstash instances by default form a single logical group to subscribe to Kafka topics.
+Each Logstash Kafka consumer can run multiple threads to increase read throughput. Alternatively,
+you could run multiple Logstash instances with the same `group_id` to spread the load across
+physical machines. Messages in a topic will be distributed to all Logstash instances with
+the same `group_id`.
+
+Ideally you should have as many threads as the number of partitions for a perfect balance --
+more threads than partitions means that some threads will be idle.
+
+For more information, see http://kafka.apache.org/documentation.html#theconsumer
+
+Kafka consumer configuration: http://kafka.apache.org/documentation.html#consumerconfigs
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Kafka Input Configuration Options
+
+This plugin supports the following configuration options plus the <> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-auto_offset_reset>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-check_crcs>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-connections_max_idle_ms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-consumer_threads>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-decorate_events>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-enable_auto_commit>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-exclude_internal_topics>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-fetch_max_bytes>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-fetch_min_bytes>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-group_id>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-key_deserializer_class>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-max_poll_interval_ms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-max_poll_records>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-partition_assignment_strategy>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-poll_timeout_ms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No
<<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No +| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-session_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-topics>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-topics_pattern>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-value_deserializer_class>> |<>|No +|======================================================================= + +Also see <> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms"] +===== `auto_commit_interval_ms` + + * Value type is <> + * Default value is `"5000"` + +The frequency in milliseconds that the consumer offsets are committed to Kafka. + +[id="{version}-plugins-{type}s-{plugin}-auto_offset_reset"] +===== `auto_offset_reset` + + * Value type is <> + * There is no default value for this setting. + +What to do when there is no initial offset in Kafka or if an offset is out of range: + +* earliest: automatically reset the offset to the earliest offset +* latest: automatically reset the offset to the latest offset +* none: throw exception to the consumer if no previous offset is found for the consumer's group +* anything else: throw exception to the consumer. + +[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"] +===== `bootstrap_servers` + + * Value type is <> + * Default value is `"localhost:9092"` + +A list of URLs of Kafka instances to use for establishing the initial connection to the cluster. +This list should be in the form of `host1:port1,host2:port2` These urls are just used +for the initial connection to discover the full cluster membership (which may change dynamically) +so this list need not contain the full set of servers (you may want more than one, though, in +case a server is down). + +[id="{version}-plugins-{type}s-{plugin}-check_crcs"] +===== `check_crcs` + + * Value type is <> + * There is no default value for this setting. + +Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk +corruption to the messages occurred. This check adds some overhead, so it may be +disabled in cases seeking extreme performance. + +[id="{version}-plugins-{type}s-{plugin}-client_id"] +===== `client_id` + + * Value type is <> + * Default value is `"logstash"` + +The id string to pass to the server when making requests. The purpose of this +is to be able to track the source of requests beyond just ip/port by allowing +a logical application name to be included. + +[id="{version}-plugins-{type}s-{plugin}-connections_max_idle_ms"] +===== `connections_max_idle_ms` + + * Value type is <> + * There is no default value for this setting. 
+ +Close idle connections after the number of milliseconds specified by this config. + +[id="{version}-plugins-{type}s-{plugin}-consumer_threads"] +===== `consumer_threads` + + * Value type is <> + * Default value is `1` + +Ideally you should have as many threads as the number of partitions for a perfect +balance — more threads than partitions means that some threads will be idle + +[id="{version}-plugins-{type}s-{plugin}-decorate_events"] +===== `decorate_events` + + * Value type is <> + * Default value is `false` + +Option to add Kafka metadata like topic, message size to the event. +This will add a field named `kafka` to the logstash event containing the following attributes: + `topic`: The topic this message is associated with + `consumer_group`: The consumer group used to read in this event + `partition`: The partition this message is associated with + `offset`: The offset from the partition this message is associated with + `key`: A ByteBuffer containing the message key + +[id="{version}-plugins-{type}s-{plugin}-enable_auto_commit"] +===== `enable_auto_commit` + + * Value type is <> + * Default value is `"true"` + +If true, periodically commit to Kafka the offsets of messages already returned by the consumer. +This committed offset will be used when the process fails as the position from +which the consumption will begin. + +[id="{version}-plugins-{type}s-{plugin}-exclude_internal_topics"] +===== `exclude_internal_topics` + + * Value type is <> + * There is no default value for this setting. + +Whether records from internal topics (such as offsets) should be exposed to the consumer. +If set to true the only way to receive records from an internal topic is subscribing to it. + +[id="{version}-plugins-{type}s-{plugin}-fetch_max_bytes"] +===== `fetch_max_bytes` + + * Value type is <> + * There is no default value for this setting. + +The maximum amount of data the server should return for a fetch request. This is not an +absolute maximum, if the first message in the first non-empty partition of the fetch is larger +than this value, the message will still be returned to ensure that the consumer can make progress. + +[id="{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms"] +===== `fetch_max_wait_ms` + + * Value type is <> + * There is no default value for this setting. + +The maximum amount of time the server will block before answering the fetch request if +there isn't sufficient data to immediately satisfy `fetch_min_bytes`. This +should be less than or equal to the timeout used in `poll_timeout_ms` + +[id="{version}-plugins-{type}s-{plugin}-fetch_min_bytes"] +===== `fetch_min_bytes` + + * Value type is <> + * There is no default value for this setting. + +The minimum amount of data the server should return for a fetch request. If insufficient +data is available the request will wait for that much data to accumulate +before answering the request. + +[id="{version}-plugins-{type}s-{plugin}-group_id"] +===== `group_id` + + * Value type is <> + * Default value is `"logstash"` + +The identifier of the group this consumer belongs to. Consumer group is a single logical subscriber +that happens to be made up of multiple processors. Messages in a topic will be distributed to all +Logstash instances with the same `group_id` + +[id="{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms"] +===== `heartbeat_interval_ms` + + * Value type is <> + * There is no default value for this setting. + +The expected time between heartbeats to the consumer coordinator. 
Heartbeats are used to ensure +that the consumer's session stays active and to facilitate rebalancing when new +consumers join or leave the group. The value must be set lower than +`session.timeout.ms`, but typically should be set no higher than 1/3 of that value. +It can be adjusted even lower to control the expected time for normal rebalances. + +[id="{version}-plugins-{type}s-{plugin}-jaas_path"] +===== `jaas_path` + + * Value type is <> + * There is no default value for this setting. + +The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization +services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for Kafka client: +[source,java] +---------------------------------- +KafkaClient { + com.sun.security.auth.module.Krb5LoginModule required + useTicketCache=true + renewTicket=true + serviceName="kafka"; + }; +---------------------------------- + +Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these +to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same +`jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on +different JVM instances. + +[id="{version}-plugins-{type}s-{plugin}-kerberos_config"] +===== `kerberos_config` + + * Value type is <> + * There is no default value for this setting. + +Optional path to kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html + +[id="{version}-plugins-{type}s-{plugin}-key_deserializer_class"] +===== `key_deserializer_class` + + * Value type is <> + * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"` + +Java Class used to deserialize the record's key + +[id="{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes"] +===== `max_partition_fetch_bytes` + + * Value type is <> + * There is no default value for this setting. + +The maximum amount of data per-partition the server will return. The maximum total memory used for a +request will be #partitions * max.partition.fetch.bytes. This size must be at least +as large as the maximum message size the server allows or else it is possible for the producer to +send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying +to fetch a large message on a certain partition. + +[id="{version}-plugins-{type}s-{plugin}-max_poll_interval_ms"] +===== `max_poll_interval_ms` + + * Value type is <> + * There is no default value for this setting. + +The maximum delay between invocations of poll() when using consumer group management. This places +an upper bound on the amount of time that the consumer can be idle before fetching more records. +If poll() is not called before expiration of this timeout, then the consumer is considered failed and +the group will rebalance in order to reassign the partitions to another member. +The value of the configuration `request_timeout_ms` must always be larger than max_poll_interval_ms + +[id="{version}-plugins-{type}s-{plugin}-max_poll_records"] +===== `max_poll_records` + + * Value type is <> + * There is no default value for this setting. + +The maximum number of records returned in a single call to poll(). + +[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"] +===== `metadata_max_age_ms` + + * Value type is <> + * There is no default value for this setting. 
+
+The period of time in milliseconds after which we force a refresh of metadata even if
+we haven't seen any partition leadership changes, in order to proactively discover any new brokers or partitions.
+
+[id="{version}-plugins-{type}s-{plugin}-partition_assignment_strategy"]
+===== `partition_assignment_strategy`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The class name of the partition assignment strategy that the client will use to distribute
+partition ownership amongst consumer instances.
+
+[id="{version}-plugins-{type}s-{plugin}-poll_timeout_ms"]
+===== `poll_timeout_ms`
+
+  * Value type is <>
+  * Default value is `100`
+
+Time the Kafka consumer will wait to receive new messages from topics.
+
+[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"]
+===== `receive_buffer_bytes`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.
+
+[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"]
+===== `reconnect_backoff_ms`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The amount of time to wait before attempting to reconnect to a given host.
+This avoids repeatedly connecting to a host in a tight loop.
+This backoff applies to all requests sent by the consumer to the broker.
+
+[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"]
+===== `request_timeout_ms`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The configuration controls the maximum amount of time the client will wait
+for the response of a request. If the response is not received before the timeout
+elapses, the client will resend the request if necessary or fail the request if
+retries are exhausted.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"]
+===== `retry_backoff_ms`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The amount of time to wait before attempting to retry a failed fetch request
+to a given topic partition. This avoids repeated fetching-and-failing in a tight loop.
+
+[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"]
+===== `sasl_kerberos_service_name`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The Kerberos principal name that Kafka broker runs as.
+This can be defined either in Kafka's JAAS config or in Kafka's config.
+
+[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"]
+===== `sasl_mechanism`
+
+  * Value type is <>
+  * Default value is `"GSSAPI"`
+
+http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections.
+This may be any mechanism for which a security provider is available.
+GSSAPI is the default mechanism.
+
+[id="{version}-plugins-{type}s-{plugin}-security_protocol"]
+===== `security_protocol`
+
+  * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`
+  * Default value is `"PLAINTEXT"`
+
+Security protocol to use, which can be any of PLAINTEXT, SSL, SASL_PLAINTEXT, or SASL_SSL.
+
+[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"]
+===== `send_buffer_bytes`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The size of the TCP send buffer (SO_SNDBUF) to use when sending data.
+
+[id="{version}-plugins-{type}s-{plugin}-session_timeout_ms"]
+===== `session_timeout_ms`
+
+  * Value type is <>
+  * There is no default value for this setting.
+ +The timeout after which, if the `poll_timeout_ms` is not invoked, the consumer is marked dead +and a rebalance operation is triggered for the group identified by `group_id` + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `false` + +Enable SSL/TLS secured communication to Kafka broker. + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"] +===== `ssl_key_password` + + * Value type is <> + * There is no default value for this setting. + +The password of the private key in the key store file. + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"] +===== `ssl_keystore_location` + + * Value type is <> + * There is no default value for this setting. + +If client authentication is required, this setting stores the keystore path. + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"] +===== `ssl_keystore_password` + + * Value type is <> + * There is no default value for this setting. + +If client authentication is required, this setting stores the keystore password + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"] +===== `ssl_keystore_type` + + * Value type is <> + * There is no default value for this setting. + +The keystore type. + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"] +===== `ssl_truststore_location` + + * Value type is <> + * There is no default value for this setting. + +The JKS truststore path to validate the Kafka broker's certificate. + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"] +===== `ssl_truststore_password` + + * Value type is <> + * There is no default value for this setting. + +The truststore password + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"] +===== `ssl_truststore_type` + + * Value type is <> + * There is no default value for this setting. + +The truststore type. + +[id="{version}-plugins-{type}s-{plugin}-topics"] +===== `topics` + + * Value type is <> + * Default value is `["logstash"]` + +A list of topics to subscribe to, defaults to ["logstash"]. + +[id="{version}-plugins-{type}s-{plugin}-topics_pattern"] +===== `topics_pattern` + + * Value type is <> + * There is no default value for this setting. + +A topic regex pattern to subscribe to. +The topics configuration will be ignored when using this configuration. + +[id="{version}-plugins-{type}s-{plugin}-value_deserializer_class"] +===== `value_deserializer_class` + + * Value type is <> + * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"` + +Java Class used to deserialize the record's value + + + +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/kafka-v6.3.2.asciidoc b/docs/versioned-plugins/inputs/kafka-v6.3.2.asciidoc new file mode 100644 index 000000000..0667ec1ff --- /dev/null +++ b/docs/versioned-plugins/inputs/kafka-v6.3.2.asciidoc @@ -0,0 +1,552 @@ +:plugin: kafka +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v6.3.2 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-input-kafka/blob/v6.3.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Kafka input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input will read events from a Kafka topic. It uses the 0.10 version of
+the consumer API provided by Kafka to read messages from the broker.
+
+Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination
+of Logstash and the Kafka input plugin:
+
+[options="header"]
+|==========================================================
+|Kafka Client Version |Logstash Version |Plugin Version |Why?
+|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular
+|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`)
+|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`)
+|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker
+|0.10.1.x |2.4.x - 5.x.x | 6.x.x |
+|==========================================================
+
+NOTE: We recommend that you use matching Kafka client and broker versions. During upgrades, you should
+upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker
+is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around.
+
+This input supports connecting to Kafka over:
+
+* SSL (requires plugin version 3.0.0 or later)
+* Kerberos SASL (requires plugin version 5.1.0 or later)
+
+By default security is disabled but can be turned on as needed.
+
+The Logstash Kafka consumer handles group management and uses the default offset management
+strategy using Kafka topics.
+
+Logstash instances by default form a single logical group to subscribe to Kafka topics.
+Each Logstash Kafka consumer can run multiple threads to increase read throughput. Alternatively,
+you could run multiple Logstash instances with the same `group_id` to spread the load across
+physical machines. Messages in a topic will be distributed to all Logstash instances with
+the same `group_id`.
+
+Ideally you should have as many threads as the number of partitions for a perfect balance --
+more threads than partitions means that some threads will be idle.
+
+For more information see http://kafka.apache.org/documentation.html#theconsumer
+
+Kafka consumer configuration: http://kafka.apache.org/documentation.html#consumerconfigs
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Kafka Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-auto_offset_reset>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-check_crcs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connections_max_idle_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-consumer_threads>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-decorate_events>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-enable_auto_commit>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclude_internal_topics>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fetch_max_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fetch_min_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-group_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-key_deserializer_class>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_poll_interval_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_poll_records>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-partition_assignment_strategy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-poll_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No +| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-session_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-topics>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-topics_pattern>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-value_deserializer_class>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. 
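+
+For illustration, here is a minimal pipeline sketch that exercises a few of these
+options. The broker addresses, topic, and thread count are hypothetical placeholders;
+adjust them to your environment:
+
+[source,ruby]
+----------------------------------
+input {
+  kafka {
+    bootstrap_servers => "kafka1:9092,kafka2:9092" # initial contact points; full membership is discovered
+    topics => ["web-logs"]                         # topics to subscribe to
+    group_id => "logstash"                         # instances sharing this id divide the partitions
+    consumer_threads => 4                          # ideally equal to the partition count
+    decorate_events => true                        # add the `kafka` metadata field to each event
+  }
+}
+----------------------------------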
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms"]
+===== `auto_commit_interval_ms`
+
+ * Value type is <>
+ * Default value is `"5000"`
+
+The frequency in milliseconds that the consumer offsets are committed to Kafka.
+
+[id="{version}-plugins-{type}s-{plugin}-auto_offset_reset"]
+===== `auto_offset_reset`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+What to do when there is no initial offset in Kafka or if an offset is out of range:
+
+* earliest: automatically reset the offset to the earliest offset
+* latest: automatically reset the offset to the latest offset
+* none: throw exception to the consumer if no previous offset is found for the consumer's group
+* anything else: throw exception to the consumer.
+
+[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"]
+===== `bootstrap_servers`
+
+ * Value type is <>
+ * Default value is `"localhost:9092"`
+
+A list of URLs of Kafka instances to use for establishing the initial connection to the cluster.
+This list should be in the form `host1:port1,host2:port2`. These URLs are used only
+for the initial connection, to discover the full cluster membership (which may change dynamically),
+so this list need not contain the full set of servers (you may want more than one, though, in
+case a server is down).
+
+[id="{version}-plugins-{type}s-{plugin}-check_crcs"]
+===== `check_crcs`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Automatically check the CRC32 of the records consumed. This check ensures that no on-the-wire
+or on-disk corruption of the messages has occurred. It adds some overhead, so it may be
+disabled in cases seeking extreme performance.
+
+[id="{version}-plugins-{type}s-{plugin}-client_id"]
+===== `client_id`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+The ID string to pass to the server when making requests. The purpose of this
+is to be able to track the source of requests beyond just IP and port by allowing
+a logical application name to be included.
+
+[id="{version}-plugins-{type}s-{plugin}-connections_max_idle_ms"]
+===== `connections_max_idle_ms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Close idle connections after the number of milliseconds specified by this config.
+
+[id="{version}-plugins-{type}s-{plugin}-consumer_threads"]
+===== `consumer_threads`
+
+ * Value type is <>
+ * Default value is `1`
+
+Ideally you should have as many threads as the number of partitions for a perfect
+balance; more threads than partitions means that some threads will be idle.
+
+[id="{version}-plugins-{type}s-{plugin}-decorate_events"]
+===== `decorate_events`
+
+ * Value type is <>
+ * Default value is `false`
+
+Option to add Kafka metadata, like the topic and message size, to the event.
+This will add a field named `kafka` to the logstash event containing the following attributes:
+
+* `topic`: The topic this message is associated with
+* `consumer_group`: The consumer group used to read in this event
+* `partition`: The partition this message is associated with
+* `offset`: The offset from the partition this message is associated with
+* `key`: A ByteBuffer containing the message key
+
+[id="{version}-plugins-{type}s-{plugin}-enable_auto_commit"]
+===== `enable_auto_commit`
+
+ * Value type is <>
+ * Default value is `"true"`
+
+If true, periodically commit to Kafka the offsets of messages already returned by the consumer.
+When the process fails, this committed offset is used as the position from
+which consumption will resume.
+
+[id="{version}-plugins-{type}s-{plugin}-exclude_internal_topics"]
+===== `exclude_internal_topics`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Whether records from internal topics (such as offsets) should be exposed to the consumer.
+If set to `true`, the only way to receive records from an internal topic is to subscribe to it.
+
+[id="{version}-plugins-{type}s-{plugin}-fetch_max_bytes"]
+===== `fetch_max_bytes`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The maximum amount of data the server should return for a fetch request. This is not an
+absolute maximum; if the first message in the first non-empty partition of the fetch is larger
+than this value, the message will still be returned to ensure that the consumer can make progress.
+
+[id="{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms"]
+===== `fetch_max_wait_ms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The maximum amount of time the server will block before answering the fetch request if
+there isn't sufficient data to immediately satisfy `fetch_min_bytes`. This
+should be less than or equal to the timeout used in `poll_timeout_ms`.
+
+[id="{version}-plugins-{type}s-{plugin}-fetch_min_bytes"]
+===== `fetch_min_bytes`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The minimum amount of data the server should return for a fetch request. If insufficient
+data is available, the request will wait for that much data to accumulate
+before answering.
+
+[id="{version}-plugins-{type}s-{plugin}-group_id"]
+===== `group_id`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+The identifier of the group this consumer belongs to. A consumer group is a single logical subscriber
+that happens to be made up of multiple processors. Messages in a topic will be distributed to all
+Logstash instances with the same `group_id`.
+
+[id="{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms"]
+===== `heartbeat_interval_ms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The expected time between heartbeats to the consumer coordinator. Heartbeats are used to ensure
+that the consumer's session stays active and to facilitate rebalancing when new
+consumers join or leave the group. The value must be set lower than
+`session.timeout.ms`, but typically should be set no higher than 1/3 of that value.
+It can be adjusted even lower to control the expected time for normal rebalances.
+
+[id="{version}-plugins-{type}s-{plugin}-jaas_path"]
+===== `jaas_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization
+services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for a Kafka client:
+[source,java]
+----------------------------------
+KafkaClient {
+  com.sun.security.auth.module.Krb5LoginModule required
+  useTicketCache=true
+  renewTicket=true
+  serviceName="kafka";
+  };
+----------------------------------
+
+Please note that specifying `jaas_path` and `kerberos_config` in the config file adds these
+to the global JVM system properties. This means that if you have multiple Kafka inputs, all of them share the same
+`jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on
+different JVM instances.
+
+[id="{version}-plugins-{type}s-{plugin}-kerberos_config"]
+===== `kerberos_config`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Optional path to a Kerberos config file. This file follows the krb5.conf format, as detailed in
+https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html
+
+[id="{version}-plugins-{type}s-{plugin}-key_deserializer_class"]
+===== `key_deserializer_class`
+
+ * Value type is <>
+ * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"`
+
+Java class used to deserialize the record's key.
+
+[id="{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes"]
+===== `max_partition_fetch_bytes`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The maximum amount of data per partition the server will return. The maximum total memory used for a
+request will be `#partitions * max.partition.fetch.bytes`. This size must be at least
+as large as the maximum message size the server allows, or else it is possible for the producer to
+send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying
+to fetch a large message on a certain partition.
+
+[id="{version}-plugins-{type}s-{plugin}-max_poll_interval_ms"]
+===== `max_poll_interval_ms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The maximum delay between invocations of poll() when using consumer group management. This places
+an upper bound on the amount of time that the consumer can be idle before fetching more records.
+If poll() is not called before expiration of this timeout, then the consumer is considered failed and
+the group will rebalance in order to reassign the partitions to another member.
+The value of the configuration `request_timeout_ms` must always be larger than `max_poll_interval_ms`.
+
+[id="{version}-plugins-{type}s-{plugin}-max_poll_records"]
+===== `max_poll_records`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The maximum number of records returned in a single call to poll().
+
+[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"]
+===== `metadata_max_age_ms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The period of time in milliseconds after which we force a refresh of metadata, even if
+we haven't seen any partition leadership changes, in order to proactively discover any new brokers or partitions.
+
+[id="{version}-plugins-{type}s-{plugin}-partition_assignment_strategy"]
+===== `partition_assignment_strategy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The class name of the partition assignment strategy that the client will use to distribute
+partition ownership amongst consumer instances.
+
+[id="{version}-plugins-{type}s-{plugin}-poll_timeout_ms"]
+===== `poll_timeout_ms`
+
+ * Value type is <>
+ * Default value is `100`
+
+Time the Kafka consumer will wait to receive new messages from topics.
+
+[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"]
+===== `receive_buffer_bytes`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.
+
+[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"]
+===== `reconnect_backoff_ms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The amount of time to wait before attempting to reconnect to a given host.
+This avoids repeatedly connecting to a host in a tight loop.
+This backoff applies to all requests sent by the consumer to the broker.
+
+[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"]
+===== `request_timeout_ms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+This configuration controls the maximum amount of time the client will wait
+for the response to a request. If the response is not received before the timeout
+elapses, the client will resend the request if necessary, or fail the request if
+retries are exhausted.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"]
+===== `retry_backoff_ms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The amount of time to wait before attempting to retry a failed fetch request
+to a given topic partition. This avoids repeated fetching-and-failing in a tight loop.
+
+[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"]
+===== `sasl_kerberos_service_name`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The Kerberos principal name that the Kafka broker runs as.
+This can be defined either in Kafka's JAAS config or in Kafka's config.
+
+[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"]
+===== `sasl_mechanism`
+
+ * Value type is <>
+ * Default value is `"GSSAPI"`
+
+http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections.
+This may be any mechanism for which a security provider is available.
+GSSAPI is the default mechanism.
+
+[id="{version}-plugins-{type}s-{plugin}-security_protocol"]
+===== `security_protocol`
+
+ * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`
+ * Default value is `"PLAINTEXT"`
+
+Security protocol to use: one of `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, or `SASL_SSL`.
+
+[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"]
+===== `send_buffer_bytes`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The size of the TCP send buffer (SO_SNDBUF) to use when sending data.
+
+[id="{version}-plugins-{type}s-{plugin}-session_timeout_ms"]
+===== `session_timeout_ms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The timeout after which the consumer is marked dead and a rebalance operation is triggered
+for the group identified by `group_id` if `poll()` has not been invoked within this period
+(see also `poll_timeout_ms`).
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * Default value is `false`
+
+Enable SSL/TLS secured communication to the Kafka broker.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"]
+===== `ssl_key_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The password of the private key in the key store file.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"]
+===== `ssl_keystore_location`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If client authentication is required, this setting stores the keystore path.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"]
+===== `ssl_keystore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If client authentication is required, this setting stores the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"]
+===== `ssl_keystore_type`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The keystore type.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"]
+===== `ssl_truststore_location`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The JKS truststore path to validate the Kafka broker's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"]
+===== `ssl_truststore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"]
+===== `ssl_truststore_type`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The truststore type.
+
+[id="{version}-plugins-{type}s-{plugin}-topics"]
+===== `topics`
+
+ * Value type is <>
+ * Default value is `["logstash"]`
+
+A list of topics to subscribe to. Defaults to `["logstash"]`.
+
+[id="{version}-plugins-{type}s-{plugin}-topics_pattern"]
+===== `topics_pattern`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A topic regex pattern to subscribe to.
+The `topics` configuration is ignored when this setting is used.
+
+[id="{version}-plugins-{type}s-{plugin}-value_deserializer_class"]
+===== `value_deserializer_class`
+
+ * Value type is <>
+ * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"`
+
+Java class used to deserialize the record's value.
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/kafka-v6.3.3.asciidoc b/docs/versioned-plugins/inputs/kafka-v6.3.3.asciidoc
new file mode 100644
index 000000000..b33854113
--- /dev/null
+++ b/docs/versioned-plugins/inputs/kafka-v6.3.3.asciidoc
@@ -0,0 +1,553 @@
+:plugin: kafka
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v6.3.3
+:release_date: 2017-07-11
+:changelog_url: https://github.com/logstash-plugins/logstash-input-kafka/blob/v6.3.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Kafka input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input will read events from a Kafka topic. It uses the 0.10 version of
+the consumer API provided by Kafka to read messages from the broker.
+
+Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination
+of Logstash and the Kafka input plugin:
+
+[options="header"]
+|==========================================================
+|Kafka Client Version |Logstash Version |Plugin Version |Why?
+|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular
+|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`)
+|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`)
+|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker
+|0.10.1.x |2.4.x - 5.x.x | 6.x.x |
+|0.11.0.0 |2.4.x - 5.x.x | 6.x.x |Not compatible with the <= 0.9 broker
+|==========================================================
+
+NOTE: We recommend that you use matching Kafka client and broker versions. During upgrades, you should
+upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker
+is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around.
+
+This input supports connecting to Kafka over:
+
+* SSL (requires plugin version 3.0.0 or later)
+* Kerberos SASL (requires plugin version 5.1.0 or later)
+
+By default security is disabled but can be turned on as needed.
+
+The Logstash Kafka consumer handles group management and uses the default offset management
+strategy using Kafka topics.
+
+Logstash instances by default form a single logical group to subscribe to Kafka topics.
+Each Logstash Kafka consumer can run multiple threads to increase read throughput. Alternatively,
+you could run multiple Logstash instances with the same `group_id` to spread the load across
+physical machines. Messages in a topic will be distributed to all Logstash instances with
+the same `group_id`.
+
+Ideally you should have as many threads as the number of partitions for a perfect balance --
+more threads than partitions means that some threads will be idle.
+
+For more information see http://kafka.apache.org/documentation.html#theconsumer
+
+Kafka consumer configuration: http://kafka.apache.org/documentation.html#consumerconfigs
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Kafka Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-auto_offset_reset>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-check_crcs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connections_max_idle_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-consumer_threads>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-decorate_events>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-enable_auto_commit>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclude_internal_topics>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fetch_max_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fetch_min_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-group_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-key_deserializer_class>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_poll_interval_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_poll_records>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-partition_assignment_strategy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-poll_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No +| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-session_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-topics>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-topics_pattern>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-value_deserializer_class>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. 
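+
+As a sketch of how the security settings fit together, the following hypothetical
+configuration connects over SSL and validates the broker certificate against a JKS
+truststore. All paths and passwords are placeholders:
+
+[source,ruby]
+----------------------------------
+input {
+  kafka {
+    bootstrap_servers => "kafka1:9093"
+    topics => ["secure-logs"]
+    security_protocol => "SSL"   # or SASL_PLAINTEXT / SASL_SSL for Kerberos setups
+    ssl_truststore_location => "/etc/logstash/kafka.truststore.jks"
+    ssl_truststore_password => "changeit"
+    # Only needed when the broker requires client authentication:
+    ssl_keystore_location => "/etc/logstash/kafka.keystore.jks"
+    ssl_keystore_password => "changeit"
+  }
+}
+----------------------------------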
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms"]
+===== `auto_commit_interval_ms`
+
+ * Value type is <>
+ * Default value is `"5000"`
+
+The frequency in milliseconds that the consumer offsets are committed to Kafka.
+
+[id="{version}-plugins-{type}s-{plugin}-auto_offset_reset"]
+===== `auto_offset_reset`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+What to do when there is no initial offset in Kafka or if an offset is out of range:
+
+* earliest: automatically reset the offset to the earliest offset
+* latest: automatically reset the offset to the latest offset
+* none: throw exception to the consumer if no previous offset is found for the consumer's group
+* anything else: throw exception to the consumer.
+
+[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"]
+===== `bootstrap_servers`
+
+ * Value type is <>
+ * Default value is `"localhost:9092"`
+
+A list of URLs of Kafka instances to use for establishing the initial connection to the cluster.
+This list should be in the form `host1:port1,host2:port2`. These URLs are used only
+for the initial connection, to discover the full cluster membership (which may change dynamically),
+so this list need not contain the full set of servers (you may want more than one, though, in
+case a server is down).
+
+[id="{version}-plugins-{type}s-{plugin}-check_crcs"]
+===== `check_crcs`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Automatically check the CRC32 of the records consumed. This check ensures that no on-the-wire
+or on-disk corruption of the messages has occurred. It adds some overhead, so it may be
+disabled in cases seeking extreme performance.
+
+[id="{version}-plugins-{type}s-{plugin}-client_id"]
+===== `client_id`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+The ID string to pass to the server when making requests. The purpose of this
+is to be able to track the source of requests beyond just IP and port by allowing
+a logical application name to be included.
+
+[id="{version}-plugins-{type}s-{plugin}-connections_max_idle_ms"]
+===== `connections_max_idle_ms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Close idle connections after the number of milliseconds specified by this config.
+
+[id="{version}-plugins-{type}s-{plugin}-consumer_threads"]
+===== `consumer_threads`
+
+ * Value type is <>
+ * Default value is `1`
+
+Ideally you should have as many threads as the number of partitions for a perfect
+balance; more threads than partitions means that some threads will be idle.
+
+[id="{version}-plugins-{type}s-{plugin}-decorate_events"]
+===== `decorate_events`
+
+ * Value type is <>
+ * Default value is `false`
+
+Option to add Kafka metadata, like the topic and message size, to the event.
+This will add a field named `kafka` to the logstash event containing the following attributes:
+
+* `topic`: The topic this message is associated with
+* `consumer_group`: The consumer group used to read in this event
+* `partition`: The partition this message is associated with
+* `offset`: The offset from the partition this message is associated with
+* `key`: A ByteBuffer containing the message key
+
+[id="{version}-plugins-{type}s-{plugin}-enable_auto_commit"]
+===== `enable_auto_commit`
+
+ * Value type is <>
+ * Default value is `"true"`
+
+If true, periodically commit to Kafka the offsets of messages already returned by the consumer.
+When the process fails, this committed offset is used as the position from
+which consumption will resume.
+
+[id="{version}-plugins-{type}s-{plugin}-exclude_internal_topics"]
+===== `exclude_internal_topics`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Whether records from internal topics (such as offsets) should be exposed to the consumer.
+If set to `true`, the only way to receive records from an internal topic is to subscribe to it.
+
+[id="{version}-plugins-{type}s-{plugin}-fetch_max_bytes"]
+===== `fetch_max_bytes`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The maximum amount of data the server should return for a fetch request. This is not an
+absolute maximum; if the first message in the first non-empty partition of the fetch is larger
+than this value, the message will still be returned to ensure that the consumer can make progress.
+
+[id="{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms"]
+===== `fetch_max_wait_ms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The maximum amount of time the server will block before answering the fetch request if
+there isn't sufficient data to immediately satisfy `fetch_min_bytes`. This
+should be less than or equal to the timeout used in `poll_timeout_ms`.
+
+[id="{version}-plugins-{type}s-{plugin}-fetch_min_bytes"]
+===== `fetch_min_bytes`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The minimum amount of data the server should return for a fetch request. If insufficient
+data is available, the request will wait for that much data to accumulate
+before answering.
+
+[id="{version}-plugins-{type}s-{plugin}-group_id"]
+===== `group_id`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+The identifier of the group this consumer belongs to. A consumer group is a single logical subscriber
+that happens to be made up of multiple processors. Messages in a topic will be distributed to all
+Logstash instances with the same `group_id`.
+
+[id="{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms"]
+===== `heartbeat_interval_ms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The expected time between heartbeats to the consumer coordinator. Heartbeats are used to ensure
+that the consumer's session stays active and to facilitate rebalancing when new
+consumers join or leave the group. The value must be set lower than
+`session.timeout.ms`, but typically should be set no higher than 1/3 of that value.
+It can be adjusted even lower to control the expected time for normal rebalances.
+
+[id="{version}-plugins-{type}s-{plugin}-jaas_path"]
+===== `jaas_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization
+services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for a Kafka client:
+[source,java]
+----------------------------------
+KafkaClient {
+  com.sun.security.auth.module.Krb5LoginModule required
+  useTicketCache=true
+  renewTicket=true
+  serviceName="kafka";
+  };
+----------------------------------
+
+Please note that specifying `jaas_path` and `kerberos_config` in the config file adds these
+to the global JVM system properties. This means that if you have multiple Kafka inputs, all of them share the same
+`jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on
+different JVM instances.
+
+[id="{version}-plugins-{type}s-{plugin}-kerberos_config"]
+===== `kerberos_config`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Optional path to a Kerberos config file. This file follows the krb5.conf format, as detailed in
+https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html
+
+[id="{version}-plugins-{type}s-{plugin}-key_deserializer_class"]
+===== `key_deserializer_class`
+
+ * Value type is <>
+ * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"`
+
+Java class used to deserialize the record's key.
+
+[id="{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes"]
+===== `max_partition_fetch_bytes`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The maximum amount of data per partition the server will return. The maximum total memory used for a
+request will be `#partitions * max.partition.fetch.bytes`. This size must be at least
+as large as the maximum message size the server allows, or else it is possible for the producer to
+send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying
+to fetch a large message on a certain partition.
+
+[id="{version}-plugins-{type}s-{plugin}-max_poll_interval_ms"]
+===== `max_poll_interval_ms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The maximum delay between invocations of poll() when using consumer group management. This places
+an upper bound on the amount of time that the consumer can be idle before fetching more records.
+If poll() is not called before expiration of this timeout, then the consumer is considered failed and
+the group will rebalance in order to reassign the partitions to another member.
+The value of the configuration `request_timeout_ms` must always be larger than `max_poll_interval_ms`.
+
+[id="{version}-plugins-{type}s-{plugin}-max_poll_records"]
+===== `max_poll_records`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The maximum number of records returned in a single call to poll().
+
+[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"]
+===== `metadata_max_age_ms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The period of time in milliseconds after which we force a refresh of metadata, even if
+we haven't seen any partition leadership changes, in order to proactively discover any new brokers or partitions.
+
+[id="{version}-plugins-{type}s-{plugin}-partition_assignment_strategy"]
+===== `partition_assignment_strategy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The class name of the partition assignment strategy that the client will use to distribute
+partition ownership amongst consumer instances.
+
+[id="{version}-plugins-{type}s-{plugin}-poll_timeout_ms"]
+===== `poll_timeout_ms`
+
+ * Value type is <>
+ * Default value is `100`
+
+Time the Kafka consumer will wait to receive new messages from topics.
+
+[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"]
+===== `receive_buffer_bytes`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.
+
+[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"]
+===== `reconnect_backoff_ms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The amount of time to wait before attempting to reconnect to a given host.
+This avoids repeatedly connecting to a host in a tight loop.
+This backoff applies to all requests sent by the consumer to the broker.
+
+[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"]
+===== `request_timeout_ms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+This configuration controls the maximum amount of time the client will wait
+for the response to a request. If the response is not received before the timeout
+elapses, the client will resend the request if necessary, or fail the request if
+retries are exhausted.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"]
+===== `retry_backoff_ms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The amount of time to wait before attempting to retry a failed fetch request
+to a given topic partition. This avoids repeated fetching-and-failing in a tight loop.
+
+[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"]
+===== `sasl_kerberos_service_name`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The Kerberos principal name that the Kafka broker runs as.
+This can be defined either in Kafka's JAAS config or in Kafka's config.
+
+[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"]
+===== `sasl_mechanism`
+
+ * Value type is <>
+ * Default value is `"GSSAPI"`
+
+http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections.
+This may be any mechanism for which a security provider is available.
+GSSAPI is the default mechanism.
+
+[id="{version}-plugins-{type}s-{plugin}-security_protocol"]
+===== `security_protocol`
+
+ * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`
+ * Default value is `"PLAINTEXT"`
+
+Security protocol to use: one of `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, or `SASL_SSL`.
+
+[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"]
+===== `send_buffer_bytes`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The size of the TCP send buffer (SO_SNDBUF) to use when sending data.
+
+[id="{version}-plugins-{type}s-{plugin}-session_timeout_ms"]
+===== `session_timeout_ms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The timeout after which the consumer is marked dead and a rebalance operation is triggered
+for the group identified by `group_id` if `poll()` has not been invoked within this period
+(see also `poll_timeout_ms`).
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * Default value is `false`
+
+Enable SSL/TLS secured communication to the Kafka broker.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"]
+===== `ssl_key_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The password of the private key in the key store file.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"]
+===== `ssl_keystore_location`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If client authentication is required, this setting stores the keystore path.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"]
+===== `ssl_keystore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If client authentication is required, this setting stores the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"]
+===== `ssl_keystore_type`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The keystore type.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"]
+===== `ssl_truststore_location`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The JKS truststore path to validate the Kafka broker's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"]
+===== `ssl_truststore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"]
+===== `ssl_truststore_type`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The truststore type.
+
+[id="{version}-plugins-{type}s-{plugin}-topics"]
+===== `topics`
+
+ * Value type is <>
+ * Default value is `["logstash"]`
+
+A list of topics to subscribe to. Defaults to `["logstash"]`.
+
+[id="{version}-plugins-{type}s-{plugin}-topics_pattern"]
+===== `topics_pattern`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A topic regex pattern to subscribe to.
+The `topics` configuration is ignored when this setting is used.
+
+[id="{version}-plugins-{type}s-{plugin}-value_deserializer_class"]
+===== `value_deserializer_class`
+
+ * Value type is <>
+ * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"`
+
+Java class used to deserialize the record's value.
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/kafka-v6.3.4.asciidoc b/docs/versioned-plugins/inputs/kafka-v6.3.4.asciidoc
new file mode 100644
index 000000000..c8560778f
--- /dev/null
+++ b/docs/versioned-plugins/inputs/kafka-v6.3.4.asciidoc
@@ -0,0 +1,553 @@
+:plugin: kafka
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v6.3.4
+:release_date: 2017-07-13
+:changelog_url: https://github.com/logstash-plugins/logstash-input-kafka/blob/v6.3.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Kafka input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input will read events from a Kafka topic. It uses the 0.10 version of
+the consumer API provided by Kafka to read messages from the broker.
+
+Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination
+of Logstash and the Kafka input plugin:
+
+[options="header"]
+|==========================================================
+|Kafka Client Version |Logstash Version |Plugin Version |Why?
+|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular
+|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`)
+|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`)
+|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker
+|0.10.1.x |2.4.x - 5.x.x | 6.x.x |
+|0.11.0.0 |2.4.x - 5.x.x | 6.x.x |Not compatible with the <= 0.9 broker
+|==========================================================
+
+NOTE: We recommend that you use matching Kafka client and broker versions. During upgrades, you should
+upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker
+is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around.
+
+This input supports connecting to Kafka over:
+
+* SSL (requires plugin version 3.0.0 or later)
+* Kerberos SASL (requires plugin version 5.1.0 or later)
+
+By default security is disabled but can be turned on as needed.
+
+The Logstash Kafka consumer handles group management and uses the default offset management
+strategy using Kafka topics.
+
+Logstash instances by default form a single logical group to subscribe to Kafka topics.
+Each Logstash Kafka consumer can run multiple threads to increase read throughput. Alternatively,
+you could run multiple Logstash instances with the same `group_id` to spread the load across
+physical machines. Messages in a topic will be distributed to all Logstash instances with
+the same `group_id`.
+
+Ideally you should have as many threads as the number of partitions for a perfect balance --
+more threads than partitions means that some threads will be idle.
+
+For more information see http://kafka.apache.org/documentation.html#theconsumer
+
+Kafka consumer configuration: http://kafka.apache.org/documentation.html#consumerconfigs
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Kafka Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-auto_offset_reset>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-check_crcs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connections_max_idle_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-consumer_threads>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-decorate_events>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-enable_auto_commit>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclude_internal_topics>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fetch_max_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fetch_min_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-group_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-key_deserializer_class>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_poll_interval_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_poll_records>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-partition_assignment_strategy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-poll_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No +| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-session_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-topics>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-topics_pattern>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-value_deserializer_class>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. 
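+
+Similarly, a hypothetical sketch that subscribes by regex rather than by a fixed
+topic list, and that controls where consumption starts and how offsets are committed
+(the pattern and intervals are placeholders):
+
+[source,ruby]
+----------------------------------
+input {
+  kafka {
+    bootstrap_servers => "kafka1:9092"
+    topics_pattern => "app-.*-logs"     # `topics` is ignored when this is set
+    auto_offset_reset => "earliest"     # where to start when the group has no committed offset
+    enable_auto_commit => "true"        # periodically commit consumed offsets back to Kafka
+    auto_commit_interval_ms => "5000"   # commit frequency in milliseconds
+  }
+}
+----------------------------------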
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms"]
+===== `auto_commit_interval_ms`
+
+ * Value type is <>
+ * Default value is `"5000"`
+
+The frequency in milliseconds that the consumer offsets are committed to Kafka.
+
+[id="{version}-plugins-{type}s-{plugin}-auto_offset_reset"]
+===== `auto_offset_reset`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+What to do when there is no initial offset in Kafka or if an offset is out of range:
+
+* earliest: automatically reset the offset to the earliest offset
+* latest: automatically reset the offset to the latest offset
+* none: throw exception to the consumer if no previous offset is found for the consumer's group
+* anything else: throw exception to the consumer.
+
+[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"]
+===== `bootstrap_servers`
+
+ * Value type is <>
+ * Default value is `"localhost:9092"`
+
+A list of URLs of Kafka instances to use for establishing the initial connection to the cluster.
+This list should be in the form `host1:port1,host2:port2`. These URLs are used only
+for the initial connection, to discover the full cluster membership (which may change dynamically),
+so this list need not contain the full set of servers (you may want more than one, though, in
+case a server is down).
+
+[id="{version}-plugins-{type}s-{plugin}-check_crcs"]
+===== `check_crcs`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Automatically check the CRC32 of the records consumed. This check ensures that no on-the-wire
+or on-disk corruption of the messages has occurred. It adds some overhead, so it may be
+disabled in cases seeking extreme performance.
+
+[id="{version}-plugins-{type}s-{plugin}-client_id"]
+===== `client_id`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+The ID string to pass to the server when making requests. The purpose of this
+is to be able to track the source of requests beyond just IP and port by allowing
+a logical application name to be included.
+
+[id="{version}-plugins-{type}s-{plugin}-connections_max_idle_ms"]
+===== `connections_max_idle_ms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Close idle connections after the number of milliseconds specified by this config.
+
+[id="{version}-plugins-{type}s-{plugin}-consumer_threads"]
+===== `consumer_threads`
+
+ * Value type is <>
+ * Default value is `1`
+
+Ideally you should have as many threads as the number of partitions for a perfect
+balance; more threads than partitions means that some threads will be idle.
+
+[id="{version}-plugins-{type}s-{plugin}-decorate_events"]
+===== `decorate_events`
+
+ * Value type is <>
+ * Default value is `false`
+
+Option to add Kafka metadata, like the topic and message size, to the event.
+This will add a field named `kafka` to the logstash event containing the following attributes:
+
+* `topic`: The topic this message is associated with
+* `consumer_group`: The consumer group used to read in this event
+* `partition`: The partition this message is associated with
+* `offset`: The offset from the partition this message is associated with
+* `key`: A ByteBuffer containing the message key
+
+[id="{version}-plugins-{type}s-{plugin}-enable_auto_commit"]
+===== `enable_auto_commit`
+
+ * Value type is <>
+ * Default value is `"true"`
+
+If true, periodically commit to Kafka the offsets of messages already returned by the consumer.
+When the process fails, this committed offset is used as the position from
+which consumption will resume.
+
+[id="{version}-plugins-{type}s-{plugin}-exclude_internal_topics"]
+===== `exclude_internal_topics`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Whether records from internal topics (such as offsets) should be exposed to the consumer.
+If set to `true`, the only way to receive records from an internal topic is to subscribe to it.
+
+[id="{version}-plugins-{type}s-{plugin}-fetch_max_bytes"]
+===== `fetch_max_bytes`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The maximum amount of data the server should return for a fetch request. This is not an
+absolute maximum; if the first message in the first non-empty partition of the fetch is larger
+than this value, the message will still be returned to ensure that the consumer can make progress.
+
+[id="{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms"]
+===== `fetch_max_wait_ms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The maximum amount of time the server will block before answering the fetch request if
+there isn't sufficient data to immediately satisfy `fetch_min_bytes`. This
+should be less than or equal to the timeout used in `poll_timeout_ms`.
+
+[id="{version}-plugins-{type}s-{plugin}-fetch_min_bytes"]
+===== `fetch_min_bytes`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The minimum amount of data the server should return for a fetch request. If insufficient
+data is available, the request will wait for that much data to accumulate
+before answering.
+
+[id="{version}-plugins-{type}s-{plugin}-group_id"]
+===== `group_id`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+The identifier of the group this consumer belongs to. A consumer group is a single logical subscriber
+that happens to be made up of multiple processors. Messages in a topic will be distributed to all
+Logstash instances with the same `group_id`.
+
+[id="{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms"]
+===== `heartbeat_interval_ms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The expected time between heartbeats to the consumer coordinator. Heartbeats are used to ensure
+that the consumer's session stays active and to facilitate rebalancing when new
+consumers join or leave the group. The value must be set lower than
+`session.timeout.ms`, but typically should be set no higher than 1/3 of that value.
+It can be adjusted even lower to control the expected time for normal rebalances.
+
+[id="{version}-plugins-{type}s-{plugin}-jaas_path"]
+===== `jaas_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization
+services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for a Kafka client:
+[source,java]
+----------------------------------
+KafkaClient {
+  com.sun.security.auth.module.Krb5LoginModule required
+  useTicketCache=true
+  renewTicket=true
+  serviceName="kafka";
+  };
+----------------------------------
+
+Please note that specifying `jaas_path` and `kerberos_config` in the config file adds these
+to the global JVM system properties. This means that if you have multiple Kafka inputs, all of them share the same
+`jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on
+different JVM instances.
+
+[id="{version}-plugins-{type}s-{plugin}-kerberos_config"]
+===== `kerberos_config`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Optional path to a Kerberos config file. This file follows the krb5.conf format, as detailed in
+https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html
+
+[id="{version}-plugins-{type}s-{plugin}-key_deserializer_class"]
+===== `key_deserializer_class`
+
+ * Value type is <>
+ * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"`
+
+Java class used to deserialize the record's key.
+
+[id="{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes"]
+===== `max_partition_fetch_bytes`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The maximum amount of data per partition the server will return. The maximum total memory used for a
+request will be `#partitions * max.partition.fetch.bytes`. This size must be at least
+as large as the maximum message size the server allows, or else it is possible for the producer to
+send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying
+to fetch a large message on a certain partition.
+
+[id="{version}-plugins-{type}s-{plugin}-max_poll_interval_ms"]
+===== `max_poll_interval_ms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The maximum delay between invocations of poll() when using consumer group management. This places
+an upper bound on the amount of time that the consumer can be idle before fetching more records.
+If poll() is not called before expiration of this timeout, then the consumer is considered failed and
+the group will rebalance in order to reassign the partitions to another member.
+The value of the configuration `request_timeout_ms` must always be larger than `max_poll_interval_ms`.
+
+[id="{version}-plugins-{type}s-{plugin}-max_poll_records"]
+===== `max_poll_records`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The maximum number of records returned in a single call to poll().
+
+[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"]
+===== `metadata_max_age_ms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The period of time in milliseconds after which we force a refresh of metadata, even if
+we haven't seen any partition leadership changes, in order to proactively discover any new brokers or partitions.
+
+[id="{version}-plugins-{type}s-{plugin}-partition_assignment_strategy"]
+===== `partition_assignment_strategy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The class name of the partition assignment strategy that the client will use to distribute
+partition ownership amongst consumer instances.
+
+[id="{version}-plugins-{type}s-{plugin}-poll_timeout_ms"]
+===== `poll_timeout_ms`
+
+ * Value type is <>
+ * Default value is `100`
+
+Time the Kafka consumer will wait to receive new messages from topics.
+
+[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"]
+===== `receive_buffer_bytes`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.
+
+[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"]
+===== `reconnect_backoff_ms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The amount of time to wait before attempting to reconnect to a given host.
+This avoids repeatedly connecting to a host in a tight loop. +This backoff applies to all requests sent by the consumer to the broker. + +[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"] +===== `request_timeout_ms` + + * Value type is <> + * There is no default value for this setting. + +The configuration controls the maximum amount of time the client will wait +for the response of a request. If the response is not received before the timeout +elapses the client will resend the request if necessary or fail the request if +retries are exhausted. + +[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"] +===== `retry_backoff_ms` + + * Value type is <> + * There is no default value for this setting. + +The amount of time to wait before attempting to retry a failed fetch request +to a given topic partition. This avoids repeated fetching-and-failing in a tight loop. + +[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"] +===== `sasl_kerberos_service_name` + + * Value type is <> + * There is no default value for this setting. + +The Kerberos principal name that Kafka broker runs as. +This can be defined either in Kafka's JAAS config or in Kafka's config. + +[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"] +===== `sasl_mechanism` + + * Value type is <> + * Default value is `"GSSAPI"` + +http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections. +This may be any mechanism for which a security provider is available. +GSSAPI is the default mechanism. + +[id="{version}-plugins-{type}s-{plugin}-security_protocol"] +===== `security_protocol` + + * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL` + * Default value is `"PLAINTEXT"` + +Security protocol to use, which can be either of PLAINTEXT,SSL,SASL_PLAINTEXT,SASL_SSL + +[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"] +===== `send_buffer_bytes` + + * Value type is <> + * There is no default value for this setting. + +The size of the TCP send buffer (SO_SNDBUF) to use when sending data + +[id="{version}-plugins-{type}s-{plugin}-session_timeout_ms"] +===== `session_timeout_ms` + + * Value type is <> + * There is no default value for this setting. + +The timeout after which, if the `poll_timeout_ms` is not invoked, the consumer is marked dead +and a rebalance operation is triggered for the group identified by `group_id` + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `false` + +Enable SSL/TLS secured communication to Kafka broker. + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"] +===== `ssl_key_password` + + * Value type is <> + * There is no default value for this setting. + +The password of the private key in the key store file. + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"] +===== `ssl_keystore_location` + + * Value type is <> + * There is no default value for this setting. + +If client authentication is required, this setting stores the keystore path. + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"] +===== `ssl_keystore_password` + + * Value type is <> + * There is no default value for this setting. 
+ +If client authentication is required, this setting stores the keystore password + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"] +===== `ssl_keystore_type` + + * Value type is <> + * There is no default value for this setting. + +The keystore type. + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"] +===== `ssl_truststore_location` + + * Value type is <> + * There is no default value for this setting. + +The JKS truststore path to validate the Kafka broker's certificate. + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"] +===== `ssl_truststore_password` + + * Value type is <> + * There is no default value for this setting. + +The truststore password + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"] +===== `ssl_truststore_type` + + * Value type is <> + * There is no default value for this setting. + +The truststore type. + +[id="{version}-plugins-{type}s-{plugin}-topics"] +===== `topics` + + * Value type is <> + * Default value is `["logstash"]` + +A list of topics to subscribe to, defaults to ["logstash"]. + +[id="{version}-plugins-{type}s-{plugin}-topics_pattern"] +===== `topics_pattern` + + * Value type is <> + * There is no default value for this setting. + +A topic regex pattern to subscribe to. +The topics configuration will be ignored when using this configuration. + +[id="{version}-plugins-{type}s-{plugin}-value_deserializer_class"] +===== `value_deserializer_class` + + * Value type is <> + * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"` + +Java Class used to deserialize the record's value + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/kafka-v7.0.0.asciidoc b/docs/versioned-plugins/inputs/kafka-v7.0.0.asciidoc new file mode 100644 index 000000000..0f0c1d321 --- /dev/null +++ b/docs/versioned-plugins/inputs/kafka-v7.0.0.asciidoc @@ -0,0 +1,566 @@ +:plugin: kafka +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v7.0.0 +:release_date: 2017-07-18 +:changelog_url: https://github.com/logstash-plugins/logstash-input-kafka/blob/v7.0.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Kafka input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This input will read events from a Kafka topic. It uses the 0.10 version of +the consumer API provided by Kafka to read messages from the broker. + +Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination +of Logstash and the Kafka input plugin: + +[options="header"] +|========================================================== +|Kafka Client Version |Logstash Version |Plugin Version |Why? 
+|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular +|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`) +|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`) +|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker +|0.10.1.x |2.4.x - 5.x.x | 6.x.x | +|0.11.0.0 |2.4.x - 5.x.x | 6.x.x |Not compatible with the <= 0.9 broker +|========================================================== + +NOTE: We recommended that you use matching Kafka client and broker versions. During upgrades, you should +upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker +is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around. + +This input supports connecting to Kafka over: + +* SSL (requires plugin version 3.0.0 or later) +* Kerberos SASL (requires plugin version 5.1.0 or later) + +By default security is disabled but can be turned on as needed. + +The Logstash Kafka consumer handles group management and uses the default offset management +strategy using Kafka topics. + +Logstash instances by default form a single logical group to subscribe to Kafka topics +Each Logstash Kafka consumer can run multiple threads to increase read throughput. Alternatively, +you could run multiple Logstash instances with the same `group_id` to spread the load across +physical machines. Messages in a topic will be distributed to all Logstash instances with +the same `group_id`. + +Ideally you should have as many threads as the number of partitions for a perfect balance -- +more threads than partitions means that some threads will be idle + +For more information see http://kafka.apache.org/documentation.html#theconsumer + +Kafka consumer configuration: http://kafka.apache.org/documentation.html#consumerconfigs + +==== Metadata fields + +The following metadata from Kafka broker are added under the `[@metadata]` field: + +* `[@metadata][kafka][topic]`: Original Kafka topic from where the message was consumed. +* `[@metadata][kafka][consumer_group]`: Consumer group +* `[@metadata][kafka][partition]`: Partition info for this message. +* `[@metadata][kafka][offset]`: Original record offset for this message. +* `[@metadata][kafka][key]`: Record key, if any. +* `[@metadata][kafka][timestamp]`: Timestamp when this message was received by the Kafka broker. + +Please note that `@metadata` fields are not part of any of your events at output time. If you need these information to be +inserted into your original event, you'll have to use the `mutate` filter to manually copy the required fields into your `event`. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Kafka Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-auto_offset_reset>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-check_crcs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connections_max_idle_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-consumer_threads>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-decorate_events>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-enable_auto_commit>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclude_internal_topics>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fetch_max_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fetch_min_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-group_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-key_deserializer_class>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_poll_interval_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_poll_records>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-partition_assignment_strategy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-poll_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No +| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-session_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-topics>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-topics_pattern>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-value_deserializer_class>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. 
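+
+For example, a minimal pipeline that reads one topic and copies some of the Kafka metadata
+described above into the event could look like the following sketch. The broker address, topic
+name, and target field name are hypothetical placeholders, not values required by the plugin:
+
+[source,ruby]
+----------------------------------
+input {
+  kafka {
+    bootstrap_servers => "localhost:9092"   # placeholder broker address
+    topics => ["example-topic"]             # placeholder topic name
+    group_id => "logstash"
+    decorate_events => true                 # populate the [@metadata][kafka] fields
+  }
+}
+
+filter {
+  mutate {
+    # Copy Kafka metadata into the event so it survives to the output stage.
+    add_field => { "kafka_topic" => "%{[@metadata][kafka][topic]}" }
+  }
+}
+----------------------------------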
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms"]
+===== `auto_commit_interval_ms`
+
+ * Value type is <<string,string>>
+ * Default value is `"5000"`
+
+The frequency in milliseconds at which the consumer offsets are committed to Kafka.
+
+[id="{version}-plugins-{type}s-{plugin}-auto_offset_reset"]
+===== `auto_offset_reset`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+What to do when there is no initial offset in Kafka or if an offset is out of range:
+
+* earliest: automatically reset the offset to the earliest offset
+* latest: automatically reset the offset to the latest offset
+* none: throw an exception to the consumer if no previous offset is found for the consumer's group
+* anything else: throw an exception to the consumer.
+
+[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"]
+===== `bootstrap_servers`
+
+ * Value type is <<string,string>>
+ * Default value is `"localhost:9092"`
+
+A list of URLs of Kafka instances to use for establishing the initial connection to the cluster.
+This list should be in the form `host1:port1,host2:port2`. These URLs are used only
+for the initial connection, to discover the full cluster membership (which may change dynamically),
+so this list need not contain the full set of servers (you may want more than one, though, in
+case a server is down).
+
+[id="{version}-plugins-{type}s-{plugin}-check_crcs"]
+===== `check_crcs`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Automatically check the CRC32 of the records consumed. This ensures that no on-the-wire or
+on-disk corruption of the messages has occurred. This check adds some overhead, so it may be
+disabled in cases seeking extreme performance.
+
+[id="{version}-plugins-{type}s-{plugin}-client_id"]
+===== `client_id`
+
+ * Value type is <<string,string>>
+ * Default value is `"logstash"`
+
+The ID string to pass to the server when making requests. The purpose of this
+is to be able to track the source of requests beyond just IP and port by allowing
+a logical application name to be included.
+
+[id="{version}-plugins-{type}s-{plugin}-connections_max_idle_ms"]
+===== `connections_max_idle_ms`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Close idle connections after the number of milliseconds specified by this config.
+
+[id="{version}-plugins-{type}s-{plugin}-consumer_threads"]
+===== `consumer_threads`
+
+ * Value type is <<number,number>>
+ * Default value is `1`
+
+Ideally you should have as many threads as the number of partitions for a perfect
+balance; more threads than partitions means that some threads will be idle.
+
+[id="{version}-plugins-{type}s-{plugin}-decorate_events"]
+===== `decorate_events`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Option to add Kafka metadata, such as the topic and message size, to the event.
+This will add a field named `kafka` to the Logstash event containing the following attributes:
+
+* `topic`: The topic this message is associated with
+* `consumer_group`: The consumer group used to read in this event
+* `partition`: The partition this message is associated with
+* `offset`: The offset from the partition this message is associated with
+* `key`: A `ByteBuffer` containing the message key
+
+[id="{version}-plugins-{type}s-{plugin}-enable_auto_commit"]
+===== `enable_auto_commit`
+
+ * Value type is <<string,string>>
+ * Default value is `"true"`
+
+If true, periodically commit to Kafka the offsets of messages already returned by the consumer.
+If the process fails, this committed offset is used as the position from which consumption
+will resume.
+
+[id="{version}-plugins-{type}s-{plugin}-exclude_internal_topics"]
+===== `exclude_internal_topics`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Whether records from internal topics (such as offsets) should be exposed to the consumer.
+If set to `true`, the only way to receive records from an internal topic is to subscribe to it.
+
+[id="{version}-plugins-{type}s-{plugin}-fetch_max_bytes"]
+===== `fetch_max_bytes`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The maximum amount of data the server should return for a fetch request. This is not an
+absolute maximum: if the first message in the first non-empty partition of the fetch is larger
+than this value, the message will still be returned to ensure that the consumer can make progress.
+
+[id="{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms"]
+===== `fetch_max_wait_ms`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The maximum amount of time the server will block before answering the fetch request if
+there isn't sufficient data to immediately satisfy `fetch_min_bytes`. This
+should be less than or equal to the timeout used in `poll_timeout_ms`.
+
+[id="{version}-plugins-{type}s-{plugin}-fetch_min_bytes"]
+===== `fetch_min_bytes`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The minimum amount of data the server should return for a fetch request. If insufficient
+data is available, the request will wait for that much data to accumulate
+before answering.
+
+[id="{version}-plugins-{type}s-{plugin}-group_id"]
+===== `group_id`
+
+ * Value type is <<string,string>>
+ * Default value is `"logstash"`
+
+The identifier of the group this consumer belongs to. A consumer group is a single logical subscriber
+that happens to be made up of multiple processors. Messages in a topic will be distributed to all
+Logstash instances with the same `group_id`.
+
+[id="{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms"]
+===== `heartbeat_interval_ms`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The expected time between heartbeats to the consumer coordinator. Heartbeats are used to ensure
+that the consumer's session stays active and to facilitate rebalancing when new
+consumers join or leave the group. The value must be set lower than
+`session.timeout.ms`, but typically should be set no higher than one third of that value.
+It can be adjusted even lower to control the expected time for normal rebalances.
+
+[id="{version}-plugins-{type}s-{plugin}-jaas_path"]
+===== `jaas_path`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization
+services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for a Kafka client:
+[source,java]
+----------------------------------
+KafkaClient {
+  com.sun.security.auth.module.Krb5LoginModule required
+  useTicketCache=true
+  renewTicket=true
+  serviceName="kafka";
+};
+----------------------------------
+
+Please note that specifying `jaas_path` and `kerberos_config` in the config file adds these
+to the global JVM system properties. This means that if you have multiple Kafka inputs, all of them
+share the same `jaas_path` and `kerberos_config`. If this is not desirable, you have to run
+separate instances of Logstash on different JVMs.
+
+[id="{version}-plugins-{type}s-{plugin}-kerberos_config"]
+===== `kerberos_config`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Optional path to a Kerberos config file. This file uses the krb5.conf format, as detailed in
+https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html
+
+[id="{version}-plugins-{type}s-{plugin}-key_deserializer_class"]
+===== `key_deserializer_class`
+
+ * Value type is <<string,string>>
+ * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"`
+
+Java class used to deserialize the record's key.
+
+[id="{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes"]
+===== `max_partition_fetch_bytes`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The maximum amount of data per partition the server will return. The maximum total memory used for a
+request will be `#partitions * max.partition.fetch.bytes`. This size must be at least
+as large as the maximum message size the server allows, or else it is possible for the producer to
+send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying
+to fetch a large message on a certain partition.
+
+[id="{version}-plugins-{type}s-{plugin}-max_poll_interval_ms"]
+===== `max_poll_interval_ms`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The maximum delay between invocations of `poll()` when using consumer group management. This places
+an upper bound on the amount of time that the consumer can be idle before fetching more records.
+If `poll()` is not called before expiration of this timeout, then the consumer is considered failed and
+the group will rebalance in order to reassign the partitions to another member.
+The value of the configuration `request_timeout_ms` must always be larger than `max_poll_interval_ms`.
+
+[id="{version}-plugins-{type}s-{plugin}-max_poll_records"]
+===== `max_poll_records`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The maximum number of records returned in a single call to `poll()`.
+
+[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"]
+===== `metadata_max_age_ms`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The period of time in milliseconds after which we force a refresh of metadata, even if
+we haven't seen any partition leadership changes, to proactively discover any new brokers or partitions.
+
+[id="{version}-plugins-{type}s-{plugin}-partition_assignment_strategy"]
+===== `partition_assignment_strategy`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The class name of the partition assignment strategy that the client will use to distribute
+partition ownership amongst consumer instances.
+
+[id="{version}-plugins-{type}s-{plugin}-poll_timeout_ms"]
+===== `poll_timeout_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `100`
+
+Time the Kafka consumer will wait to receive new messages from topics.
+
+[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"]
+===== `receive_buffer_bytes`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.
+
+[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"]
+===== `reconnect_backoff_ms`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The amount of time to wait before attempting to reconnect to a given host.
+This avoids repeatedly connecting to a host in a tight loop.
+This backoff applies to all requests sent by the consumer to the broker.
+
+[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"]
+===== `request_timeout_ms`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+This configuration controls the maximum amount of time the client will wait
+for the response to a request. If the response is not received before the timeout
+elapses, the client will resend the request if necessary, or fail the request if
+retries are exhausted.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"]
+===== `retry_backoff_ms`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The amount of time to wait before attempting to retry a failed fetch request
+to a given topic partition. This avoids repeated fetching-and-failing in a tight loop.
+
+[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"]
+===== `sasl_kerberos_service_name`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The Kerberos principal name that the Kafka broker runs as.
+This can be defined either in Kafka's JAAS config or in Kafka's config.
+
+[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"]
+===== `sasl_mechanism`
+
+ * Value type is <<string,string>>
+ * Default value is `"GSSAPI"`
+
+http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections.
+This may be any mechanism for which a security provider is available.
+GSSAPI is the default mechanism.
+
+[id="{version}-plugins-{type}s-{plugin}-security_protocol"]
+===== `security_protocol`
+
+ * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`
+ * Default value is `"PLAINTEXT"`
+
+Security protocol to use, which can be one of `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, or `SASL_SSL`.
+
+[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"]
+===== `send_buffer_bytes`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The size of the TCP send buffer (SO_SNDBUF) to use when sending data.
+
+[id="{version}-plugins-{type}s-{plugin}-session_timeout_ms"]
+===== `session_timeout_ms`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The timeout after which, if `poll()` has not been invoked (see `poll_timeout_ms`), the consumer
+is marked dead and a rebalance operation is triggered for the group identified by `group_id`.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Enable SSL/TLS secured communication to the Kafka broker.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"]
+===== `ssl_key_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+The password of the private key in the key store file.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"]
+===== `ssl_keystore_location`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+If client authentication is required, this setting stores the keystore path.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"]
+===== `ssl_keystore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+If client authentication is required, this setting stores the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"]
+===== `ssl_keystore_type`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The keystore type.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"]
+===== `ssl_truststore_location`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The JKS truststore path to validate the Kafka broker's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"]
+===== `ssl_truststore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+The truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"]
+===== `ssl_truststore_type`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The truststore type.
+
+[id="{version}-plugins-{type}s-{plugin}-topics"]
+===== `topics`
+
+ * Value type is <<array,array>>
+ * Default value is `["logstash"]`
+
+A list of topics to subscribe to. Defaults to `["logstash"]`.
+
+[id="{version}-plugins-{type}s-{plugin}-topics_pattern"]
+===== `topics_pattern`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+A topic regex pattern to subscribe to.
+The `topics` configuration will be ignored when this setting is used.
+
+[id="{version}-plugins-{type}s-{plugin}-value_deserializer_class"]
+===== `value_deserializer_class`
+
+ * Value type is <<string,string>>
+ * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"`
+
+Java class used to deserialize the record's value.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/kafka-v8.0.0.asciidoc b/docs/versioned-plugins/inputs/kafka-v8.0.0.asciidoc
new file mode 100644
index 000000000..75474319f
--- /dev/null
+++ b/docs/versioned-plugins/inputs/kafka-v8.0.0.asciidoc
@@ -0,0 +1,557 @@
+:plugin: kafka
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v8.0.0
+:release_date: 2017-08-01
+:changelog_url: https://github.com/logstash-plugins/logstash-input-kafka/blob/v8.0.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Kafka input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input will read events from a Kafka topic. It uses the 0.10 version of
+the consumer API provided by Kafka to read messages from the broker.
+
+Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination
+of Logstash and the Kafka input plugin:
+
+[options="header"]
+|==========================================================
+|Kafka Client Version |Logstash Version |Plugin Version |Why?
+|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular
+|0.9 |2.0.0 - 2.3.x |3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`)
+|0.9 |2.4.x - 5.x.x |4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`)
+|0.10.0.x |2.4.x - 5.x.x |5.x.x |Not compatible with the <= 0.9 broker
+|0.10.1.x |2.4.x - 5.x.x |6.x.x |
+|0.11.0.0 |2.4.x - 5.x.x |6.x.x |Not compatible with the <= 0.9 broker
+|==========================================================
+
+NOTE: We recommend that you use matching Kafka client and broker versions. During upgrades, you should
+upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker
+is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around.
+
+This input supports connecting to Kafka over:
+
+* SSL (requires plugin version 3.0.0 or later)
+* Kerberos SASL (requires plugin version 5.1.0 or later)
+
+By default, security is disabled but can be turned on as needed.
+
+The Logstash Kafka consumer handles group management and uses the default offset management
+strategy using Kafka topics.
+
+By default, Logstash instances form a single logical group to subscribe to Kafka topics.
+Each Logstash Kafka consumer can run multiple threads to increase read throughput. Alternatively,
+you could run multiple Logstash instances with the same `group_id` to spread the load across
+physical machines. Messages in a topic will be distributed to all Logstash instances with
+the same `group_id`.
+
+Ideally you should have as many threads as the number of partitions for a perfect balance;
+more threads than partitions means that some threads will be idle.
+
+For more information, see http://kafka.apache.org/documentation.html#theconsumer
+
+Kafka consumer configuration: http://kafka.apache.org/documentation.html#consumerconfigs
+
+==== Metadata fields
+
+The following metadata from the Kafka broker is added under the `[@metadata]` field:
+
+* `[@metadata][kafka][topic]`: Original Kafka topic from which the message was consumed.
+* `[@metadata][kafka][consumer_group]`: Consumer group.
+* `[@metadata][kafka][partition]`: Partition info for this message.
+* `[@metadata][kafka][offset]`: Original record offset for this message.
+* `[@metadata][kafka][key]`: Record key, if any.
+* `[@metadata][kafka][timestamp]`: Timestamp when this message was received by the Kafka broker.
+
+Please note that `@metadata` fields are not part of any of your events at output time. If you need this
+information to be inserted into your original event, you'll have to use the `mutate` filter to manually
+copy the required fields into your `event`.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Kafka Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-auto_offset_reset>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-check_crcs>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-client_id>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-connections_max_idle_ms>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-consumer_threads>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-decorate_events>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-enable_auto_commit>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-exclude_internal_topics>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-fetch_max_bytes>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-fetch_min_bytes>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-group_id>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-key_deserializer_class>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-max_poll_interval_ms>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-max_poll_records>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-partition_assignment_strategy>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-poll_timeout_ms>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<<string,string>>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No
+| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-session_timeout_ms>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-topics>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-topics_pattern>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-value_deserializer_class>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
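+
+As a sketch of the SSL-related settings above, the following hypothetical configuration reads
+from a broker over SSL. The host name, truststore path, and password are placeholders; the
+commented-out keystore settings apply only when the broker requires client authentication:
+
+[source,ruby]
+----------------------------------
+input {
+  kafka {
+    bootstrap_servers => "kafka.example.com:9093"   # placeholder SSL listener address
+    topics => ["logstash"]
+    security_protocol => "SSL"
+    ssl_truststore_location => "/etc/pki/kafka/client.truststore.jks"   # placeholder path
+    ssl_truststore_password => "changeit"           # placeholder password
+    # ssl_keystore_location => "/etc/pki/kafka/client.keystore.jks"
+    # ssl_keystore_password => "changeit"
+  }
+}
+----------------------------------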
+ +  + +[id="{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms"] +===== `auto_commit_interval_ms` + + * Value type is <> + * Default value is `"5000"` + +The frequency in milliseconds that the consumer offsets are committed to Kafka. + +[id="{version}-plugins-{type}s-{plugin}-auto_offset_reset"] +===== `auto_offset_reset` + + * Value type is <> + * There is no default value for this setting. + +What to do when there is no initial offset in Kafka or if an offset is out of range: + +* earliest: automatically reset the offset to the earliest offset +* latest: automatically reset the offset to the latest offset +* none: throw exception to the consumer if no previous offset is found for the consumer's group +* anything else: throw exception to the consumer. + +[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"] +===== `bootstrap_servers` + + * Value type is <> + * Default value is `"localhost:9092"` + +A list of URLs of Kafka instances to use for establishing the initial connection to the cluster. +This list should be in the form of `host1:port1,host2:port2` These urls are just used +for the initial connection to discover the full cluster membership (which may change dynamically) +so this list need not contain the full set of servers (you may want more than one, though, in +case a server is down). + +[id="{version}-plugins-{type}s-{plugin}-check_crcs"] +===== `check_crcs` + + * Value type is <> + * There is no default value for this setting. + +Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk +corruption to the messages occurred. This check adds some overhead, so it may be +disabled in cases seeking extreme performance. + +[id="{version}-plugins-{type}s-{plugin}-client_id"] +===== `client_id` + + * Value type is <> + * Default value is `"logstash"` + +The id string to pass to the server when making requests. The purpose of this +is to be able to track the source of requests beyond just ip/port by allowing +a logical application name to be included. + +[id="{version}-plugins-{type}s-{plugin}-connections_max_idle_ms"] +===== `connections_max_idle_ms` + + * Value type is <> + * There is no default value for this setting. + +Close idle connections after the number of milliseconds specified by this config. + +[id="{version}-plugins-{type}s-{plugin}-consumer_threads"] +===== `consumer_threads` + + * Value type is <> + * Default value is `1` + +Ideally you should have as many threads as the number of partitions for a perfect +balance — more threads than partitions means that some threads will be idle + +[id="{version}-plugins-{type}s-{plugin}-decorate_events"] +===== `decorate_events` + + * Value type is <> + * Default value is `false` + +Option to add Kafka metadata like topic, message size to the event. +This will add a field named `kafka` to the logstash event containing the following attributes: + `topic`: The topic this message is associated with + `consumer_group`: The consumer group used to read in this event + `partition`: The partition this message is associated with + `offset`: The offset from the partition this message is associated with + `key`: A ByteBuffer containing the message key + +[id="{version}-plugins-{type}s-{plugin}-enable_auto_commit"] +===== `enable_auto_commit` + + * Value type is <> + * Default value is `"true"` + +If true, periodically commit to Kafka the offsets of messages already returned by the consumer. 
+This committed offset will be used when the process fails as the position from +which the consumption will begin. + +[id="{version}-plugins-{type}s-{plugin}-exclude_internal_topics"] +===== `exclude_internal_topics` + + * Value type is <> + * There is no default value for this setting. + +Whether records from internal topics (such as offsets) should be exposed to the consumer. +If set to true the only way to receive records from an internal topic is subscribing to it. + +[id="{version}-plugins-{type}s-{plugin}-fetch_max_bytes"] +===== `fetch_max_bytes` + + * Value type is <> + * There is no default value for this setting. + +The maximum amount of data the server should return for a fetch request. This is not an +absolute maximum, if the first message in the first non-empty partition of the fetch is larger +than this value, the message will still be returned to ensure that the consumer can make progress. + +[id="{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms"] +===== `fetch_max_wait_ms` + + * Value type is <> + * There is no default value for this setting. + +The maximum amount of time the server will block before answering the fetch request if +there isn't sufficient data to immediately satisfy `fetch_min_bytes`. This +should be less than or equal to the timeout used in `poll_timeout_ms` + +[id="{version}-plugins-{type}s-{plugin}-fetch_min_bytes"] +===== `fetch_min_bytes` + + * Value type is <> + * There is no default value for this setting. + +The minimum amount of data the server should return for a fetch request. If insufficient +data is available the request will wait for that much data to accumulate +before answering the request. + +[id="{version}-plugins-{type}s-{plugin}-group_id"] +===== `group_id` + + * Value type is <> + * Default value is `"logstash"` + +The identifier of the group this consumer belongs to. Consumer group is a single logical subscriber +that happens to be made up of multiple processors. Messages in a topic will be distributed to all +Logstash instances with the same `group_id` + +[id="{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms"] +===== `heartbeat_interval_ms` + + * Value type is <> + * There is no default value for this setting. + +The expected time between heartbeats to the consumer coordinator. Heartbeats are used to ensure +that the consumer's session stays active and to facilitate rebalancing when new +consumers join or leave the group. The value must be set lower than +`session.timeout.ms`, but typically should be set no higher than 1/3 of that value. +It can be adjusted even lower to control the expected time for normal rebalances. + +[id="{version}-plugins-{type}s-{plugin}-jaas_path"] +===== `jaas_path` + + * Value type is <> + * There is no default value for this setting. + +The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization +services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for Kafka client: +[source,java] +---------------------------------- +KafkaClient { + com.sun.security.auth.module.Krb5LoginModule required + useTicketCache=true + renewTicket=true + serviceName="kafka"; + }; +---------------------------------- + +Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these +to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same +`jaas_path` and `kerberos_config`. 
If this is not desirable, you would have to run separate instances of Logstash on +different JVM instances. + +[id="{version}-plugins-{type}s-{plugin}-kerberos_config"] +===== `kerberos_config` + + * Value type is <> + * There is no default value for this setting. + +Optional path to kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html + +[id="{version}-plugins-{type}s-{plugin}-key_deserializer_class"] +===== `key_deserializer_class` + + * Value type is <> + * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"` + +Java Class used to deserialize the record's key + +[id="{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes"] +===== `max_partition_fetch_bytes` + + * Value type is <> + * There is no default value for this setting. + +The maximum amount of data per-partition the server will return. The maximum total memory used for a +request will be #partitions * max.partition.fetch.bytes. This size must be at least +as large as the maximum message size the server allows or else it is possible for the producer to +send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying +to fetch a large message on a certain partition. + +[id="{version}-plugins-{type}s-{plugin}-max_poll_interval_ms"] +===== `max_poll_interval_ms` + + * Value type is <> + * There is no default value for this setting. + +The maximum delay between invocations of poll() when using consumer group management. This places +an upper bound on the amount of time that the consumer can be idle before fetching more records. +If poll() is not called before expiration of this timeout, then the consumer is considered failed and +the group will rebalance in order to reassign the partitions to another member. +The value of the configuration `request_timeout_ms` must always be larger than max_poll_interval_ms + +[id="{version}-plugins-{type}s-{plugin}-max_poll_records"] +===== `max_poll_records` + + * Value type is <> + * There is no default value for this setting. + +The maximum number of records returned in a single call to poll(). + +[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"] +===== `metadata_max_age_ms` + + * Value type is <> + * There is no default value for this setting. + +The period of time in milliseconds after which we force a refresh of metadata even if +we haven't seen any partition leadership changes to proactively discover any new brokers or partitions + +[id="{version}-plugins-{type}s-{plugin}-partition_assignment_strategy"] +===== `partition_assignment_strategy` + + * Value type is <> + * There is no default value for this setting. + +The class name of the partition assignment strategy that the client will use to distribute +partition ownership amongst consumer instances + +[id="{version}-plugins-{type}s-{plugin}-poll_timeout_ms"] +===== `poll_timeout_ms` + + * Value type is <> + * Default value is `100` + +Time kafka consumer will wait to receive new messages from topics + +[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] +===== `receive_buffer_bytes` + + * Value type is <> + * There is no default value for this setting. + +The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. + +[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"] +===== `reconnect_backoff_ms` + + * Value type is <> + * There is no default value for this setting. + +The amount of time to wait before attempting to reconnect to a given host. 
+This avoids repeatedly connecting to a host in a tight loop. +This backoff applies to all requests sent by the consumer to the broker. + +[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"] +===== `request_timeout_ms` + + * Value type is <> + * There is no default value for this setting. + +The configuration controls the maximum amount of time the client will wait +for the response of a request. If the response is not received before the timeout +elapses the client will resend the request if necessary or fail the request if +retries are exhausted. + +[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"] +===== `retry_backoff_ms` + + * Value type is <> + * There is no default value for this setting. + +The amount of time to wait before attempting to retry a failed fetch request +to a given topic partition. This avoids repeated fetching-and-failing in a tight loop. + +[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"] +===== `sasl_kerberos_service_name` + + * Value type is <> + * There is no default value for this setting. + +The Kerberos principal name that Kafka broker runs as. +This can be defined either in Kafka's JAAS config or in Kafka's config. + +[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"] +===== `sasl_mechanism` + + * Value type is <> + * Default value is `"GSSAPI"` + +http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections. +This may be any mechanism for which a security provider is available. +GSSAPI is the default mechanism. + +[id="{version}-plugins-{type}s-{plugin}-security_protocol"] +===== `security_protocol` + + * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL` + * Default value is `"PLAINTEXT"` + +Security protocol to use, which can be either of PLAINTEXT,SSL,SASL_PLAINTEXT,SASL_SSL + +[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"] +===== `send_buffer_bytes` + + * Value type is <> + * There is no default value for this setting. + +The size of the TCP send buffer (SO_SNDBUF) to use when sending data + +[id="{version}-plugins-{type}s-{plugin}-session_timeout_ms"] +===== `session_timeout_ms` + + * Value type is <> + * There is no default value for this setting. + +The timeout after which, if the `poll_timeout_ms` is not invoked, the consumer is marked dead +and a rebalance operation is triggered for the group identified by `group_id` + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"] +===== `ssl_key_password` + + * Value type is <> + * There is no default value for this setting. + +The password of the private key in the key store file. + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"] +===== `ssl_keystore_location` + + * Value type is <> + * There is no default value for this setting. + +If client authentication is required, this setting stores the keystore path. + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"] +===== `ssl_keystore_password` + + * Value type is <> + * There is no default value for this setting. + +If client authentication is required, this setting stores the keystore password + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"] +===== `ssl_keystore_type` + + * Value type is <> + * There is no default value for this setting. + +The keystore type. + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"] +===== `ssl_truststore_location` + + * Value type is <> + * There is no default value for this setting. 
+ +The JKS truststore path to validate the Kafka broker's certificate. + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"] +===== `ssl_truststore_password` + + * Value type is <> + * There is no default value for this setting. + +The truststore password + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"] +===== `ssl_truststore_type` + + * Value type is <> + * There is no default value for this setting. + +The truststore type. + +[id="{version}-plugins-{type}s-{plugin}-topics"] +===== `topics` + + * Value type is <> + * Default value is `["logstash"]` + +A list of topics to subscribe to, defaults to ["logstash"]. + +[id="{version}-plugins-{type}s-{plugin}-topics_pattern"] +===== `topics_pattern` + + * Value type is <> + * There is no default value for this setting. + +A topic regex pattern to subscribe to. +The topics configuration will be ignored when using this configuration. + +[id="{version}-plugins-{type}s-{plugin}-value_deserializer_class"] +===== `value_deserializer_class` + + * Value type is <> + * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"` + +Java Class used to deserialize the record's value + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/kafka-v8.0.2.asciidoc b/docs/versioned-plugins/inputs/kafka-v8.0.2.asciidoc new file mode 100644 index 000000000..1acd88e0c --- /dev/null +++ b/docs/versioned-plugins/inputs/kafka-v8.0.2.asciidoc @@ -0,0 +1,557 @@ +:plugin: kafka +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v8.0.2 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-input-kafka/blob/v8.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Kafka input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This input will read events from a Kafka topic. It uses the 0.10 version of +the consumer API provided by Kafka to read messages from the broker. + +Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination +of Logstash and the Kafka input plugin: + +[options="header"] +|========================================================== +|Kafka Client Version |Logstash Version |Plugin Version |Why? +|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular +|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`) +|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`) +|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker +|0.10.1.x |2.4.x - 5.x.x | 6.x.x | +|0.11.0.0 |2.4.x - 5.x.x | 6.x.x |Not compatible with the <= 0.9 broker +|========================================================== + +NOTE: We recommended that you use matching Kafka client and broker versions. During upgrades, you should +upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker +is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around. 
+
+This input supports connecting to Kafka over:
+
+* SSL (requires plugin version 3.0.0 or later)
+* Kerberos SASL (requires plugin version 5.1.0 or later)
+
+By default, security is disabled, but it can be turned on as needed.
+
+The Logstash Kafka consumer handles group management and uses the default offset management
+strategy using Kafka topics.
+
+Logstash instances by default form a single logical group to subscribe to Kafka topics.
+Each Logstash Kafka consumer can run multiple threads to increase read throughput. Alternatively,
+you could run multiple Logstash instances with the same `group_id` to spread the load across
+physical machines. Messages in a topic will be distributed to all Logstash instances with
+the same `group_id`.
+
+Ideally you should have as many threads as the number of partitions for a perfect balance --
+more threads than partitions means that some threads will be idle.
+
+For more information, see http://kafka.apache.org/documentation.html#theconsumer
+
+Kafka consumer configuration: http://kafka.apache.org/documentation.html#consumerconfigs
+
+==== Metadata fields
+
+The following metadata from the Kafka broker is added under the `[@metadata]` field:
+
+* `[@metadata][kafka][topic]`: Original Kafka topic from where the message was consumed.
+* `[@metadata][kafka][consumer_group]`: Consumer group
+* `[@metadata][kafka][partition]`: Partition info for this message.
+* `[@metadata][kafka][offset]`: Original record offset for this message.
+* `[@metadata][kafka][key]`: Record key, if any.
+* `[@metadata][kafka][timestamp]`: Timestamp when this message was received by the Kafka broker.
+
+Please note that `@metadata` fields are not part of any of your events at output time. If you need this
+information to be inserted into your original event, you'll have to use the `mutate` filter to manually
+copy the required fields into your `event`.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Kafka Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-auto_offset_reset>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-check_crcs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connections_max_idle_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-consumer_threads>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-decorate_events>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-enable_auto_commit>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclude_internal_topics>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fetch_max_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fetch_min_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-group_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-key_deserializer_class>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_poll_interval_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_poll_records>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-partition_assignment_strategy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-poll_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No +| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-session_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-topics>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-topics_pattern>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-value_deserializer_class>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. 
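+
+Before the per-option reference, here is a minimal sketch of how these options fit together, including
+copying Kafka metadata into the event as described above. The topic, group, and target field names are
+assumptions for illustration, not required values:
+
+[source,ruby]
+----------------------------------
+input {
+  kafka {
+    bootstrap_servers => "localhost:9092"
+    topics            => ["logstash"]
+    group_id          => "logstash"
+    decorate_events   => true   # populates the [@metadata][kafka][...] fields
+  }
+}
+
+filter {
+  mutate {
+    # @metadata is dropped at output time, so copy what you need first.
+    add_field => { "kafka_topic" => "%{[@metadata][kafka][topic]}" }
+  }
+}
+----------------------------------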
+ +  + +[id="{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms"] +===== `auto_commit_interval_ms` + + * Value type is <> + * Default value is `"5000"` + +The frequency in milliseconds that the consumer offsets are committed to Kafka. + +[id="{version}-plugins-{type}s-{plugin}-auto_offset_reset"] +===== `auto_offset_reset` + + * Value type is <> + * There is no default value for this setting. + +What to do when there is no initial offset in Kafka or if an offset is out of range: + +* earliest: automatically reset the offset to the earliest offset +* latest: automatically reset the offset to the latest offset +* none: throw exception to the consumer if no previous offset is found for the consumer's group +* anything else: throw exception to the consumer. + +[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"] +===== `bootstrap_servers` + + * Value type is <> + * Default value is `"localhost:9092"` + +A list of URLs of Kafka instances to use for establishing the initial connection to the cluster. +This list should be in the form of `host1:port1,host2:port2` These urls are just used +for the initial connection to discover the full cluster membership (which may change dynamically) +so this list need not contain the full set of servers (you may want more than one, though, in +case a server is down). + +[id="{version}-plugins-{type}s-{plugin}-check_crcs"] +===== `check_crcs` + + * Value type is <> + * There is no default value for this setting. + +Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk +corruption to the messages occurred. This check adds some overhead, so it may be +disabled in cases seeking extreme performance. + +[id="{version}-plugins-{type}s-{plugin}-client_id"] +===== `client_id` + + * Value type is <> + * Default value is `"logstash"` + +The id string to pass to the server when making requests. The purpose of this +is to be able to track the source of requests beyond just ip/port by allowing +a logical application name to be included. + +[id="{version}-plugins-{type}s-{plugin}-connections_max_idle_ms"] +===== `connections_max_idle_ms` + + * Value type is <> + * There is no default value for this setting. + +Close idle connections after the number of milliseconds specified by this config. + +[id="{version}-plugins-{type}s-{plugin}-consumer_threads"] +===== `consumer_threads` + + * Value type is <> + * Default value is `1` + +Ideally you should have as many threads as the number of partitions for a perfect +balance — more threads than partitions means that some threads will be idle + +[id="{version}-plugins-{type}s-{plugin}-decorate_events"] +===== `decorate_events` + + * Value type is <> + * Default value is `false` + +Option to add Kafka metadata like topic, message size to the event. +This will add a field named `kafka` to the logstash event containing the following attributes: + `topic`: The topic this message is associated with + `consumer_group`: The consumer group used to read in this event + `partition`: The partition this message is associated with + `offset`: The offset from the partition this message is associated with + `key`: A ByteBuffer containing the message key + +[id="{version}-plugins-{type}s-{plugin}-enable_auto_commit"] +===== `enable_auto_commit` + + * Value type is <> + * Default value is `"true"` + +If true, periodically commit to Kafka the offsets of messages already returned by the consumer. 
+If the process fails, the committed offset will be used as the position from
+which consumption begins.
+
+[id="{version}-plugins-{type}s-{plugin}-exclude_internal_topics"]
+===== `exclude_internal_topics`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Whether records from internal topics (such as offsets) should be exposed to the consumer.
+If set to true, the only way to receive records from an internal topic is to subscribe to it.
+
+[id="{version}-plugins-{type}s-{plugin}-fetch_max_bytes"]
+===== `fetch_max_bytes`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The maximum amount of data the server should return for a fetch request. This is not an
+absolute maximum; if the first message in the first non-empty partition of the fetch is larger
+than this value, the message will still be returned to ensure that the consumer can make progress.
+
+[id="{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms"]
+===== `fetch_max_wait_ms`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The maximum amount of time the server will block before answering the fetch request if
+there isn't sufficient data to immediately satisfy `fetch_min_bytes`. This
+should be less than or equal to the timeout used in `poll_timeout_ms`.
+
+[id="{version}-plugins-{type}s-{plugin}-fetch_min_bytes"]
+===== `fetch_min_bytes`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The minimum amount of data the server should return for a fetch request. If insufficient
+data is available, the request will wait for that much data to accumulate
+before answering the request.
+
+[id="{version}-plugins-{type}s-{plugin}-group_id"]
+===== `group_id`
+
+  * Value type is <>
+  * Default value is `"logstash"`
+
+The identifier of the group this consumer belongs to. A consumer group is a single logical subscriber
+that happens to be made up of multiple processors. Messages in a topic will be distributed to all
+Logstash instances with the same `group_id`.
+
+[id="{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms"]
+===== `heartbeat_interval_ms`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The expected time between heartbeats to the consumer coordinator. Heartbeats are used to ensure
+that the consumer's session stays active and to facilitate rebalancing when new
+consumers join or leave the group. The value must be set lower than
+`session.timeout.ms`, but typically should be set no higher than 1/3 of that value.
+It can be adjusted even lower to control the expected time for normal rebalances.
+
+[id="{version}-plugins-{type}s-{plugin}-jaas_path"]
+===== `jaas_path`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization
+services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for a Kafka client:
+[source,java]
+----------------------------------
+KafkaClient {
+  com.sun.security.auth.module.Krb5LoginModule required
+  useTicketCache=true
+  renewTicket=true
+  serviceName="kafka";
+};
+----------------------------------
+
+Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these
+to the global JVM system properties. This means that if you have multiple Kafka inputs, all of them will share the same
+`jaas_path` and `kerberos_config`.
+If this is not desirable, you would have to run separate instances of Logstash on
+different JVMs.
+
+[id="{version}-plugins-{type}s-{plugin}-kerberos_config"]
+===== `kerberos_config`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Optional path to a Kerberos config file. This is `krb5.conf` style, as detailed in
+https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html
+
+[id="{version}-plugins-{type}s-{plugin}-key_deserializer_class"]
+===== `key_deserializer_class`
+
+  * Value type is <>
+  * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"`
+
+Java class used to deserialize the record's key.
+
+[id="{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes"]
+===== `max_partition_fetch_bytes`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The maximum amount of data per-partition the server will return. The maximum total memory used for a
+request will be `#partitions * max.partition.fetch.bytes`. This size must be at least
+as large as the maximum message size the server allows, or else it is possible for the producer to
+send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying
+to fetch a large message on a certain partition.
+
+[id="{version}-plugins-{type}s-{plugin}-max_poll_interval_ms"]
+===== `max_poll_interval_ms`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The maximum delay between invocations of poll() when using consumer group management. This places
+an upper bound on the amount of time that the consumer can be idle before fetching more records.
+If poll() is not called before expiration of this timeout, then the consumer is considered failed and
+the group will rebalance in order to reassign the partitions to another member.
+The value of the configuration `request_timeout_ms` must always be larger than `max_poll_interval_ms`.
+
+[id="{version}-plugins-{type}s-{plugin}-max_poll_records"]
+===== `max_poll_records`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The maximum number of records returned in a single call to poll().
+
+[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"]
+===== `metadata_max_age_ms`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The period of time in milliseconds after which the client forces a refresh of metadata, even if
+no partition leadership changes have been seen, to proactively discover any new brokers or partitions.
+
+[id="{version}-plugins-{type}s-{plugin}-partition_assignment_strategy"]
+===== `partition_assignment_strategy`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The class name of the partition assignment strategy that the client will use to distribute
+partition ownership amongst consumer instances.
+
+[id="{version}-plugins-{type}s-{plugin}-poll_timeout_ms"]
+===== `poll_timeout_ms`
+
+  * Value type is <>
+  * Default value is `100`
+
+Time the Kafka consumer will wait to receive new messages from topics.
+
+[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"]
+===== `receive_buffer_bytes`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.
+
+[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"]
+===== `reconnect_backoff_ms`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The amount of time to wait before attempting to reconnect to a given host.
+This avoids repeatedly connecting to a host in a tight loop.
+This backoff applies to all requests sent by the consumer to the broker.
+
+[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"]
+===== `request_timeout_ms`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+This setting controls the maximum amount of time the client will wait
+for the response of a request. If the response is not received before the timeout
+elapses, the client will resend the request if necessary, or fail the request if
+retries are exhausted.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"]
+===== `retry_backoff_ms`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The amount of time to wait before attempting to retry a failed fetch request
+to a given topic partition. This avoids repeated fetching-and-failing in a tight loop.
+
+[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"]
+===== `sasl_kerberos_service_name`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The Kerberos principal name that the Kafka broker runs as.
+This can be defined either in Kafka's JAAS config or in Kafka's config.
+
+[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"]
+===== `sasl_mechanism`
+
+  * Value type is <>
+  * Default value is `"GSSAPI"`
+
+http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections.
+This may be any mechanism for which a security provider is available.
+GSSAPI is the default mechanism.
+
+[id="{version}-plugins-{type}s-{plugin}-security_protocol"]
+===== `security_protocol`
+
+  * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`
+  * Default value is `"PLAINTEXT"`
+
+Security protocol to use, which can be one of `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, or `SASL_SSL`.
+
+[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"]
+===== `send_buffer_bytes`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The size of the TCP send buffer (SO_SNDBUF) to use when sending data.
+
+[id="{version}-plugins-{type}s-{plugin}-session_timeout_ms"]
+===== `session_timeout_ms`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The timeout after which, if `poll()` has not been invoked, the consumer is marked dead
+and a rebalance operation is triggered for the group identified by `group_id`.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"]
+===== `ssl_key_password`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The password of the private key in the key store file.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"]
+===== `ssl_keystore_location`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+If client authentication is required, this setting stores the keystore path.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"]
+===== `ssl_keystore_password`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+If client authentication is required, this setting stores the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"]
+===== `ssl_keystore_type`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The keystore type.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"]
+===== `ssl_truststore_location`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The JKS truststore path to validate the Kafka broker's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"]
+===== `ssl_truststore_password`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"]
+===== `ssl_truststore_type`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The truststore type.
+
+[id="{version}-plugins-{type}s-{plugin}-topics"]
+===== `topics`
+
+  * Value type is <>
+  * Default value is `["logstash"]`
+
+A list of topics to subscribe to. Defaults to `["logstash"]`.
+
+[id="{version}-plugins-{type}s-{plugin}-topics_pattern"]
+===== `topics_pattern`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+A topic regex pattern to subscribe to.
+The `topics` setting is ignored when this option is used.
+
+[id="{version}-plugins-{type}s-{plugin}-value_deserializer_class"]
+===== `value_deserializer_class`
+
+  * Value type is <>
+  * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"`
+
+Java class used to deserialize the record's value.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/kafka-v8.0.4.asciidoc b/docs/versioned-plugins/inputs/kafka-v8.0.4.asciidoc
new file mode 100644
index 000000000..2cd1b4685
--- /dev/null
+++ b/docs/versioned-plugins/inputs/kafka-v8.0.4.asciidoc
@@ -0,0 +1,542 @@
+:plugin: kafka
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v8.0.4
+:release_date: 2018-01-05
+:changelog_url: https://github.com/logstash-plugins/logstash-input-kafka/blob/v8.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Kafka input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input will read events from a Kafka topic.
+
+This plugin uses Kafka Client 1.0.0. For broker compatibility, see the official https://cwiki.apache.org/confluence/display/KAFKA/Compatibility+Matrix[Kafka compatibility reference].
+
+If you're using a plugin version that was released after {version}, see the https://www.elastic.co/guide/en/logstash/master/plugins-inputs-kafka.html[latest plugin documentation] for updated information about Kafka compatibility.
+
+This input supports connecting to Kafka over:
+
+* SSL (requires plugin version 3.0.0 or later)
+* Kerberos SASL (requires plugin version 5.1.0 or later)
+
+By default, security is disabled, but it can be turned on as needed.
+
+The Logstash Kafka consumer handles group management and uses the default offset management
+strategy using Kafka topics.
+
+Logstash instances by default form a single logical group to subscribe to Kafka topics.
+Each Logstash Kafka consumer can run multiple threads to increase read throughput. Alternatively,
+you could run multiple Logstash instances with the same `group_id` to spread the load across
+physical machines. Messages in a topic will be distributed to all Logstash instances with
+the same `group_id`.
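+
+As a sketch of that scaling model, each of several Logstash instances could run the same input; the
+broker addresses, topic, and thread count below are assumptions for illustration, not requirements:
+
+[source,ruby]
+----------------------------------
+input {
+  kafka {
+    bootstrap_servers => "kafka1:9092,kafka2:9092"
+    topics            => ["logstash"]
+    group_id          => "logstash"   # identical on every instance
+    consumer_threads  => 4            # aim for total threads across instances == partition count
+  }
+}
+----------------------------------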
+
+Ideally you should have as many threads as the number of partitions for a perfect balance --
+more threads than partitions means that some threads will be idle.
+
+For more information, see http://kafka.apache.org/documentation.html#theconsumer
+
+Kafka consumer configuration: http://kafka.apache.org/documentation.html#consumerconfigs
+
+==== Metadata fields
+
+The following metadata from the Kafka broker is added under the `[@metadata]` field:
+
+* `[@metadata][kafka][topic]`: Original Kafka topic from where the message was consumed.
+* `[@metadata][kafka][consumer_group]`: Consumer group
+* `[@metadata][kafka][partition]`: Partition info for this message.
+* `[@metadata][kafka][offset]`: Original record offset for this message.
+* `[@metadata][kafka][key]`: Record key, if any.
+* `[@metadata][kafka][timestamp]`: Timestamp when this message was received by the Kafka broker.
+
+Please note that `@metadata` fields are not part of any of your events at output time. If you need this
+information to be inserted into your original event, you'll have to use the `mutate` filter to manually
+copy the required fields into your `event`.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Kafka Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-auto_offset_reset>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-check_crcs>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-connections_max_idle_ms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-consumer_threads>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-decorate_events>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-enable_auto_commit>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-exclude_internal_topics>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-fetch_max_bytes>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-fetch_min_bytes>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-group_id>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-key_deserializer_class>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-max_poll_interval_ms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-max_poll_records>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-partition_assignment_strategy>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-poll_timeout_ms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No
+| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-session_timeout_ms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-topics>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-topics_pattern>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-value_deserializer_class>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms"]
+===== `auto_commit_interval_ms`
+
+  * Value type is <>
+  * Default value is `"5000"`
+
+The frequency in milliseconds that the consumer offsets are committed to Kafka.
+
+[id="{version}-plugins-{type}s-{plugin}-auto_offset_reset"]
+===== `auto_offset_reset`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+What to do when there is no initial offset in Kafka or if an offset is out of range:
+
+* earliest: automatically reset the offset to the earliest offset
+* latest: automatically reset the offset to the latest offset
+* none: throw exception to the consumer if no previous offset is found for the consumer's group
+* anything else: throw exception to the consumer.
+
+[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"]
+===== `bootstrap_servers`
+
+  * Value type is <>
+  * Default value is `"localhost:9092"`
+
+A list of URLs of Kafka instances to use for establishing the initial connection to the cluster.
+This list should be in the form `host1:port1,host2:port2`. These URLs are used only
+for the initial connection to discover the full cluster membership (which may change dynamically),
+so this list need not contain the full set of servers (you may want more than one, though, in
+case a server is down).
+
+[id="{version}-plugins-{type}s-{plugin}-check_crcs"]
+===== `check_crcs`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Automatically check the CRC32 of the records consumed. This ensures that no on-the-wire or on-disk
+corruption of the messages has occurred. This check adds some overhead, so it may be
+disabled in cases seeking extreme performance.
+
+[id="{version}-plugins-{type}s-{plugin}-client_id"]
+===== `client_id`
+
+  * Value type is <>
+  * Default value is `"logstash"`
+
+The id string to pass to the server when making requests. The purpose of this
+is to be able to track the source of requests beyond just ip/port by allowing
+a logical application name to be included.
+
+[id="{version}-plugins-{type}s-{plugin}-connections_max_idle_ms"]
+===== `connections_max_idle_ms`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Close idle connections after the number of milliseconds specified by this config.
+
+[id="{version}-plugins-{type}s-{plugin}-consumer_threads"]
+===== `consumer_threads`
+
+  * Value type is <>
+  * Default value is `1`
+
+Ideally you should have as many threads as the number of partitions for a perfect
+balance -- more threads than partitions means that some threads will be idle.
+
+[id="{version}-plugins-{type}s-{plugin}-decorate_events"]
+===== `decorate_events`
+
+  * Value type is <>
+  * Default value is `false`
+
+Option to add Kafka metadata, such as topic and message size, to the event.
+This will add a field named `kafka` to the logstash event containing the following attributes:
+
+* `topic`: The topic this message is associated with
+* `consumer_group`: The consumer group used to read in this event
+* `partition`: The partition this message is associated with
+* `offset`: The offset from the partition this message is associated with
+* `key`: A ByteBuffer containing the message key
+
+[id="{version}-plugins-{type}s-{plugin}-enable_auto_commit"]
+===== `enable_auto_commit`
+
+  * Value type is <>
+  * Default value is `"true"`
+
+If true, periodically commit to Kafka the offsets of messages already returned by the consumer.
+If the process fails, the committed offset will be used as the position from
+which consumption begins.
+
+[id="{version}-plugins-{type}s-{plugin}-exclude_internal_topics"]
+===== `exclude_internal_topics`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Whether records from internal topics (such as offsets) should be exposed to the consumer.
+If set to true, the only way to receive records from an internal topic is to subscribe to it.
+
+[id="{version}-plugins-{type}s-{plugin}-fetch_max_bytes"]
+===== `fetch_max_bytes`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The maximum amount of data the server should return for a fetch request. This is not an
+absolute maximum; if the first message in the first non-empty partition of the fetch is larger
+than this value, the message will still be returned to ensure that the consumer can make progress.
+
+[id="{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms"]
+===== `fetch_max_wait_ms`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The maximum amount of time the server will block before answering the fetch request if
+there isn't sufficient data to immediately satisfy `fetch_min_bytes`. This
+should be less than or equal to the timeout used in `poll_timeout_ms`.
+
+[id="{version}-plugins-{type}s-{plugin}-fetch_min_bytes"]
+===== `fetch_min_bytes`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The minimum amount of data the server should return for a fetch request. If insufficient
+data is available, the request will wait for that much data to accumulate
+before answering the request.
+
+[id="{version}-plugins-{type}s-{plugin}-group_id"]
+===== `group_id`
+
+  * Value type is <>
+  * Default value is `"logstash"`
+
+The identifier of the group this consumer belongs to. A consumer group is a single logical subscriber
+that happens to be made up of multiple processors. Messages in a topic will be distributed to all
+Logstash instances with the same `group_id`.
+
+[id="{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms"]
+===== `heartbeat_interval_ms`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The expected time between heartbeats to the consumer coordinator. Heartbeats are used to ensure
+that the consumer's session stays active and to facilitate rebalancing when new
+consumers join or leave the group. The value must be set lower than
+`session.timeout.ms`, but typically should be set no higher than 1/3 of that value.
+It can be adjusted even lower to control the expected time for normal rebalances.
+
+[id="{version}-plugins-{type}s-{plugin}-jaas_path"]
+===== `jaas_path`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization
+services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for a Kafka client:
+[source,java]
+----------------------------------
+KafkaClient {
+  com.sun.security.auth.module.Krb5LoginModule required
+  useTicketCache=true
+  renewTicket=true
+  serviceName="kafka";
+};
+----------------------------------
+
+Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these
+to the global JVM system properties. This means that if you have multiple Kafka inputs, all of them will share the same
+`jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on
+different JVMs.
+
+[id="{version}-plugins-{type}s-{plugin}-kerberos_config"]
+===== `kerberos_config`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Optional path to a Kerberos config file. This is `krb5.conf` style, as detailed in
+https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html
+
+[id="{version}-plugins-{type}s-{plugin}-key_deserializer_class"]
+===== `key_deserializer_class`
+
+  * Value type is <>
+  * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"`
+
+Java class used to deserialize the record's key.
+
+[id="{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes"]
+===== `max_partition_fetch_bytes`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The maximum amount of data per-partition the server will return. The maximum total memory used for a
+request will be `#partitions * max.partition.fetch.bytes`. This size must be at least
+as large as the maximum message size the server allows, or else it is possible for the producer to
+send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying
+to fetch a large message on a certain partition.
+
+[id="{version}-plugins-{type}s-{plugin}-max_poll_interval_ms"]
+===== `max_poll_interval_ms`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The maximum delay between invocations of poll() when using consumer group management. This places
+an upper bound on the amount of time that the consumer can be idle before fetching more records.
+If poll() is not called before expiration of this timeout, then the consumer is considered failed and
+the group will rebalance in order to reassign the partitions to another member.
+The value of the configuration `request_timeout_ms` must always be larger than `max_poll_interval_ms`.
+
+[id="{version}-plugins-{type}s-{plugin}-max_poll_records"]
+===== `max_poll_records`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The maximum number of records returned in a single call to poll().
+
+[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"]
+===== `metadata_max_age_ms`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The period of time in milliseconds after which the client forces a refresh of metadata, even if
+no partition leadership changes have been seen, to proactively discover any new brokers or partitions.
+
+[id="{version}-plugins-{type}s-{plugin}-partition_assignment_strategy"]
+===== `partition_assignment_strategy`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The class name of the partition assignment strategy that the client will use to distribute
+partition ownership amongst consumer instances.
+
+[id="{version}-plugins-{type}s-{plugin}-poll_timeout_ms"]
+===== `poll_timeout_ms`
+
+  * Value type is <>
+  * Default value is `100`
+
+Time the Kafka consumer will wait to receive new messages from topics.
+
+[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"]
+===== `receive_buffer_bytes`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.
+
+[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"]
+===== `reconnect_backoff_ms`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The amount of time to wait before attempting to reconnect to a given host.
+This avoids repeatedly connecting to a host in a tight loop.
+This backoff applies to all requests sent by the consumer to the broker.
+
+[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"]
+===== `request_timeout_ms`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+This setting controls the maximum amount of time the client will wait
+for the response of a request. If the response is not received before the timeout
+elapses, the client will resend the request if necessary, or fail the request if
+retries are exhausted.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"]
+===== `retry_backoff_ms`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The amount of time to wait before attempting to retry a failed fetch request
+to a given topic partition. This avoids repeated fetching-and-failing in a tight loop.
+
+[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"]
+===== `sasl_kerberos_service_name`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The Kerberos principal name that the Kafka broker runs as.
+This can be defined either in Kafka's JAAS config or in Kafka's config.
+
+[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"]
+===== `sasl_mechanism`
+
+  * Value type is <>
+  * Default value is `"GSSAPI"`
+
+http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections.
+This may be any mechanism for which a security provider is available.
+GSSAPI is the default mechanism.
+
+[id="{version}-plugins-{type}s-{plugin}-security_protocol"]
+===== `security_protocol`
+
+  * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`
+  * Default value is `"PLAINTEXT"`
+
+Security protocol to use, which can be one of `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, or `SASL_SSL`.
+
+[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"]
+===== `send_buffer_bytes`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The size of the TCP send buffer (SO_SNDBUF) to use when sending data.
+
+[id="{version}-plugins-{type}s-{plugin}-session_timeout_ms"]
+===== `session_timeout_ms`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The timeout after which, if `poll()` has not been invoked, the consumer is marked dead
+and a rebalance operation is triggered for the group identified by `group_id`.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"]
+===== `ssl_key_password`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The password of the private key in the key store file.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"]
+===== `ssl_keystore_location`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+If client authentication is required, this setting stores the keystore path.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"]
+===== `ssl_keystore_password`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+If client authentication is required, this setting stores the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"]
+===== `ssl_keystore_type`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The keystore type.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"]
+===== `ssl_truststore_location`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The JKS truststore path to validate the Kafka broker's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"]
+===== `ssl_truststore_password`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"]
+===== `ssl_truststore_type`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The truststore type.
+
+[id="{version}-plugins-{type}s-{plugin}-topics"]
+===== `topics`
+
+  * Value type is <>
+  * Default value is `["logstash"]`
+
+A list of topics to subscribe to. Defaults to `["logstash"]`.
+
+[id="{version}-plugins-{type}s-{plugin}-topics_pattern"]
+===== `topics_pattern`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+A topic regex pattern to subscribe to.
+The `topics` setting is ignored when this option is used.
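+
+For example, here is a sketch that subscribes to every topic matching a prefix; the pattern below is an
+assumption for illustration, and the `topics` list is ignored whenever a pattern is set:
+
+[source,ruby]
+----------------------------------
+input {
+  kafka {
+    bootstrap_servers => "localhost:9092"
+    topics_pattern    => "logs-.*"   # overrides the `topics` setting
+    group_id          => "logstash"
+  }
+}
+----------------------------------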
+
+[id="{version}-plugins-{type}s-{plugin}-value_deserializer_class"]
+===== `value_deserializer_class`
+
+  * Value type is <>
+  * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"`
+
+Java class used to deserialize the record's value.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/kinesis-index.asciidoc b/docs/versioned-plugins/inputs/kinesis-index.asciidoc
new file mode 100644
index 000000000..565607741
--- /dev/null
+++ b/docs/versioned-plugins/inputs/kinesis-index.asciidoc
@@ -0,0 +1,18 @@
+:plugin: kinesis
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-14
+| <> | 2017-08-22
+| <> | 2017-07-11
+| <> | 2017-06-27
+|=======================================================================
+
+include::kinesis-v2.0.7.asciidoc[]
+include::kinesis-v2.0.6.asciidoc[]
+include::kinesis-v2.0.5.asciidoc[]
+include::kinesis-v2.0.4.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/kinesis-v2.0.4.asciidoc b/docs/versioned-plugins/inputs/kinesis-v2.0.4.asciidoc
new file mode 100644
index 000000000..016c636c1
--- /dev/null
+++ b/docs/versioned-plugins/inputs/kinesis-v2.0.4.asciidoc
@@ -0,0 +1,105 @@
+:plugin: kinesis
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.4
+:release_date: 2017-06-27
+:changelog_url: https://github.com/logstash-plugins/logstash-input-kinesis/blob/v2.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Kinesis input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Receive events through an AWS Kinesis stream.
+
+This input plugin uses the Java Kinesis Client Library underneath, so the
+documentation at https://github.com/awslabs/amazon-kinesis-client will be
+useful.
+
+AWS credentials can be specified either through environment variables or an
+IAM instance role. The library uses a DynamoDB table for worker coordination,
+so you'll need to grant access to that as well as to the Kinesis stream. The
+DynamoDB table has the same name as the `application_name` configuration
+option, which defaults to "logstash".
+
+The library can optionally also send worker statistics to CloudWatch.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Kinesis Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-application_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-checkpoint_interval_seconds>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-kinesis_stream_name>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>, one of `[nil, "cloudwatch"]`|No
+| <<{version}-plugins-{type}s-{plugin}-region>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-application_name"]
+===== `application_name`
+
+  * Value type is <>
+  * Default value is `"logstash"`
+
+The application name used for the DynamoDB coordination table. Must be
+unique for this Kinesis stream.
+
+[id="{version}-plugins-{type}s-{plugin}-checkpoint_interval_seconds"]
+===== `checkpoint_interval_seconds`
+
+  * Value type is <>
+  * Default value is `60`
+
+How many seconds between worker checkpoints to DynamoDB.
+
+[id="{version}-plugins-{type}s-{plugin}-kinesis_stream_name"]
+===== `kinesis_stream_name`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The Kinesis stream name.
+
+[id="{version}-plugins-{type}s-{plugin}-metrics"]
+===== `metrics`
+
+  * Value can be any of: ``, `cloudwatch`
+  * Default value is `nil`
+
+Worker metric tracking. By default this is disabled; set it to `"cloudwatch"`
+to enable the CloudWatch integration in the Kinesis Client Library.
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+  * Value type is <>
+  * Default value is `"us-east-1"`
+
+The AWS region for Kinesis, DynamoDB, and CloudWatch (if enabled).
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/kinesis-v2.0.5.asciidoc b/docs/versioned-plugins/inputs/kinesis-v2.0.5.asciidoc
new file mode 100644
index 000000000..848438ca3
--- /dev/null
+++ b/docs/versioned-plugins/inputs/kinesis-v2.0.5.asciidoc
@@ -0,0 +1,112 @@
+:plugin: kinesis
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.5
+:release_date: 2017-07-11
+:changelog_url: https://github.com/logstash-plugins/logstash-input-kinesis/blob/v2.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Kinesis input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Receive events through an AWS Kinesis stream.
+
+This input plugin uses the Java Kinesis Client Library underneath, so the
+documentation at https://github.com/awslabs/amazon-kinesis-client will be
+useful.
+
+AWS credentials can be specified either through environment variables or an
+IAM instance role. The library uses a DynamoDB table for worker coordination,
+so you'll need to grant access to that as well as to the Kinesis stream. The
+DynamoDB table has the same name as the `application_name` configuration
+option, which defaults to "logstash".
+
+The library can optionally also send worker statistics to CloudWatch.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Kinesis Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-application_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-checkpoint_interval_seconds>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-kinesis_stream_name>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>, one of `[nil, "cloudwatch"]`|No
+| <<{version}-plugins-{type}s-{plugin}-region>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-profile>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-application_name"]
+===== `application_name`
+
+  * Value type is <>
+  * Default value is `"logstash"`
+
+The application name used for the DynamoDB coordination table. Must be
+unique for this Kinesis stream.
+
+[id="{version}-plugins-{type}s-{plugin}-checkpoint_interval_seconds"]
+===== `checkpoint_interval_seconds`
+
+  * Value type is <>
+  * Default value is `60`
+
+How many seconds between worker checkpoints to DynamoDB.
+
+[id="{version}-plugins-{type}s-{plugin}-kinesis_stream_name"]
+===== `kinesis_stream_name`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The Kinesis stream name.
+
+[id="{version}-plugins-{type}s-{plugin}-metrics"]
+===== `metrics`
+
+  * Value can be any of: ``, `cloudwatch`
+  * Default value is `nil`
+
+Worker metric tracking. By default this is disabled; set it to `"cloudwatch"`
+to enable the CloudWatch integration in the Kinesis Client Library.
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+  * Value type is <>
+  * Default value is `"us-east-1"`
+
+The AWS region for Kinesis, DynamoDB, and CloudWatch (if enabled).
+
+[id="{version}-plugins-{type}s-{plugin}-profile"]
+===== `profile`
+
+  * Value type is <>
+  * Default value is `nil`
+
+The AWS profile name for authentication.
+This ensures that the `~/.aws/credentials` AWS auth provider is used.
+By default this is empty, and the default credential chain will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/kinesis-v2.0.6.asciidoc b/docs/versioned-plugins/inputs/kinesis-v2.0.6.asciidoc
new file mode 100644
index 000000000..39c7da000
--- /dev/null
+++ b/docs/versioned-plugins/inputs/kinesis-v2.0.6.asciidoc
@@ -0,0 +1,112 @@
+:plugin: kinesis
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.6
+:release_date: 2017-08-22
+:changelog_url: https://github.com/logstash-plugins/logstash-input-kinesis/blob/v2.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Kinesis input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Receive events through an AWS Kinesis stream.
+
+This input plugin uses the Java Kinesis Client Library underneath, so the
+documentation at https://github.com/awslabs/amazon-kinesis-client will be
+useful.
+
+AWS credentials can be specified either through environment variables or an
+IAM instance role. The library uses a DynamoDB table for worker coordination,
+so you'll need to grant access to that as well as to the Kinesis stream. The
+DynamoDB table has the same name as the `application_name` configuration
+option, which defaults to "logstash".
+
+The library can optionally also send worker statistics to CloudWatch.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Kinesis Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-application_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-checkpoint_interval_seconds>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-kinesis_stream_name>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>, one of `[nil, "cloudwatch"]`|No
+| <<{version}-plugins-{type}s-{plugin}-region>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-profile>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-application_name"]
+===== `application_name`
+
+  * Value type is <>
+  * Default value is `"logstash"`
+
+The application name used for the DynamoDB coordination table. Must be
+unique for this Kinesis stream.
+
+[id="{version}-plugins-{type}s-{plugin}-checkpoint_interval_seconds"]
+===== `checkpoint_interval_seconds`
+
+  * Value type is <>
+  * Default value is `60`
+
+How many seconds between worker checkpoints to DynamoDB.
+
+[id="{version}-plugins-{type}s-{plugin}-kinesis_stream_name"]
+===== `kinesis_stream_name`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The Kinesis stream name.
+
+[id="{version}-plugins-{type}s-{plugin}-metrics"]
+===== `metrics`
+
+  * Value can be any of: ``, `cloudwatch`
+  * Default value is `nil`
+
+Worker metric tracking. By default this is disabled; set it to `"cloudwatch"`
+to enable the CloudWatch integration in the Kinesis Client Library.
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+  * Value type is <>
+  * Default value is `"us-east-1"`
+
+The AWS region for Kinesis, DynamoDB, and CloudWatch (if enabled).
+
+[id="{version}-plugins-{type}s-{plugin}-profile"]
+===== `profile`
+
+  * Value type is <>
+  * Default value is `nil`
+
+The AWS profile name for authentication.
+This ensures that the `~/.aws/credentials` AWS auth provider is used.
+By default this is empty, and the default credential chain will be used.
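+
+Putting these options together, here is a minimal sketch of this input; the stream name and profile
+below are placeholders for illustration, not defaults:
+
+[source,ruby]
+----------------------------------
+input {
+  kinesis {
+    kinesis_stream_name => "my-logging-stream"   # required
+    region              => "us-east-1"
+    metrics             => "cloudwatch"          # optional worker metrics
+    profile             => "logstash-ingest"     # optional AWS profile
+  }
+}
+----------------------------------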
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/kinesis-v2.0.7.asciidoc b/docs/versioned-plugins/inputs/kinesis-v2.0.7.asciidoc
new file mode 100644
index 000000000..4a9335fe3
--- /dev/null
+++ b/docs/versioned-plugins/inputs/kinesis-v2.0.7.asciidoc
@@ -0,0 +1,112 @@
+:plugin: kinesis
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.7
+:release_date: 2017-11-14
+:changelog_url: https://github.com/logstash-plugins/logstash-input-kinesis/blob/v2.0.7/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Kinesis input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Receive events through an AWS Kinesis stream.
+
+This input plugin uses the Java Kinesis Client Library underneath, so the
+documentation at https://github.com/awslabs/amazon-kinesis-client will be
+useful.
+
+AWS credentials can be specified either through environment variables or an
+IAM instance role. The library uses a DynamoDB table for worker coordination,
+so you'll need to grant access to that as well as to the Kinesis stream. The
+DynamoDB table has the same name as the `application_name` configuration
+option, which defaults to "logstash".
+
+The library can optionally also send worker statistics to CloudWatch.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Kinesis Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-application_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-checkpoint_interval_seconds>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-kinesis_stream_name>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>, one of `[nil, "cloudwatch"]`|No
+| <<{version}-plugins-{type}s-{plugin}-region>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-profile>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-application_name"]
+===== `application_name`
+
+  * Value type is <>
+  * Default value is `"logstash"`
+
+The application name used for the DynamoDB coordination table. Must be
+unique for this Kinesis stream.
+
+[id="{version}-plugins-{type}s-{plugin}-checkpoint_interval_seconds"]
+===== `checkpoint_interval_seconds`
+
+  * Value type is <>
+  * Default value is `60`
+
+How many seconds between worker checkpoints to DynamoDB.
+
+[id="{version}-plugins-{type}s-{plugin}-kinesis_stream_name"]
+===== `kinesis_stream_name`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The Kinesis stream name.
+
+[id="{version}-plugins-{type}s-{plugin}-metrics"]
+===== `metrics`
+
+  * Value can be any of: ``, `cloudwatch`
+  * Default value is `nil`
+
+Worker metric tracking.
By default this is disabled; set it to "cloudwatch"
+to enable the CloudWatch integration in the Kinesis Client Library.
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+  * Value type is <<string,string>>
+  * Default value is `"us-east-1"`
+
+The AWS region for Kinesis, DynamoDB, and CloudWatch (if enabled).
+
+[id="{version}-plugins-{type}s-{plugin}-profile"]
+===== `profile`
+
+  * Value type is <<string,string>>
+  * Default value is `nil`
+
+The AWS profile name for authentication.
+This ensures that the `~/.aws/credentials` AWS auth provider is used.
+By default this is empty and the default chain will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/log4j-index.asciidoc b/docs/versioned-plugins/inputs/log4j-index.asciidoc
new file mode 100644
index 000000000..ed17ba880
--- /dev/null
+++ b/docs/versioned-plugins/inputs/log4j-index.asciidoc
@@ -0,0 +1,18 @@
+:plugin: log4j
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-08-01
+| <> | 2017-06-23
+|=======================================================================
+
+include::log4j-v3.1.2.asciidoc[]
+include::log4j-v3.1.1.asciidoc[]
+include::log4j-v3.1.0.asciidoc[]
+include::log4j-v3.0.6.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/log4j-v3.0.6.asciidoc b/docs/versioned-plugins/inputs/log4j-v3.0.6.asciidoc
new file mode 100644
index 000000000..1d7205ea6
--- /dev/null
+++ b/docs/versioned-plugins/inputs/log4j-v3.0.6.asciidoc
@@ -0,0 +1,171 @@
+:plugin: log4j
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.6
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-log4j/blob/v3.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Log4j input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Deprecation Notice
+
+NOTE: This plugin is deprecated. It is recommended that you use filebeat to collect logs from log4j.
+
+The following section is a guide for migrating from SocketAppender to filebeat.
+
+To migrate from the log4j SocketAppender to filebeat, you will need to make three changes:
+
+1) Configure your log4j.properties (in your app) to write to a local file.
+2) Install and configure filebeat to collect those logs and ship them to Logstash.
+3) Configure Logstash to use the beats input.
+
+.Configuring log4j for writing to local files
+
+In your log4j.properties file, remove SocketAppender and replace it with RollingFileAppender.
+
+For example, you can use the following log4j.properties configuration to write daily log files.
+
+    # Your app's log4j.properties (log4j 1.2 only)
+    log4j.rootLogger=daily
+    log4j.appender.daily=org.apache.log4j.rolling.RollingFileAppender
+    log4j.appender.daily.RollingPolicy=org.apache.log4j.rolling.TimeBasedRollingPolicy
+    log4j.appender.daily.RollingPolicy.FileNamePattern=/var/log/your-app/app.%d.log
+    log4j.appender.daily.layout = org.apache.log4j.PatternLayout
+    log4j.appender.daily.layout.ConversionPattern=%d{YYYY-MM-dd HH:mm:ss,SSSZ} %p %c{1}:%L - %m%n
+
+Configuring log4j.properties in more detail is outside the scope of this migration guide.
+
+.Configuring filebeat
+
+Next,
+https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-installation.html[install
+filebeat]. Based on the above log4j.properties, we can use this filebeat
+configuration:
+
+    # filebeat.yml
+    filebeat:
+      prospectors:
+        -
+          paths:
+            - /var/log/your-app/app.*.log
+          input_type: log
+    output:
+      logstash:
+        hosts: ["your-logstash-host:5000"]
+
+For more details on configuring filebeat, see
+https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-configuration.html[the filebeat configuration guide].
+
+.Configuring Logstash to receive from filebeat
+
+Finally, configure Logstash with a beats input:
+
+    # logstash configuration
+    input {
+      beats {
+        port => 5000
+      }
+    }
+
+It is strongly recommended that you also enable TLS in filebeat and the
+Logstash beats input for the protection and safety of your log data.
+
+For more details on configuring the beats input, see
+https://www.elastic.co/guide/en/logstash/current/plugins-inputs-beats.html[the logstash beats input documentation].
+
+==== Description
+
+Read events over a TCP socket from a Log4j SocketAppender. This plugin works only with log4j version 1.x.
+
+This plugin can either accept connections from clients or connect to a server,
+depending on `mode`. Depending on which `mode` is configured,
+you need a matching SocketAppender or a SocketHubAppender
+on the remote side.
+
+One event is created per received log4j LoggingEvent with the following schema:
+
+* `timestamp` => the number of milliseconds elapsed from 1/1/1970 until the logging event was created
+* `path` => the name of the logger
+* `priority` => the level of this event
+* `logger_name` => the name of the logger
+* `thread` => the thread name making the logging request
+* `class` => the fully qualified class name of the caller making the logging request
+* `file` => the source file name and line number of the caller making the logging request, in the colon-separated format "fileName:lineNumber"
+* `method` => the method name of the caller making the logging request
+* `NDC` => the NDC string
+* `stack_trace` => the multi-line stack trace
+
+Also, if the original log4j LoggingEvent contains MDC hash entries, they will be merged into the event as fields.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Log4j Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-mode>> |<<string,string>>, one of `["server", "client"]`|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<<boolean,boolean>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * Value type is <<string,string>>
+  * Default value is `"0.0.0.0"`
+
+When mode is `server`, the address to listen on.
+When mode is `client`, the address to connect to.
+
+[id="{version}-plugins-{type}s-{plugin}-mode"]
+===== `mode`
+
+  * Value can be any of: `server`, `client`
+  * Default value is `"server"`
+
+Mode to operate in. `server` listens for client connections,
+`client` connects to a server.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * Value type is <<number,number>>
+  * Default value is `4560`
+
+When mode is `server`, the port to listen on.
+When mode is `client`, the port to connect to.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"]
+===== `proxy_protocol`
+
+  * Value type is <<boolean,boolean>>
+  * Default value is `false`
+
+Proxy protocol support; only v1 is supported at this time:
+http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/log4j-v3.1.0.asciidoc b/docs/versioned-plugins/inputs/log4j-v3.1.0.asciidoc
new file mode 100644
index 000000000..c0255b298
--- /dev/null
+++ b/docs/versioned-plugins/inputs/log4j-v3.1.0.asciidoc
@@ -0,0 +1,169 @@
+:plugin: log4j
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.0
+:release_date: 2017-08-01
+:changelog_url: https://github.com/logstash-plugins/logstash-input-log4j/blob/v3.1.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Log4j input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Deprecation Notice
+
+NOTE: This plugin is deprecated. It is recommended that you use filebeat to collect logs from log4j.
+
+The following section is a guide for migrating from SocketAppender to filebeat.
+
+To migrate from the log4j SocketAppender to filebeat, you will need to make three changes:
+
+1) Configure your log4j.properties (in your app) to write to a local file.
+2) Install and configure filebeat to collect those logs and ship them to Logstash.
+3) Configure Logstash to use the beats input.
+
+.Configuring log4j for writing to local files
+
+In your log4j.properties file, remove SocketAppender and replace it with RollingFileAppender.
+
+For example, you can use the following log4j.properties configuration to write daily log files.
+
+    # Your app's log4j.properties (log4j 1.2 only)
+    log4j.rootLogger=daily
+    log4j.appender.daily=org.apache.log4j.rolling.RollingFileAppender
+    log4j.appender.daily.RollingPolicy=org.apache.log4j.rolling.TimeBasedRollingPolicy
+    log4j.appender.daily.RollingPolicy.FileNamePattern=/var/log/your-app/app.%d.log
+    log4j.appender.daily.layout = org.apache.log4j.PatternLayout
+    log4j.appender.daily.layout.ConversionPattern=%d{YYYY-MM-dd HH:mm:ss,SSSZ} %p %c{1}:%L - %m%n
+
+Configuring log4j.properties in more detail is outside the scope of this migration guide.
+
+.Configuring filebeat
+
+Next,
+https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-installation.html[install
+filebeat]. Based on the above log4j.properties, we can use this filebeat
+configuration:
+
+    # filebeat.yml
+    filebeat:
+      prospectors:
+        -
+          paths:
+            - /var/log/your-app/app.*.log
+          input_type: log
+    output:
+      logstash:
+        hosts: ["your-logstash-host:5000"]
+
+For more details on configuring filebeat, see
+https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-configuration.html[the filebeat configuration guide].
+
+.Configuring Logstash to receive from filebeat
+
+Finally, configure Logstash with a beats input:
+
+    # logstash configuration
+    input {
+      beats {
+        port => 5000
+      }
+    }
+
+It is strongly recommended that you also enable TLS in filebeat and the
+Logstash beats input for the protection and safety of your log data.
+
+For more details on configuring the beats input, see
+https://www.elastic.co/guide/en/logstash/current/plugins-inputs-beats.html[the logstash beats input documentation].
+
+==== Description
+
+Read events over a TCP socket from a Log4j SocketAppender. This plugin works only with log4j version 1.x.
+
+This plugin can either accept connections from clients or connect to a server,
+depending on `mode`. Depending on which `mode` is configured,
+you need a matching SocketAppender or a SocketHubAppender
+on the remote side.
+
+One event is created per received log4j LoggingEvent with the following schema:
+
+* `timestamp` => the number of milliseconds elapsed from 1/1/1970 until the logging event was created
+* `path` => the name of the logger
+* `priority` => the level of this event
+* `logger_name` => the name of the logger
+* `thread` => the thread name making the logging request
+* `class` => the fully qualified class name of the caller making the logging request
+* `file` => the source file name and line number of the caller making the logging request, in the colon-separated format "fileName:lineNumber"
+* `method` => the method name of the caller making the logging request
+* `NDC` => the NDC string
+* `stack_trace` => the multi-line stack trace
+
+Also, if the original log4j LoggingEvent contains MDC hash entries, they will be merged into the event as fields.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Log4j Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-mode>> |<<string,string>>, one of `["server", "client"]`|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<<boolean,boolean>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * Value type is <<string,string>>
+  * Default value is `"0.0.0.0"`
+
+When mode is `server`, the address to listen on.
+When mode is `client`, the address to connect to.
+
+[id="{version}-plugins-{type}s-{plugin}-mode"]
+===== `mode`
+
+  * Value can be any of: `server`, `client`
+  * Default value is `"server"`
+
+Mode to operate in. `server` listens for client connections,
+`client` connects to a server.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * Value type is <<number,number>>
+  * Default value is `4560`
+
+When mode is `server`, the port to listen on.
+When mode is `client`, the port to connect to.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"]
+===== `proxy_protocol`
+
+  * Value type is <<boolean,boolean>>
+  * Default value is `false`
+
+Proxy protocol support; only v1 is supported at this time:
+http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/log4j-v3.1.1.asciidoc b/docs/versioned-plugins/inputs/log4j-v3.1.1.asciidoc
new file mode 100644
index 000000000..703bb94fe
--- /dev/null
+++ b/docs/versioned-plugins/inputs/log4j-v3.1.1.asciidoc
@@ -0,0 +1,169 @@
+:plugin: log4j
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.1
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-input-log4j/blob/v3.1.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Log4j input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Deprecation Notice
+
+NOTE: This plugin is deprecated. It is recommended that you use filebeat to collect logs from log4j.
+
+The following section is a guide for migrating from SocketAppender to filebeat.
+
+To migrate from the log4j SocketAppender to filebeat, you will need to make three changes:
+
+1) Configure your log4j.properties (in your app) to write to a local file.
+2) Install and configure filebeat to collect those logs and ship them to Logstash.
+3) Configure Logstash to use the beats input.
+
+.Configuring log4j for writing to local files
+
+In your log4j.properties file, remove SocketAppender and replace it with RollingFileAppender.
+
+For example, you can use the following log4j.properties configuration to write daily log files.
+
+    # Your app's log4j.properties (log4j 1.2 only)
+    log4j.rootLogger=daily
+    log4j.appender.daily=org.apache.log4j.rolling.RollingFileAppender
+    log4j.appender.daily.RollingPolicy=org.apache.log4j.rolling.TimeBasedRollingPolicy
+    log4j.appender.daily.RollingPolicy.FileNamePattern=/var/log/your-app/app.%d.log
+    log4j.appender.daily.layout = org.apache.log4j.PatternLayout
+    log4j.appender.daily.layout.ConversionPattern=%d{YYYY-MM-dd HH:mm:ss,SSSZ} %p %c{1}:%L - %m%n
+
+Configuring log4j.properties in more detail is outside the scope of this migration guide.
+
+.Configuring filebeat
+
+Next,
+https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-installation.html[install
+filebeat]. Based on the above log4j.properties, we can use this filebeat
+configuration:
+
+    # filebeat.yml
+    filebeat:
+      prospectors:
+        -
+          paths:
+            - /var/log/your-app/app.*.log
+          input_type: log
+    output:
+      logstash:
+        hosts: ["your-logstash-host:5000"]
+
+For more details on configuring filebeat, see
+https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-configuration.html[the filebeat configuration guide].
+
+.Configuring Logstash to receive from filebeat
+
+Finally, configure Logstash with a beats input:
+
+    # logstash configuration
+    input {
+      beats {
+        port => 5000
+      }
+    }
+
+It is strongly recommended that you also enable TLS in filebeat and the
+Logstash beats input for the protection and safety of your log data.
+
+For more details on configuring the beats input, see
+https://www.elastic.co/guide/en/logstash/current/plugins-inputs-beats.html[the logstash beats input documentation].
+
+==== Description
+
+Read events over a TCP socket from a Log4j SocketAppender. This plugin works only with log4j version 1.x.
+
+This plugin can either accept connections from clients or connect to a server,
+depending on `mode`. Depending on which `mode` is configured,
+you need a matching SocketAppender or a SocketHubAppender
+on the remote side.
+
+One event is created per received log4j LoggingEvent with the following schema:
+
+* `timestamp` => the number of milliseconds elapsed from 1/1/1970 until the logging event was created
+* `path` => the name of the logger
+* `priority` => the level of this event
+* `logger_name` => the name of the logger
+* `thread` => the thread name making the logging request
+* `class` => the fully qualified class name of the caller making the logging request
+* `file` => the source file name and line number of the caller making the logging request, in the colon-separated format "fileName:lineNumber"
+* `method` => the method name of the caller making the logging request
+* `NDC` => the NDC string
+* `stack_trace` => the multi-line stack trace
+
+Also, if the original log4j LoggingEvent contains MDC hash entries, they will be merged into the event as fields.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Log4j Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-mode>> |<<string,string>>, one of `["server", "client"]`|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<<boolean,boolean>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * Value type is <<string,string>>
+  * Default value is `"0.0.0.0"`
+
+When mode is `server`, the address to listen on.
+When mode is `client`, the address to connect to.
+
+[id="{version}-plugins-{type}s-{plugin}-mode"]
+===== `mode`
+
+  * Value can be any of: `server`, `client`
+  * Default value is `"server"`
+
+Mode to operate in. `server` listens for client connections,
+`client` connects to a server.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * Value type is <<number,number>>
+  * Default value is `4560`
+
+When mode is `server`, the port to listen on.
+When mode is `client`, the port to connect to.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"]
+===== `proxy_protocol`
+
+  * Value type is <<boolean,boolean>>
+  * Default value is `false`
+
+Proxy protocol support; only v1 is supported at this time:
+http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/log4j-v3.1.2.asciidoc b/docs/versioned-plugins/inputs/log4j-v3.1.2.asciidoc
new file mode 100644
index 000000000..d2721bbc1
--- /dev/null
+++ b/docs/versioned-plugins/inputs/log4j-v3.1.2.asciidoc
@@ -0,0 +1,169 @@
+:plugin: log4j
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.2
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-log4j/blob/v3.1.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Log4j input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Deprecation Notice
+
+NOTE: This plugin is deprecated. It is recommended that you use filebeat to collect logs from log4j.
+
+The following section is a guide for migrating from SocketAppender to filebeat.
+
+To migrate from the log4j SocketAppender to filebeat, you will need to make three changes:
+
+1) Configure your log4j.properties (in your app) to write to a local file.
+2) Install and configure filebeat to collect those logs and ship them to Logstash.
+3) Configure Logstash to use the beats input.
+
+.Configuring log4j for writing to local files
+
+In your log4j.properties file, remove SocketAppender and replace it with RollingFileAppender.
+
+For example, you can use the following log4j.properties configuration to write daily log files.
+
+    # Your app's log4j.properties (log4j 1.2 only)
+    log4j.rootLogger=daily
+    log4j.appender.daily=org.apache.log4j.rolling.RollingFileAppender
+    log4j.appender.daily.RollingPolicy=org.apache.log4j.rolling.TimeBasedRollingPolicy
+    log4j.appender.daily.RollingPolicy.FileNamePattern=/var/log/your-app/app.%d.log
+    log4j.appender.daily.layout = org.apache.log4j.PatternLayout
+    log4j.appender.daily.layout.ConversionPattern=%d{YYYY-MM-dd HH:mm:ss,SSSZ} %p %c{1}:%L - %m%n
+
+Configuring log4j.properties in more detail is outside the scope of this migration guide.
+
+.Configuring filebeat
+
+Next,
+https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-installation.html[install
+filebeat]. Based on the above log4j.properties, we can use this filebeat
+configuration:
+
+    # filebeat.yml
+    filebeat:
+      prospectors:
+        -
+          paths:
+            - /var/log/your-app/app.*.log
+          input_type: log
+    output:
+      logstash:
+        hosts: ["your-logstash-host:5000"]
+
+For more details on configuring filebeat, see
+https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-configuration.html[the filebeat configuration guide].
+
+.Configuring Logstash to receive from filebeat
+
+Finally, configure Logstash with a beats input:
+
+    # logstash configuration
+    input {
+      beats {
+        port => 5000
+      }
+    }
+
+It is strongly recommended that you also enable TLS in filebeat and the
+Logstash beats input for the protection and safety of your log data.
+
+For more details on configuring the beats input, see
+https://www.elastic.co/guide/en/logstash/current/plugins-inputs-beats.html[the logstash beats input documentation].
+
+==== Description
+
+Read events over a TCP socket from a Log4j SocketAppender. This plugin works only with log4j version 1.x.
+
+This plugin can either accept connections from clients or connect to a server,
+depending on `mode`. Depending on which `mode` is configured,
+you need a matching SocketAppender or a SocketHubAppender
+on the remote side.
+
+One event is created per received log4j LoggingEvent with the following schema:
+
+* `timestamp` => the number of milliseconds elapsed from 1/1/1970 until the logging event was created
+* `path` => the name of the logger
+* `priority` => the level of this event
+* `logger_name` => the name of the logger
+* `thread` => the thread name making the logging request
+* `class` => the fully qualified class name of the caller making the logging request
+* `file` => the source file name and line number of the caller making the logging request, in the colon-separated format "fileName:lineNumber"
+* `method` => the method name of the caller making the logging request
+* `NDC` => the NDC string
+* `stack_trace` => the multi-line stack trace
+
+Also, if the original log4j LoggingEvent contains MDC hash entries, they will be merged into the event as fields.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Log4j Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-mode>> |<<string,string>>, one of `["server", "client"]`|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<<boolean,boolean>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * Value type is <<string,string>>
+  * Default value is `"0.0.0.0"`
+
+When mode is `server`, the address to listen on.
+When mode is `client`, the address to connect to.
+
+[id="{version}-plugins-{type}s-{plugin}-mode"]
+===== `mode`
+
+  * Value can be any of: `server`, `client`
+  * Default value is `"server"`
+
+Mode to operate in. `server` listens for client connections,
+`client` connects to a server.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * Value type is <<number,number>>
+  * Default value is `4560`
+
+When mode is `server`, the port to listen on.
+When mode is `client`, the port to connect to.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"]
+===== `proxy_protocol`
+
+  * Value type is <<boolean,boolean>>
+  * Default value is `false`
+
+Proxy protocol support; only v1 is supported at this time:
+http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/log4j2-index.asciidoc b/docs/versioned-plugins/inputs/log4j2-index.asciidoc
new file mode 100644
index 000000000..fd9f25a6a
--- /dev/null
+++ b/docs/versioned-plugins/inputs/log4j2-index.asciidoc
@@ -0,0 +1,10 @@
+:plugin: log4j2
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+|=======================================================================
+
+
diff --git a/docs/versioned-plugins/inputs/lumberjack-index.asciidoc b/docs/versioned-plugins/inputs/lumberjack-index.asciidoc
new file mode 100644
index 000000000..651759204
--- /dev/null
+++ b/docs/versioned-plugins/inputs/lumberjack-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: lumberjack
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::lumberjack-v3.1.4.asciidoc[]
+include::lumberjack-v3.1.3.asciidoc[]
+include::lumberjack-v3.1.2.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/lumberjack-v3.1.2.asciidoc b/docs/versioned-plugins/inputs/lumberjack-v3.1.2.asciidoc
new file mode 100644
index 000000000..66a95a218
--- /dev/null
+++ b/docs/versioned-plugins/inputs/lumberjack-v3.1.2.asciidoc
@@ -0,0 +1,112 @@
+:plugin: lumberjack
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-lumberjack/blob/v3.1.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Lumberjack input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Receive events using the Lumberjack protocol.
+
+This input can be used to reliably and securely transport
+events between Logstash instances. To do so, use the
+<>
+in the sending Logstash instance(s).
+
+It can also be used to receive events from the deprecated
+https://github.com/elastic/logstash-forwarder[logstash-forwarder]
+tool, which has been replaced by
+https://github.com/elastic/beats/tree/master/filebeat[Filebeat].
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Lumberjack Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-congestion_threshold>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<<password,password>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"]
+===== `congestion_threshold`
+
+  * Value type is <<number,number>>
+  * Default value is `5`
+
+The number of seconds before we raise a timeout. This option is useful
+for controlling how long to wait if something is blocking the pipeline.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * Value type is <<string,string>>
+  * Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * This is a required setting.
+  * Value type is <<number,number>>
+  * There is no default value for this setting.
+
+The port to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
+===== `ssl_certificate`
+
+  * This is a required setting.
+  * Value type is <<path,path>>
+  * There is no default value for this setting.
+
+SSL certificate to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+  * This is a required setting.
+  * Value type is <<path,path>>
+  * There is no default value for this setting.
+
+SSL key to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+  * Value type is <<password,password>>
+  * There is no default value for this setting.
+
+SSL key passphrase to use.
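+
+As a minimal sketch, a listener on the receiving instance and the matching
+lumberjack output on the sending instance might look like the following. The
+port, hostname, and certificate and key paths are placeholder values, and the
+output settings shown assume the lumberjack output plugin's usual options:
+
+    # receiving instance
+    input {
+      lumberjack {
+        port => 5555
+        ssl_certificate => "/etc/logstash/lumberjack.crt"
+        ssl_key => "/etc/logstash/lumberjack.key"
+      }
+    }
+
+    # sending instance
+    output {
+      lumberjack {
+        hosts => ["receiver.example.com"]
+        port => 5555
+        ssl_certificate => "/etc/logstash/lumberjack.crt"
+      }
+    }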
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/lumberjack-v3.1.3.asciidoc b/docs/versioned-plugins/inputs/lumberjack-v3.1.3.asciidoc
new file mode 100644
index 000000000..666c47368
--- /dev/null
+++ b/docs/versioned-plugins/inputs/lumberjack-v3.1.3.asciidoc
@@ -0,0 +1,112 @@
+:plugin: lumberjack
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.3
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-input-lumberjack/blob/v3.1.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Lumberjack input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Receive events using the Lumberjack protocol.
+
+This input can be used to reliably and securely transport
+events between Logstash instances. To do so, use the
+<>
+in the sending Logstash instance(s).
+
+It can also be used to receive events from the deprecated
+https://github.com/elastic/logstash-forwarder[logstash-forwarder]
+tool, which has been replaced by
+https://github.com/elastic/beats/tree/master/filebeat[Filebeat].
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Lumberjack Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-congestion_threshold>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<<password,password>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"]
+===== `congestion_threshold`
+
+  * Value type is <<number,number>>
+  * Default value is `5`
+
+The number of seconds before we raise a timeout. This option is useful
+for controlling how long to wait if something is blocking the pipeline.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * Value type is <<string,string>>
+  * Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * This is a required setting.
+  * Value type is <<number,number>>
+  * There is no default value for this setting.
+
+The port to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
+===== `ssl_certificate`
+
+  * This is a required setting.
+  * Value type is <<path,path>>
+  * There is no default value for this setting.
+
+SSL certificate to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+  * This is a required setting.
+  * Value type is <<path,path>>
+  * There is no default value for this setting.
+
+SSL key to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+  * Value type is <<password,password>>
+  * There is no default value for this setting.
+
+SSL key passphrase to use.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/lumberjack-v3.1.4.asciidoc b/docs/versioned-plugins/inputs/lumberjack-v3.1.4.asciidoc
new file mode 100644
index 000000000..bd0514369
--- /dev/null
+++ b/docs/versioned-plugins/inputs/lumberjack-v3.1.4.asciidoc
@@ -0,0 +1,112 @@
+:plugin: lumberjack
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.4
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-lumberjack/blob/v3.1.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Lumberjack input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Receive events using the Lumberjack protocol.
+
+This input can be used to reliably and securely transport
+events between Logstash instances. To do so, use the
+<>
+in the sending Logstash instance(s).
+
+It can also be used to receive events from the deprecated
+https://github.com/elastic/logstash-forwarder[logstash-forwarder]
+tool, which has been replaced by
+https://github.com/elastic/beats/tree/master/filebeat[Filebeat].
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Lumberjack Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-congestion_threshold>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<<password,password>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"]
+===== `congestion_threshold`
+
+  * Value type is <<number,number>>
+  * Default value is `5`
+
+The number of seconds before we raise a timeout. This option is useful
+for controlling how long to wait if something is blocking the pipeline.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * Value type is <<string,string>>
+  * Default value is `"0.0.0.0"`
+
+The IP address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * This is a required setting.
+  * Value type is <<number,number>>
+  * There is no default value for this setting.
+
+The port to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
+===== `ssl_certificate`
+
+  * This is a required setting.
+  * Value type is <<path,path>>
+  * There is no default value for this setting.
+
+SSL certificate to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+  * This is a required setting.
+  * Value type is <<path,path>>
+  * There is no default value for this setting.
+
+SSL key to use.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+  * Value type is <<password,password>>
+  * There is no default value for this setting.
+
+SSL key passphrase to use.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/meetup-index.asciidoc b/docs/versioned-plugins/inputs/meetup-index.asciidoc
new file mode 100644
index 000000000..baff7792e
--- /dev/null
+++ b/docs/versioned-plugins/inputs/meetup-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: meetup
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::meetup-v3.0.3.asciidoc[]
+include::meetup-v3.0.2.asciidoc[]
+include::meetup-v3.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/meetup-v3.0.1.asciidoc b/docs/versioned-plugins/inputs/meetup-v3.0.1.asciidoc
new file mode 100644
index 000000000..5802be8cf
--- /dev/null
+++ b/docs/versioned-plugins/inputs/meetup-v3.0.1.asciidoc
@@ -0,0 +1,102 @@
+:plugin: meetup
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-meetup/blob/v3.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Meetup input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Periodically query meetup.com for updates about events for the given `meetupkey`.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Meetup Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-eventstatus>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-groupid>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-interval>> |<<number,number>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-meetupkey>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-urlname>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-venueid>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-eventstatus"]
+===== `eventstatus`
+
+  * Value type is <<string,string>>
+  * Default value is `"upcoming,past"`
+
+Event status(es) to fetch, as a comma-separated list.
+
+[id="{version}-plugins-{type}s-{plugin}-groupid"]
+===== `groupid`
+
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+The group ID. Multiple IDs may be specified, separated by commas.
+You must set one of `urlname`, `venueid`, or `groupid`.
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+  * This is a required setting.
+  * Value type is <<number,number>>
+  * There is no default value for this setting.
+
+Interval to run the query. Value is in minutes.
+
+[id="{version}-plugins-{type}s-{plugin}-meetupkey"]
+===== `meetupkey`
+
+  * This is a required setting.
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+The Meetup API key.
+
+[id="{version}-plugins-{type}s-{plugin}-urlname"]
+===== `urlname`
+
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+The URL name, i.e. `ElasticSearch-Oklahoma-City`.
+You must set one of `urlname`, `venueid`, or `groupid`.
+
+[id="{version}-plugins-{type}s-{plugin}-venueid"]
+===== `venueid`
+
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+The venue ID.
+You must set one of `urlname`, `venueid`, or `groupid`.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/meetup-v3.0.2.asciidoc b/docs/versioned-plugins/inputs/meetup-v3.0.2.asciidoc
new file mode 100644
index 000000000..11a0edd87
--- /dev/null
+++ b/docs/versioned-plugins/inputs/meetup-v3.0.2.asciidoc
@@ -0,0 +1,102 @@
+:plugin: meetup
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.2
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-input-meetup/blob/v3.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Meetup input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Periodically query meetup.com for updates about events for the given `meetupkey`.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Meetup Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-eventstatus>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-groupid>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-interval>> |<<number,number>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-meetupkey>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-urlname>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-venueid>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-eventstatus"]
+===== `eventstatus`
+
+  * Value type is <<string,string>>
+  * Default value is `"upcoming,past"`
+
+Event status(es) to fetch, as a comma-separated list.
+
+[id="{version}-plugins-{type}s-{plugin}-groupid"]
+===== `groupid`
+
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+The group ID. Multiple IDs may be specified, separated by commas.
+You must set one of `urlname`, `venueid`, or `groupid`.
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+  * This is a required setting.
+  * Value type is <<number,number>>
+  * There is no default value for this setting.
+
+Interval to run the query. Value is in minutes.
+
+[id="{version}-plugins-{type}s-{plugin}-meetupkey"]
+===== `meetupkey`
+
+  * This is a required setting.
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+The Meetup API key.
+
+[id="{version}-plugins-{type}s-{plugin}-urlname"]
+===== `urlname`
+
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+The URL name, i.e. `ElasticSearch-Oklahoma-City`.
+You must set one of `urlname`, `venueid`, or `groupid`.
+
+[id="{version}-plugins-{type}s-{plugin}-venueid"]
+===== `venueid`
+
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+The venue ID.
+You must set one of `urlname`, `venueid`, or `groupid`.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/meetup-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/meetup-v3.0.3.asciidoc
new file mode 100644
index 000000000..01c34acb8
--- /dev/null
+++ b/docs/versioned-plugins/inputs/meetup-v3.0.3.asciidoc
@@ -0,0 +1,102 @@
+:plugin: meetup
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-meetup/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Meetup input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Periodically query meetup.com for updates about events for the given `meetupkey`.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Meetup Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-eventstatus>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-groupid>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-interval>> |<<number,number>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-meetupkey>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-urlname>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-venueid>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-eventstatus"]
+===== `eventstatus`
+
+  * Value type is <<string,string>>
+  * Default value is `"upcoming,past"`
+
+Event status(es) to fetch, as a comma-separated list.
+
+[id="{version}-plugins-{type}s-{plugin}-groupid"]
+===== `groupid`
+
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+The group ID. Multiple IDs may be specified, separated by commas.
+You must set one of `urlname`, `venueid`, or `groupid`.
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+  * This is a required setting.
+  * Value type is <<number,number>>
+  * There is no default value for this setting.
+
+Interval to run the query. Value is in minutes.
+
+[id="{version}-plugins-{type}s-{plugin}-meetupkey"]
+===== `meetupkey`
+
+  * This is a required setting.
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+The Meetup API key.
+
+[id="{version}-plugins-{type}s-{plugin}-urlname"]
+===== `urlname`
+
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+The URL name, i.e. `ElasticSearch-Oklahoma-City`.
+You must set one of `urlname`, `venueid`, or `groupid`.
+
+[id="{version}-plugins-{type}s-{plugin}-venueid"]
+===== `venueid`
+
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+The venue ID.
+You must set one of `urlname`, `venueid`, or `groupid`.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/mongodb-index.asciidoc b/docs/versioned-plugins/inputs/mongodb-index.asciidoc
new file mode 100644
index 000000000..4f5089843
--- /dev/null
+++ b/docs/versioned-plugins/inputs/mongodb-index.asciidoc
@@ -0,0 +1,10 @@
+:plugin: mongodb
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+|=======================================================================
+
+
diff --git a/docs/versioned-plugins/inputs/neo4j-index.asciidoc b/docs/versioned-plugins/inputs/neo4j-index.asciidoc
new file mode 100644
index 000000000..66b3d770c
--- /dev/null
+++ b/docs/versioned-plugins/inputs/neo4j-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: neo4j
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::neo4j-v2.0.6.asciidoc[]
+include::neo4j-v2.0.5.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/neo4j-v2.0.5.asciidoc b/docs/versioned-plugins/inputs/neo4j-v2.0.5.asciidoc
new file mode 100644
index 000000000..a5910f9b0
--- /dev/null
+++ b/docs/versioned-plugins/inputs/neo4j-v2.0.5.asciidoc
@@ -0,0 +1,93 @@
+:plugin: neo4j
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.5
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-neo4j/blob/v2.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Neo4j input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This plugin gets data from a Neo4j database at predefined intervals. To fetch
+this data, it uses a given Cypher query.
+
+.Usage
+[source, ruby]
+input {
+  neo4j {
+    query => "MATCH (p:`Person`)-->(m:`Movie`) WHERE m.released = 2005 RETURN *"
+    path => "/foo/bar.db"
+  }
+}
+
+In embedded_db mode this plugin requires a Neo4j database 2.0.1 or later. If
you are
+using the remote version, there is no major restriction.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Neo4j Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-path>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-query>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-schedule>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+  * This is a required setting.
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+The path within your file system where the Neo4j database is located.
+
+[id="{version}-plugins-{type}s-{plugin}-query"]
+===== `query`
+
+  * This is a required setting.
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+If undefined, Logstash will complain, even if codec is unused.
+Cypher query used to retrieve data from the Neo4j database; this statement
+should look something like this:
+
+MATCH (p:`Person`)-->(m:`Movie`) WHERE m.released = 2005 RETURN *
+
+
+[id="{version}-plugins-{type}s-{plugin}-schedule"]
+===== `schedule`
+
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+Schedule for periodically running the statement, in cron format,
+for example: "* * * * *" (execute the query every minute, on the minute).
+If this setting is not specified, the input will run only once.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/neo4j-v2.0.6.asciidoc b/docs/versioned-plugins/inputs/neo4j-v2.0.6.asciidoc
new file mode 100644
index 000000000..4af34443e
--- /dev/null
+++ b/docs/versioned-plugins/inputs/neo4j-v2.0.6.asciidoc
@@ -0,0 +1,93 @@
+:plugin: neo4j
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.6
+:release_date: 2017-08-15
+:changelog_url: https://github.com/logstash-plugins/logstash-input-neo4j/blob/v2.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Neo4j input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This plugin gets data from a Neo4j database at predefined intervals. To fetch
+this data, it uses a given Cypher query.
+
+.Usage
+[source, ruby]
+input {
+  neo4j {
+    query => "MATCH (p:`Person`)-->(m:`Movie`) WHERE m.released = 2005 RETURN *"
+    path => "/foo/bar.db"
+  }
+}
+
+In embedded_db mode this plugin requires a Neo4j database 2.0.1 or later. If
+you are using the remote version, there is no major restriction.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Neo4j Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-path>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-query>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-schedule>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+  * This is a required setting.
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+The path within your file system where the Neo4j database is located.
+
+[id="{version}-plugins-{type}s-{plugin}-query"]
+===== `query`
+
+  * This is a required setting.
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+If undefined, Logstash will complain, even if codec is unused.
+Cypher query used to retrieve data from the Neo4j database; this statement
+should look something like this:
+
+MATCH (p:`Person`)-->(m:`Movie`) WHERE m.released = 2005 RETURN *
+
+
+[id="{version}-plugins-{type}s-{plugin}-schedule"]
+===== `schedule`
+
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+Schedule for periodically running the statement, in cron format,
+for example: "* * * * *" (execute the query every minute, on the minute).
+If this setting is not specified, the input will run only once.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/netflow-index.asciidoc b/docs/versioned-plugins/inputs/netflow-index.asciidoc
new file mode 100644
index 000000000..8b4978af7
--- /dev/null
+++ b/docs/versioned-plugins/inputs/netflow-index.asciidoc
@@ -0,0 +1,10 @@
+:plugin: netflow
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+|=======================================================================
+
+
diff --git a/docs/versioned-plugins/inputs/perfmon-index.asciidoc b/docs/versioned-plugins/inputs/perfmon-index.asciidoc
new file mode 100644
index 000000000..165e7842d
--- /dev/null
+++ b/docs/versioned-plugins/inputs/perfmon-index.asciidoc
@@ -0,0 +1,10 @@
+:plugin: perfmon
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+|=======================================================================
+
+
diff --git a/docs/versioned-plugins/inputs/pipe-index.asciidoc b/docs/versioned-plugins/inputs/pipe-index.asciidoc
new file mode 100644
index 000000000..8d87e45a0
--- /dev/null
+++ b/docs/versioned-plugins/inputs/pipe-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: pipe
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-15
+| <> | 2017-06-23
+|=======================================================================
+
+include::pipe-v3.0.6.asciidoc[]
+include::pipe-v3.0.5.asciidoc[]
+include::pipe-v3.0.4.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/pipe-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/pipe-v3.0.4.asciidoc
new file mode 100644
index 000000000..29c8ee8a2
--- 
/dev/null +++ b/docs/versioned-plugins/inputs/pipe-v3.0.4.asciidoc @@ -0,0 +1,61 @@ +:plugin: pipe +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-input-pipe/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Pipe input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Stream events from a long running command pipe. + +By default, each event is assumed to be one line. If you +want to join lines, you'll want to use the multiline codec. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Pipe Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-command>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-command"] +===== `command` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Command to run and read events from, one line at a time. + +Example: +[source,ruby] + command => "echo hello world" + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/pipe-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/pipe-v3.0.5.asciidoc new file mode 100644 index 000000000..50b88a09d --- /dev/null +++ b/docs/versioned-plugins/inputs/pipe-v3.0.5.asciidoc @@ -0,0 +1,61 @@ +:plugin: pipe +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-input-pipe/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Pipe input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Stream events from a long running command pipe. + +By default, each event is assumed to be one line. If you +want to join lines, you'll want to use the multiline codec. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Pipe Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-command>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-command"] +===== `command` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Command to run and read events from, one line at a time. + +Example: +[source,ruby] + command => "echo hello world" + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/pipe-v3.0.6.asciidoc b/docs/versioned-plugins/inputs/pipe-v3.0.6.asciidoc new file mode 100644 index 000000000..46eabd7bb --- /dev/null +++ b/docs/versioned-plugins/inputs/pipe-v3.0.6.asciidoc @@ -0,0 +1,61 @@ +:plugin: pipe +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.6 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-input-pipe/blob/v3.0.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Pipe input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Stream events from a long running command pipe. + +By default, each event is assumed to be one line. If you +want to join lines, you'll want to use the multiline codec. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Pipe Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-command>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-command"] +===== `command` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Command to run and read events from, one line at a time. 
+ +Example: +[source,ruby] + command => "echo hello world" + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/puppet_facter-index.asciidoc b/docs/versioned-plugins/inputs/puppet_facter-index.asciidoc new file mode 100644 index 000000000..cf3e696ac --- /dev/null +++ b/docs/versioned-plugins/inputs/puppet_facter-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: puppet_facter +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-15 +| <> | 2017-06-23 +|======================================================================= + +include::puppet_facter-v3.0.3.asciidoc[] +include::puppet_facter-v3.0.2.asciidoc[] +include::puppet_facter-v3.0.1.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/puppet_facter-v3.0.1.asciidoc b/docs/versioned-plugins/inputs/puppet_facter-v3.0.1.asciidoc new file mode 100644 index 000000000..426557593 --- /dev/null +++ b/docs/versioned-plugins/inputs/puppet_facter-v3.0.1.asciidoc @@ -0,0 +1,106 @@ +:plugin: puppet_facter +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.1 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-input-puppet_facter/blob/v3.0.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Puppet_facter input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Connects to a puppet server and requests facts + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Puppet_facter Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-environment>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-private_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-public_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. 
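+
+As a quick orientation, a minimal configuration might look like the following
+sketch (the host value is a placeholder, and the other values shown are the
+documented defaults):
+
+[source,ruby]
+    input {
+      puppet_facter {
+        host => "puppet.example.com"   # placeholder puppet server address
+        port => 8140                   # default puppet master port
+        interval => 600                # poll for facts every 600 seconds
+        environment => "production"    # default environment
+      }
+    }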
+ +  + +[id="{version}-plugins-{type}s-{plugin}-environment"] +===== `environment` + + * Value type is <> + * Default value is `"production"` + + + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + + + +[id="{version}-plugins-{type}s-{plugin}-interval"] +===== `interval` + + * Value type is <> + * Default value is `600` + + + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `8140` + + + +[id="{version}-plugins-{type}s-{plugin}-private_key"] +===== `private_key` + + * Value type is <> + * There is no default value for this setting. + + + +[id="{version}-plugins-{type}s-{plugin}-public_key"] +===== `public_key` + + * Value type is <> + * There is no default value for this setting. + + + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * Default value is `true` + + + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/puppet_facter-v3.0.2.asciidoc b/docs/versioned-plugins/inputs/puppet_facter-v3.0.2.asciidoc new file mode 100644 index 000000000..2a1e1400b --- /dev/null +++ b/docs/versioned-plugins/inputs/puppet_facter-v3.0.2.asciidoc @@ -0,0 +1,106 @@ +:plugin: puppet_facter +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.2 +:release_date: 2017-08-15 +:changelog_url: https://github.com/logstash-plugins/logstash-input-puppet_facter/blob/v3.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Puppet_facter input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Connects to a puppet server and requests facts + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Puppet_facter Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-environment>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-private_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-public_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. 
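+
+For the key-based settings below, a hedged sketch of an SSL connection with
+client keys might look like this (all paths are placeholders):
+
+[source,ruby]
+    input {
+      puppet_facter {
+        host => "puppet.example.com"                       # placeholder
+        ssl => true                                        # the default
+        private_key => "/etc/logstash/puppet/private.pem"  # placeholder path
+        public_key => "/etc/logstash/puppet/public.pem"    # placeholder path
+      }
+    }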
+ +  + +[id="{version}-plugins-{type}s-{plugin}-environment"] +===== `environment` + + * Value type is <> + * Default value is `"production"` + + + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + + + +[id="{version}-plugins-{type}s-{plugin}-interval"] +===== `interval` + + * Value type is <> + * Default value is `600` + + + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `8140` + + + +[id="{version}-plugins-{type}s-{plugin}-private_key"] +===== `private_key` + + * Value type is <> + * There is no default value for this setting. + + + +[id="{version}-plugins-{type}s-{plugin}-public_key"] +===== `public_key` + + * Value type is <> + * There is no default value for this setting. + + + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * Default value is `true` + + + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/puppet_facter-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/puppet_facter-v3.0.3.asciidoc new file mode 100644 index 000000000..9fc392a4b --- /dev/null +++ b/docs/versioned-plugins/inputs/puppet_facter-v3.0.3.asciidoc @@ -0,0 +1,106 @@ +:plugin: puppet_facter +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-input-puppet_facter/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Puppet_facter input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Connects to a puppet server and requests facts + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Puppet_facter Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-environment>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-private_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-public_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. 
+ +  + +[id="{version}-plugins-{type}s-{plugin}-environment"] +===== `environment` + + * Value type is <> + * Default value is `"production"` + + + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + + + +[id="{version}-plugins-{type}s-{plugin}-interval"] +===== `interval` + + * Value type is <> + * Default value is `600` + + + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `8140` + + + +[id="{version}-plugins-{type}s-{plugin}-private_key"] +===== `private_key` + + * Value type is <> + * There is no default value for this setting. + + + +[id="{version}-plugins-{type}s-{plugin}-public_key"] +===== `public_key` + + * Value type is <> + * There is no default value for this setting. + + + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * Default value is `true` + + + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/rabbitmq-index.asciidoc b/docs/versioned-plugins/inputs/rabbitmq-index.asciidoc new file mode 100644 index 000000000..d8f3ed14b --- /dev/null +++ b/docs/versioned-plugins/inputs/rabbitmq-index.asciidoc @@ -0,0 +1,20 @@ +:plugin: rabbitmq +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-16 +| <> | 2017-08-02 +| <> | 2017-08-18 +| <> | 2017-06-23 +|======================================================================= + +include::rabbitmq-v6.0.2.asciidoc[] +include::rabbitmq-v6.0.1.asciidoc[] +include::rabbitmq-v6.0.0.asciidoc[] +include::rabbitmq-v5.2.5.asciidoc[] +include::rabbitmq-v5.2.4.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/rabbitmq-v5.2.4.asciidoc b/docs/versioned-plugins/inputs/rabbitmq-v5.2.4.asciidoc new file mode 100644 index 000000000..714bc915e --- /dev/null +++ b/docs/versioned-plugins/inputs/rabbitmq-v5.2.4.asciidoc @@ -0,0 +1,415 @@ +:plugin: rabbitmq +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v5.2.4 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-input-rabbitmq/blob/v5.2.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Rabbitmq input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Pull events from a http://www.rabbitmq.com/[RabbitMQ] queue. + +The default settings will create an entirely transient queue and listen for all messages by default. +If you need durability or any other advanced settings, please set the appropriate options + +This plugin uses the http://rubymarchhare.info/[March Hare] library +for interacting with the RabbitMQ server. Most configuration options +map directly to standard RabbitMQ and AMQP concepts. The +https://www.rabbitmq.com/amqp-0-9-1-reference.html[AMQP 0-9-1 reference guide] +and other parts of the RabbitMQ documentation are useful for deeper +understanding. 
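+
+As a point of reference, a minimal configuration that consumes from an
+existing durable queue might look like this sketch (the host and queue
+names are placeholders):
+
+[source,ruby]
+    input {
+      rabbitmq {
+        host => "rabbitmq.example.com"   # placeholder broker address
+        queue => "logstash"              # placeholder queue name
+        durable => true                  # match the existing queue's options
+      }
+    }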
+ +The properties of messages received will be stored in the +`[@metadata][rabbitmq_properties]` field if the `@metadata_enabled` setting is checked. +Note that storing metadata may degrade performance. +The following properties may be available (in most cases dependent on whether +they were set by the sender): + +* app-id +* cluster-id +* consumer-tag +* content-encoding +* content-type +* correlation-id +* delivery-mode +* exchange +* expiration +* message-id +* priority +* redeliver +* reply-to +* routing-key +* timestamp +* type +* user-id + +For example, to get the RabbitMQ message's timestamp property +into the Logstash event's `@timestamp` field, use the date +filter to parse the `[@metadata][rabbitmq_properties][timestamp]` +field: +[source,ruby] + filter { + if [@metadata][rabbitmq_properties][timestamp] { + date { + match => ["[@metadata][rabbitmq_properties][timestamp]", "UNIX"] + } + } + } + +Additionally, any message headers will be saved in the +`[@metadata][rabbitmq_headers]` field. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Rabbitmq Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-ack>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-auto_delete>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclusive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_enabled>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-prefetch_count>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-ack"] +===== `ack` + + * Value type is <> + * Default value is `true` + +Enable message acknowledgements. 
With acknowledgements, messages fetched by Logstash but not yet sent into the
+Logstash pipeline will be requeued by the server if Logstash
+shuts down. Acknowledgements do, however, reduce message
+throughput.
+
+An ack is sent back only once every `prefetch_count` messages;
+working in batches provides a performance boost here.
+
+[id="{version}-plugins-{type}s-{plugin}-arguments"]
+===== `arguments`
+
+ * Value type is <>
+ * Default value is `{}`
+
+Extra queue arguments as an array.
+To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}`
+
+[id="{version}-plugins-{type}s-{plugin}-auto_delete"]
+===== `auto_delete`
+
+ * Value type is <>
+ * Default value is `false`
+
+Should the queue be deleted on the broker when the last consumer
+disconnects? Set this option to `false` if you want the queue to remain
+on the broker, queueing up messages until a consumer comes along to
+consume them.
+
+[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"]
+===== `automatic_recovery`
+
+ * Value type is <>
+ * Default value is `true`
+
+Set this to `true` to automatically recover from a broken connection. You almost certainly don't want to override this.
+
+[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"]
+===== `connect_retry_interval`
+
+ * Value type is <>
+ * Default value is `1`
+
+Time in seconds to wait before retrying a connection.
+
+[id="{version}-plugins-{type}s-{plugin}-connection_timeout"]
+===== `connection_timeout`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The connection timeout in milliseconds. If not specified, the timeout is infinite.
+
+[id="{version}-plugins-{type}s-{plugin}-durable"]
+===== `durable`
+
+ * Value type is <>
+ * Default value is `false`
+
+Is this queue durable? (That is, should it survive a broker restart?)
+
+[id="{version}-plugins-{type}s-{plugin}-exchange"]
+===== `exchange`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The name of the exchange to bind the queue to. Specify `exchange_type`
+as well to declare the exchange if it does not exist.
+
+[id="{version}-plugins-{type}s-{plugin}-exchange_type"]
+===== `exchange_type`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The type of the exchange to bind to. Specifying this will cause this plugin
+to declare the exchange if it does not exist.
+
+[id="{version}-plugins-{type}s-{plugin}-exclusive"]
+===== `exclusive`
+
+ * Value type is <>
+ * Default value is `false`
+
+Is the queue exclusive? Exclusive queues can only be used by the connection
+that declared them and will be deleted when it is closed (e.g. due to a Logstash
+restart).
+
+[id="{version}-plugins-{type}s-{plugin}-heartbeat"]
+===== `heartbeat`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Heartbeat delay in seconds. If unspecified, no heartbeats will be sent.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The RabbitMQ server address(es). `host` can be either a single host or a list
+of hosts, i.e.
+ host => "localhost"
+or
+ host => ["host01", "host02"]
+
+If multiple hosts are provided, one host is chosen at random on the initial
+connection and on any subsequent recovery attempts, and connected to.
+Note that only one host connection is active at a time.
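+
+For example, a failover-style connection across several brokers might be
+sketched as follows (the host names are placeholders):
+
+[source,ruby]
+    input {
+      rabbitmq {
+        host => ["rabbit01", "rabbit02", "rabbit03"]   # one is chosen at random
+        queue => "logstash"                            # placeholder queue name
+      }
+    }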
+
+[id="{version}-plugins-{type}s-{plugin}-key"]
+===== `key`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+The routing key to use when binding a queue to the exchange.
+This is only relevant for direct or topic exchanges.
+
+* Routing keys are ignored on fanout exchanges.
+* Wildcards are not valid on direct exchanges.
+
+[id="{version}-plugins-{type}s-{plugin}-metadata_enabled"]
+===== `metadata_enabled`
+
+ * Value type is <>
+ * Default value is `false`
+
+Enable the storage of message headers and properties in `@metadata`. This may impact performance.
+
+[id="{version}-plugins-{type}s-{plugin}-passive"]
+===== `passive`
+
+ * Value type is <>
+ * Default value is `false`
+
+If true, the queue will be passively declared, meaning it must
+already exist on the server. To have Logstash create the queue
+if necessary, leave this option as false. If actively declaring
+a queue that already exists, the queue options for this plugin
+(durable, etc.) must match those of the existing queue.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <>
+ * Default value is `"guest"`
+
+The RabbitMQ password.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <>
+ * Default value is `5672`
+
+The RabbitMQ port to connect on.
+
+[id="{version}-plugins-{type}s-{plugin}-prefetch_count"]
+===== `prefetch_count`
+
+ * Value type is <>
+ * Default value is `256`
+
+The prefetch count. If acknowledgements are enabled with the `ack`
+option, this specifies the number of outstanding unacknowledged
+messages allowed.
+
+[id="{version}-plugins-{type}s-{plugin}-queue"]
+===== `queue`
+
+ * Value type is <>
+ * Default value is `""`
+
+The name of the queue Logstash will consume events from. If
+left empty, a transient queue with a randomly chosen name
+will be created.
+
+The default codec for this plugin is JSON. You can override this to suit your particular needs, however.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Enable or disable SSL.
+Note that by default remote certificate verification is off.
+Specify `ssl_certificate_path` and `ssl_certificate_password` if you need
+certificate verification.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"]
+===== `ssl_certificate_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The password for the encrypted PKCS12 (.p12) certificate file specified in `ssl_certificate_path`.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"]
+===== `ssl_certificate_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The path to an SSL certificate in PKCS12 (.p12) format, used for verifying the remote host.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_version"]
+===== `ssl_version`
+
+ * Value type is <>
+ * Default value is `"TLSv1.2"`
+
+Version of the SSL protocol to use.
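+
+Taken together, a certificate-verifying SSL setup might look like this
+sketch (the path and password are placeholders):
+
+[source,ruby]
+    input {
+      rabbitmq {
+        host => "rabbitmq.example.com"                             # placeholder
+        ssl => true
+        ssl_version => "TLSv1.2"                                   # the default
+        ssl_certificate_path => "/etc/logstash/rabbit/client.p12"  # placeholder
+        ssl_certificate_password => "changeme"                     # placeholder
+      }
+    }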
+ +[id="{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds"] +===== `subscription_retry_interval_seconds` + + * This is a required setting. + * Value type is <> + * Default value is `5` + +Amount of time in seconds to wait after a failed subscription request +before retrying. Subscribes can fail if the server goes away and then comes back. + +[id="{version}-plugins-{type}s-{plugin}-threads"] +===== `threads` + + * Value type is <> + * Default value is `1` + + + +[id="{version}-plugins-{type}s-{plugin}-tls_certificate_password"] +===== `tls_certificate_password` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + +TLS certificate password + +[id="{version}-plugins-{type}s-{plugin}-tls_certificate_path"] +===== `tls_certificate_path` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + +TLS certifcate path + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * Default value is `"guest"` + +RabbitMQ username + +[id="{version}-plugins-{type}s-{plugin}-vhost"] +===== `vhost` + + * Value type is <> + * Default value is `"/"` + +The vhost (virtual host) to use. If you don't know what this +is, leave the default. With the exception of the default +vhost ("/"), names of vhosts should not begin with a forward +slash. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/rabbitmq-v5.2.5.asciidoc b/docs/versioned-plugins/inputs/rabbitmq-v5.2.5.asciidoc new file mode 100644 index 000000000..2bc863d32 --- /dev/null +++ b/docs/versioned-plugins/inputs/rabbitmq-v5.2.5.asciidoc @@ -0,0 +1,415 @@ +:plugin: rabbitmq +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v5.2.5 +:release_date: 2017-08-18 +:changelog_url: https://github.com/logstash-plugins/logstash-input-rabbitmq/blob/v5.2.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Rabbitmq input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Pull events from a http://www.rabbitmq.com/[RabbitMQ] queue. + +The default settings will create an entirely transient queue and listen for all messages by default. +If you need durability or any other advanced settings, please set the appropriate options + +This plugin uses the http://rubymarchhare.info/[March Hare] library +for interacting with the RabbitMQ server. Most configuration options +map directly to standard RabbitMQ and AMQP concepts. The +https://www.rabbitmq.com/amqp-0-9-1-reference.html[AMQP 0-9-1 reference guide] +and other parts of the RabbitMQ documentation are useful for deeper +understanding. + +The properties of messages received will be stored in the +`[@metadata][rabbitmq_properties]` field if the `@metadata_enabled` setting is checked. +Note that storing metadata may degrade performance. 
+The following properties may be available (in most cases dependent on whether +they were set by the sender): + +* app-id +* cluster-id +* consumer-tag +* content-encoding +* content-type +* correlation-id +* delivery-mode +* exchange +* expiration +* message-id +* priority +* redeliver +* reply-to +* routing-key +* timestamp +* type +* user-id + +For example, to get the RabbitMQ message's timestamp property +into the Logstash event's `@timestamp` field, use the date +filter to parse the `[@metadata][rabbitmq_properties][timestamp]` +field: +[source,ruby] + filter { + if [@metadata][rabbitmq_properties][timestamp] { + date { + match => ["[@metadata][rabbitmq_properties][timestamp]", "UNIX"] + } + } + } + +Additionally, any message headers will be saved in the +`[@metadata][rabbitmq_headers]` field. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Rabbitmq Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-ack>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-auto_delete>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclusive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_enabled>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-prefetch_count>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-ack"] +===== `ack` + + * Value type is <> + * Default value is `true` + +Enable message acknowledgements. With acknowledgements +messages fetched by Logstash but not yet sent into the +Logstash pipeline will be requeued by the server if Logstash +shuts down. Acknowledgements will however hurt the message +throughput. + +This will only send an ack back every `prefetch_count` messages. +Working in batches provides a performance boost here. 
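+
+If losing in-flight messages on shutdown is acceptable, acknowledgements can
+be traded away for throughput; a sketch (the host is a placeholder):
+
+[source,ruby]
+    input {
+      rabbitmq {
+        host => "rabbitmq.example.com"   # placeholder
+        queue => "logstash"
+        ack => false                     # no redelivery if Logstash shuts down
+      }
+    }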
+ +[id="{version}-plugins-{type}s-{plugin}-arguments"] +===== `arguments` + + * Value type is <> + * Default value is `{}` + +Extra queue arguments as an array. +To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}` + +[id="{version}-plugins-{type}s-{plugin}-auto_delete"] +===== `auto_delete` + + * Value type is <> + * Default value is `false` + +Should the queue be deleted on the broker when the last consumer +disconnects? Set this option to `false` if you want the queue to remain +on the broker, queueing up messages until a consumer comes along to +consume them. + +[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"] +===== `automatic_recovery` + + * Value type is <> + * Default value is `true` + +Set this to automatically recover from a broken connection. You almost certainly don't want to override this!!! + +[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"] +===== `connect_retry_interval` + + * Value type is <> + * Default value is `1` + +Time in seconds to wait before retrying a connection + +[id="{version}-plugins-{type}s-{plugin}-connection_timeout"] +===== `connection_timeout` + + * Value type is <> + * There is no default value for this setting. + +The default connection timeout in milliseconds. If not specified the timeout is infinite. + +[id="{version}-plugins-{type}s-{plugin}-durable"] +===== `durable` + + * Value type is <> + * Default value is `false` + +Is this queue durable? (aka; Should it survive a broker restart?) + +[id="{version}-plugins-{type}s-{plugin}-exchange"] +===== `exchange` + + * Value type is <> + * There is no default value for this setting. + +The name of the exchange to bind the queue to. Specify `exchange_type` +as well to declare the exchange if it does not exist + +[id="{version}-plugins-{type}s-{plugin}-exchange_type"] +===== `exchange_type` + + * Value type is <> + * There is no default value for this setting. + +The type of the exchange to bind to. Specifying this will cause this plugin +to declare the exchange if it does not exist. + +[id="{version}-plugins-{type}s-{plugin}-exclusive"] +===== `exclusive` + + * Value type is <> + * Default value is `false` + +Is the queue exclusive? Exclusive queues can only be used by the connection +that declared them and will be deleted when it is closed (e.g. due to a Logstash +restart). + +[id="{version}-plugins-{type}s-{plugin}-heartbeat"] +===== `heartbeat` + + * Value type is <> + * There is no default value for this setting. + +Heartbeat delay in seconds. If unspecified no heartbeats will be sent + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Common functionality for the rabbitmq input/output +RabbitMQ server address(es) +host can either be a single host, or a list of hosts +i.e. + host => "localhost" +or + host => ["host01", "host02] + +if multiple hosts are provided on the initial connection and any subsequent +recovery attempts of the hosts is chosen at random and connected to. +Note that only one host connection is active at a time. + +[id="{version}-plugins-{type}s-{plugin}-key"] +===== `key` + + * Value type is <> + * Default value is `"logstash"` + +The routing key to use when binding a queue to the exchange. +This is only relevant for direct or topic exchanges. + +* Routing keys are ignored on fanout exchanges. +* Wildcards are not valid on direct exchanges. 
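+
+For instance, binding to a topic exchange with a wildcard routing key might
+be sketched like this (the exchange and key values are placeholders):
+
+[source,ruby]
+    input {
+      rabbitmq {
+        host => "rabbitmq.example.com"   # placeholder
+        exchange => "logs"               # declared if it does not exist
+        exchange_type => "topic"         # topic exchanges allow wildcard keys
+        key => "logs.#"                  # placeholder routing-key pattern
+      }
+    }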
+ +[id="{version}-plugins-{type}s-{plugin}-metadata_enabled"] +===== `metadata_enabled` + + * Value type is <> + * Default value is `false` + +Enable the storage of message headers and properties in `@metadata`. This may impact performance + +[id="{version}-plugins-{type}s-{plugin}-passive"] +===== `passive` + + * Value type is <> + * Default value is `false` + +If true the queue will be passively declared, meaning it must +already exist on the server. To have Logstash create the queue +if necessary leave this option as false. If actively declaring +a queue that already exists, the queue options for this plugin +(durable etc) must match those of the existing queue. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * Default value is `"guest"` + +RabbitMQ password + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `5672` + +RabbitMQ port to connect on + +[id="{version}-plugins-{type}s-{plugin}-prefetch_count"] +===== `prefetch_count` + + * Value type is <> + * Default value is `256` + +Prefetch count. If acknowledgements are enabled with the `ack` +option, specifies the number of outstanding unacknowledged +messages allowed. + +[id="{version}-plugins-{type}s-{plugin}-queue"] +===== `queue` + + * Value type is <> + * Default value is `""` + +The properties to extract from each message and store in a +@metadata field. + +Technically the exchange, redeliver, and routing-key +properties belong to the envelope and not the message but we +ignore that distinction here. However, we extract the +headers separately via get_headers even though the header +table technically is a message property. + +Freezing all strings so that code modifying the event's +@metadata field can't touch them. + +If updating this list, remember to update the documentation +above too. +The default codec for this plugin is JSON. You can override this to suit your particular needs however. +The name of the queue Logstash will consume events from. If +left empty, a transient queue with an randomly chosen name +will be created. + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * There is no default value for this setting. + +Enable or disable SSL. +Note that by default remote certificate verification is off. +Specify ssl_certificate_path and ssl_certificate_password if you need +certificate verification + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"] +===== `ssl_certificate_password` + + * Value type is <> + * There is no default value for this setting. + +Password for the encrypted PKCS12 (.p12) certificate file specified in ssl_certificate_path + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"] +===== `ssl_certificate_path` + + * Value type is <> + * There is no default value for this setting. + +Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host + +[id="{version}-plugins-{type}s-{plugin}-ssl_version"] +===== `ssl_version` + + * Value type is <> + * Default value is `"TLSv1.2"` + +Version of the SSL protocol to use. + +[id="{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds"] +===== `subscription_retry_interval_seconds` + + * This is a required setting. + * Value type is <> + * Default value is `5` + +Amount of time in seconds to wait after a failed subscription request +before retrying. Subscribes can fail if the server goes away and then comes back. 
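+
+The retry-related settings compose as in this sketch (the interval values
+are illustrative, not recommendations):
+
+[source,ruby]
+    input {
+      rabbitmq {
+        host => "rabbitmq.example.com"             # placeholder
+        automatic_recovery => true                 # the default
+        connect_retry_interval => 5                # seconds between connect retries
+        subscription_retry_interval_seconds => 5   # seconds between subscribe retries
+      }
+    }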
+ +[id="{version}-plugins-{type}s-{plugin}-threads"] +===== `threads` + + * Value type is <> + * Default value is `1` + + + +[id="{version}-plugins-{type}s-{plugin}-tls_certificate_password"] +===== `tls_certificate_password` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + +TLS certificate password + +[id="{version}-plugins-{type}s-{plugin}-tls_certificate_path"] +===== `tls_certificate_path` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + +TLS certifcate path + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * Default value is `"guest"` + +RabbitMQ username + +[id="{version}-plugins-{type}s-{plugin}-vhost"] +===== `vhost` + + * Value type is <> + * Default value is `"/"` + +The vhost (virtual host) to use. If you don't know what this +is, leave the default. With the exception of the default +vhost ("/"), names of vhosts should not begin with a forward +slash. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/rabbitmq-v6.0.0.asciidoc b/docs/versioned-plugins/inputs/rabbitmq-v6.0.0.asciidoc new file mode 100644 index 000000000..b746ba105 --- /dev/null +++ b/docs/versioned-plugins/inputs/rabbitmq-v6.0.0.asciidoc @@ -0,0 +1,395 @@ +:plugin: rabbitmq +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v6.0.0 +:release_date: 2017-08-02 +:changelog_url: https://github.com/logstash-plugins/logstash-input-rabbitmq/blob/v6.0.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Rabbitmq input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Pull events from a http://www.rabbitmq.com/[RabbitMQ] queue. + +The default settings will create an entirely transient queue and listen for all messages by default. +If you need durability or any other advanced settings, please set the appropriate options + +This plugin uses the http://rubymarchhare.info/[March Hare] library +for interacting with the RabbitMQ server. Most configuration options +map directly to standard RabbitMQ and AMQP concepts. The +https://www.rabbitmq.com/amqp-0-9-1-reference.html[AMQP 0-9-1 reference guide] +and other parts of the RabbitMQ documentation are useful for deeper +understanding. + +The properties of messages received will be stored in the +`[@metadata][rabbitmq_properties]` field if the `@metadata_enabled` setting is checked. +Note that storing metadata may degrade performance. 
+The following properties may be available (in most cases dependent on whether +they were set by the sender): + +* app-id +* cluster-id +* consumer-tag +* content-encoding +* content-type +* correlation-id +* delivery-mode +* exchange +* expiration +* message-id +* priority +* redeliver +* reply-to +* routing-key +* timestamp +* type +* user-id + +For example, to get the RabbitMQ message's timestamp property +into the Logstash event's `@timestamp` field, use the date +filter to parse the `[@metadata][rabbitmq_properties][timestamp]` +field: +[source,ruby] + filter { + if [@metadata][rabbitmq_properties][timestamp] { + date { + match => ["[@metadata][rabbitmq_properties][timestamp]", "UNIX"] + } + } + } + +Additionally, any message headers will be saved in the +`[@metadata][rabbitmq_headers]` field. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Rabbitmq Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-ack>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-auto_delete>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclusive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_enabled>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-prefetch_count>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-ack"] +===== `ack` + + * Value type is <> + * Default value is `true` + +Enable message acknowledgements. With acknowledgements +messages fetched by Logstash but not yet sent into the +Logstash pipeline will be requeued by the server if Logstash +shuts down. Acknowledgements will however hurt the message +throughput. + +This will only send an ack back every `prefetch_count` messages. +Working in batches provides a performance boost here. 
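+
+Because an ack is sent only once per batch, `prefetch_count` effectively
+sets the batch size; a sketch (values are illustrative):
+
+[source,ruby]
+    input {
+      rabbitmq {
+        host => "rabbitmq.example.com"   # placeholder
+        ack => true                      # the default
+        prefetch_count => 256            # the default; acked once per 256 messages
+      }
+    }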
+ +[id="{version}-plugins-{type}s-{plugin}-arguments"] +===== `arguments` + + * Value type is <> + * Default value is `{}` + +Extra queue arguments as an array. +To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}` + +[id="{version}-plugins-{type}s-{plugin}-auto_delete"] +===== `auto_delete` + + * Value type is <> + * Default value is `false` + +Should the queue be deleted on the broker when the last consumer +disconnects? Set this option to `false` if you want the queue to remain +on the broker, queueing up messages until a consumer comes along to +consume them. + +[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"] +===== `automatic_recovery` + + * Value type is <> + * Default value is `true` + +Set this to automatically recover from a broken connection. You almost certainly don't want to override this!!! + +[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"] +===== `connect_retry_interval` + + * Value type is <> + * Default value is `1` + +Time in seconds to wait before retrying a connection + +[id="{version}-plugins-{type}s-{plugin}-connection_timeout"] +===== `connection_timeout` + + * Value type is <> + * There is no default value for this setting. + +The default connection timeout in milliseconds. If not specified the timeout is infinite. + +[id="{version}-plugins-{type}s-{plugin}-durable"] +===== `durable` + + * Value type is <> + * Default value is `false` + +Is this queue durable? (aka; Should it survive a broker restart?) + +[id="{version}-plugins-{type}s-{plugin}-exchange"] +===== `exchange` + + * Value type is <> + * There is no default value for this setting. + +The name of the exchange to bind the queue to. Specify `exchange_type` +as well to declare the exchange if it does not exist + +[id="{version}-plugins-{type}s-{plugin}-exchange_type"] +===== `exchange_type` + + * Value type is <> + * There is no default value for this setting. + +The type of the exchange to bind to. Specifying this will cause this plugin +to declare the exchange if it does not exist. + +[id="{version}-plugins-{type}s-{plugin}-exclusive"] +===== `exclusive` + + * Value type is <> + * Default value is `false` + +Is the queue exclusive? Exclusive queues can only be used by the connection +that declared them and will be deleted when it is closed (e.g. due to a Logstash +restart). + +[id="{version}-plugins-{type}s-{plugin}-heartbeat"] +===== `heartbeat` + + * Value type is <> + * There is no default value for this setting. + +Heartbeat delay in seconds. If unspecified no heartbeats will be sent + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Common functionality for the rabbitmq input/output +RabbitMQ server address(es) +host can either be a single host, or a list of hosts +i.e. + host => "localhost" +or + host => ["host01", "host02] + +if multiple hosts are provided on the initial connection and any subsequent +recovery attempts of the hosts is chosen at random and connected to. +Note that only one host connection is active at a time. + +[id="{version}-plugins-{type}s-{plugin}-key"] +===== `key` + + * Value type is <> + * Default value is `"logstash"` + +The routing key to use when binding a queue to the exchange. +This is only relevant for direct or topic exchanges. + +* Routing keys are ignored on fanout exchanges. +* Wildcards are not valid on direct exchanges. 
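+
+For example, the `arguments` option described above can carry broker-side
+settings such as the mirroring policy; a sketch (the queue name is a
+placeholder):
+
+[source,ruby]
+    input {
+      rabbitmq {
+        host => "rabbitmq.example.com"           # placeholder
+        queue => "logstash"                      # placeholder
+        durable => true
+        arguments => {"x-ha-policy" => "all"}    # mirror the queue
+      }
+    }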
+ +[id="{version}-plugins-{type}s-{plugin}-metadata_enabled"] +===== `metadata_enabled` + + * Value type is <> + * Default value is `false` + +Enable the storage of message headers and properties in `@metadata`. This may impact performance + +[id="{version}-plugins-{type}s-{plugin}-passive"] +===== `passive` + + * Value type is <> + * Default value is `false` + +If true the queue will be passively declared, meaning it must +already exist on the server. To have Logstash create the queue +if necessary leave this option as false. If actively declaring +a queue that already exists, the queue options for this plugin +(durable etc) must match those of the existing queue. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * Default value is `"guest"` + +RabbitMQ password + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `5672` + +RabbitMQ port to connect on + +[id="{version}-plugins-{type}s-{plugin}-prefetch_count"] +===== `prefetch_count` + + * Value type is <> + * Default value is `256` + +Prefetch count. If acknowledgements are enabled with the `ack` +option, specifies the number of outstanding unacknowledged +messages allowed. + +[id="{version}-plugins-{type}s-{plugin}-queue"] +===== `queue` + + * Value type is <> + * Default value is `""` + +The properties to extract from each message and store in a +@metadata field. + +Technically the exchange, redeliver, and routing-key +properties belong to the envelope and not the message but we +ignore that distinction here. However, we extract the +headers separately via get_headers even though the header +table technically is a message property. + +Freezing all strings so that code modifying the event's +@metadata field can't touch them. + +If updating this list, remember to update the documentation +above too. +The default codec for this plugin is JSON. You can override this to suit your particular needs however. +The name of the queue Logstash will consume events from. If +left empty, a transient queue with an randomly chosen name +will be created. + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * There is no default value for this setting. + +Enable or disable SSL. +Note that by default remote certificate verification is off. +Specify ssl_certificate_path and ssl_certificate_password if you need +certificate verification + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"] +===== `ssl_certificate_password` + + * Value type is <> + * There is no default value for this setting. + +Password for the encrypted PKCS12 (.p12) certificate file specified in ssl_certificate_path + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"] +===== `ssl_certificate_path` + + * Value type is <> + * There is no default value for this setting. + +Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host + +[id="{version}-plugins-{type}s-{plugin}-ssl_version"] +===== `ssl_version` + + * Value type is <> + * Default value is `"TLSv1.2"` + +Version of the SSL protocol to use. + +[id="{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds"] +===== `subscription_retry_interval_seconds` + + * This is a required setting. + * Value type is <> + * Default value is `5` + +Amount of time in seconds to wait after a failed subscription request +before retrying. Subscribes can fail if the server goes away and then comes back. 
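+
+Connection liveness can be tuned with the heartbeat and timeout settings;
+a sketch (values are illustrative):
+
+[source,ruby]
+    input {
+      rabbitmq {
+        host => "rabbitmq.example.com"   # placeholder
+        heartbeat => 30                  # seconds between heartbeats
+        connection_timeout => 5000       # milliseconds; infinite if unset
+      }
+    }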
+ +[id="{version}-plugins-{type}s-{plugin}-threads"] +===== `threads` + + * Value type is <> + * Default value is `1` + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * Default value is `"guest"` + +RabbitMQ username + +[id="{version}-plugins-{type}s-{plugin}-vhost"] +===== `vhost` + + * Value type is <> + * Default value is `"/"` + +The vhost (virtual host) to use. If you don't know what this +is, leave the default. With the exception of the default +vhost ("/"), names of vhosts should not begin with a forward +slash. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/rabbitmq-v6.0.1.asciidoc b/docs/versioned-plugins/inputs/rabbitmq-v6.0.1.asciidoc new file mode 100644 index 000000000..29eae981e --- /dev/null +++ b/docs/versioned-plugins/inputs/rabbitmq-v6.0.1.asciidoc @@ -0,0 +1,395 @@ +:plugin: rabbitmq +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v6.0.1 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-input-rabbitmq/blob/v6.0.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Rabbitmq input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Pull events from a http://www.rabbitmq.com/[RabbitMQ] queue. + +The default settings will create an entirely transient queue and listen for all messages by default. +If you need durability or any other advanced settings, please set the appropriate options + +This plugin uses the http://rubymarchhare.info/[March Hare] library +for interacting with the RabbitMQ server. Most configuration options +map directly to standard RabbitMQ and AMQP concepts. The +https://www.rabbitmq.com/amqp-0-9-1-reference.html[AMQP 0-9-1 reference guide] +and other parts of the RabbitMQ documentation are useful for deeper +understanding. + +The properties of messages received will be stored in the +`[@metadata][rabbitmq_properties]` field if the `@metadata_enabled` setting is checked. +Note that storing metadata may degrade performance. +The following properties may be available (in most cases dependent on whether +they were set by the sender): + +* app-id +* cluster-id +* consumer-tag +* content-encoding +* content-type +* correlation-id +* delivery-mode +* exchange +* expiration +* message-id +* priority +* redeliver +* reply-to +* routing-key +* timestamp +* type +* user-id + +For example, to get the RabbitMQ message's timestamp property +into the Logstash event's `@timestamp` field, use the date +filter to parse the `[@metadata][rabbitmq_properties][timestamp]` +field: +[source,ruby] + filter { + if [@metadata][rabbitmq_properties][timestamp] { + date { + match => ["[@metadata][rabbitmq_properties][timestamp]", "UNIX"] + } + } + } + +Additionally, any message headers will be saved in the +`[@metadata][rabbitmq_headers]` field. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Rabbitmq Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-ack>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-auto_delete>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclusive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_enabled>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-prefetch_count>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-ack"] +===== `ack` + + * Value type is <> + * Default value is `true` + +Enable message acknowledgements. With acknowledgements +messages fetched by Logstash but not yet sent into the +Logstash pipeline will be requeued by the server if Logstash +shuts down. Acknowledgements will however hurt the message +throughput. + +This will only send an ack back every `prefetch_count` messages. +Working in batches provides a performance boost here. + +[id="{version}-plugins-{type}s-{plugin}-arguments"] +===== `arguments` + + * Value type is <> + * Default value is `{}` + +Extra queue arguments as an array. +To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}` + +[id="{version}-plugins-{type}s-{plugin}-auto_delete"] +===== `auto_delete` + + * Value type is <> + * Default value is `false` + +Should the queue be deleted on the broker when the last consumer +disconnects? Set this option to `false` if you want the queue to remain +on the broker, queueing up messages until a consumer comes along to +consume them. + +[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"] +===== `automatic_recovery` + + * Value type is <> + * Default value is `true` + +Set this to automatically recover from a broken connection. You almost certainly don't want to override this!!! 
+ +[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"] +===== `connect_retry_interval` + + * Value type is <> + * Default value is `1` + +Time in seconds to wait before retrying a connection + +[id="{version}-plugins-{type}s-{plugin}-connection_timeout"] +===== `connection_timeout` + + * Value type is <> + * There is no default value for this setting. + +The default connection timeout in milliseconds. If not specified the timeout is infinite. + +[id="{version}-plugins-{type}s-{plugin}-durable"] +===== `durable` + + * Value type is <> + * Default value is `false` + +Is this queue durable? (aka; Should it survive a broker restart?) + +[id="{version}-plugins-{type}s-{plugin}-exchange"] +===== `exchange` + + * Value type is <> + * There is no default value for this setting. + +The name of the exchange to bind the queue to. Specify `exchange_type` +as well to declare the exchange if it does not exist + +[id="{version}-plugins-{type}s-{plugin}-exchange_type"] +===== `exchange_type` + + * Value type is <> + * There is no default value for this setting. + +The type of the exchange to bind to. Specifying this will cause this plugin +to declare the exchange if it does not exist. + +[id="{version}-plugins-{type}s-{plugin}-exclusive"] +===== `exclusive` + + * Value type is <> + * Default value is `false` + +Is the queue exclusive? Exclusive queues can only be used by the connection +that declared them and will be deleted when it is closed (e.g. due to a Logstash +restart). + +[id="{version}-plugins-{type}s-{plugin}-heartbeat"] +===== `heartbeat` + + * Value type is <> + * There is no default value for this setting. + +Heartbeat delay in seconds. If unspecified no heartbeats will be sent + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Common functionality for the rabbitmq input/output +RabbitMQ server address(es) +host can either be a single host, or a list of hosts +i.e. + host => "localhost" +or + host => ["host01", "host02] + +if multiple hosts are provided on the initial connection and any subsequent +recovery attempts of the hosts is chosen at random and connected to. +Note that only one host connection is active at a time. + +[id="{version}-plugins-{type}s-{plugin}-key"] +===== `key` + + * Value type is <> + * Default value is `"logstash"` + +The routing key to use when binding a queue to the exchange. +This is only relevant for direct or topic exchanges. + +* Routing keys are ignored on fanout exchanges. +* Wildcards are not valid on direct exchanges. + +[id="{version}-plugins-{type}s-{plugin}-metadata_enabled"] +===== `metadata_enabled` + + * Value type is <> + * Default value is `false` + +Enable the storage of message headers and properties in `@metadata`. This may impact performance + +[id="{version}-plugins-{type}s-{plugin}-passive"] +===== `passive` + + * Value type is <> + * Default value is `false` + +If true the queue will be passively declared, meaning it must +already exist on the server. To have Logstash create the queue +if necessary leave this option as false. If actively declaring +a queue that already exists, the queue options for this plugin +(durable etc) must match those of the existing queue. 
+ +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * Default value is `"guest"` + +RabbitMQ password + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `5672` + +RabbitMQ port to connect on + +[id="{version}-plugins-{type}s-{plugin}-prefetch_count"] +===== `prefetch_count` + + * Value type is <> + * Default value is `256` + +Prefetch count. If acknowledgements are enabled with the `ack` +option, specifies the number of outstanding unacknowledged +messages allowed. + +[id="{version}-plugins-{type}s-{plugin}-queue"] +===== `queue` + + * Value type is <> + * Default value is `""` + +The properties to extract from each message and store in a +@metadata field. + +Technically the exchange, redeliver, and routing-key +properties belong to the envelope and not the message but we +ignore that distinction here. However, we extract the +headers separately via get_headers even though the header +table technically is a message property. + +Freezing all strings so that code modifying the event's +@metadata field can't touch them. + +If updating this list, remember to update the documentation +above too. +The default codec for this plugin is JSON. You can override this to suit your particular needs however. +The name of the queue Logstash will consume events from. If +left empty, a transient queue with an randomly chosen name +will be created. + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * There is no default value for this setting. + +Enable or disable SSL. +Note that by default remote certificate verification is off. +Specify ssl_certificate_path and ssl_certificate_password if you need +certificate verification + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"] +===== `ssl_certificate_password` + + * Value type is <> + * There is no default value for this setting. + +Password for the encrypted PKCS12 (.p12) certificate file specified in ssl_certificate_path + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"] +===== `ssl_certificate_path` + + * Value type is <> + * There is no default value for this setting. + +Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host + +[id="{version}-plugins-{type}s-{plugin}-ssl_version"] +===== `ssl_version` + + * Value type is <> + * Default value is `"TLSv1.2"` + +Version of the SSL protocol to use. + +[id="{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds"] +===== `subscription_retry_interval_seconds` + + * This is a required setting. + * Value type is <> + * Default value is `5` + +Amount of time in seconds to wait after a failed subscription request +before retrying. Subscribes can fail if the server goes away and then comes back. + +[id="{version}-plugins-{type}s-{plugin}-threads"] +===== `threads` + + * Value type is <> + * Default value is `1` + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * Default value is `"guest"` + +RabbitMQ username + +[id="{version}-plugins-{type}s-{plugin}-vhost"] +===== `vhost` + + * Value type is <> + * Default value is `"/"` + +The vhost (virtual host) to use. If you don't know what this +is, leave the default. With the exception of the default +vhost ("/"), names of vhosts should not begin with a forward +slash. 
+ + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/rabbitmq-v6.0.2.asciidoc b/docs/versioned-plugins/inputs/rabbitmq-v6.0.2.asciidoc new file mode 100644 index 000000000..44a58bb02 --- /dev/null +++ b/docs/versioned-plugins/inputs/rabbitmq-v6.0.2.asciidoc @@ -0,0 +1,395 @@ +:plugin: rabbitmq +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v6.0.2 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-input-rabbitmq/blob/v6.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Rabbitmq input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Pull events from a http://www.rabbitmq.com/[RabbitMQ] queue. + +The default settings will create an entirely transient queue and listen for all messages by default. +If you need durability or any other advanced settings, please set the appropriate options + +This plugin uses the http://rubymarchhare.info/[March Hare] library +for interacting with the RabbitMQ server. Most configuration options +map directly to standard RabbitMQ and AMQP concepts. The +https://www.rabbitmq.com/amqp-0-9-1-reference.html[AMQP 0-9-1 reference guide] +and other parts of the RabbitMQ documentation are useful for deeper +understanding. + +The properties of messages received will be stored in the +`[@metadata][rabbitmq_properties]` field if the `@metadata_enabled` setting is checked. +Note that storing metadata may degrade performance. +The following properties may be available (in most cases dependent on whether +they were set by the sender): + +* app-id +* cluster-id +* consumer-tag +* content-encoding +* content-type +* correlation-id +* delivery-mode +* exchange +* expiration +* message-id +* priority +* redeliver +* reply-to +* routing-key +* timestamp +* type +* user-id + +For example, to get the RabbitMQ message's timestamp property +into the Logstash event's `@timestamp` field, use the date +filter to parse the `[@metadata][rabbitmq_properties][timestamp]` +field: +[source,ruby] + filter { + if [@metadata][rabbitmq_properties][timestamp] { + date { + match => ["[@metadata][rabbitmq_properties][timestamp]", "UNIX"] + } + } + } + +Additionally, any message headers will be saved in the +`[@metadata][rabbitmq_headers]` field. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Rabbitmq Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-ack>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-auto_delete>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclusive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_enabled>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-prefetch_count>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-ack"] +===== `ack` + + * Value type is <> + * Default value is `true` + +Enable message acknowledgements. With acknowledgements +messages fetched by Logstash but not yet sent into the +Logstash pipeline will be requeued by the server if Logstash +shuts down. Acknowledgements will however hurt the message +throughput. + +This will only send an ack back every `prefetch_count` messages. +Working in batches provides a performance boost here. + +[id="{version}-plugins-{type}s-{plugin}-arguments"] +===== `arguments` + + * Value type is <> + * Default value is `{}` + +Extra queue arguments as an array. +To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}` + +[id="{version}-plugins-{type}s-{plugin}-auto_delete"] +===== `auto_delete` + + * Value type is <> + * Default value is `false` + +Should the queue be deleted on the broker when the last consumer +disconnects? Set this option to `false` if you want the queue to remain +on the broker, queueing up messages until a consumer comes along to +consume them. + +[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"] +===== `automatic_recovery` + + * Value type is <> + * Default value is `true` + +Set this to automatically recover from a broken connection. You almost certainly don't want to override this!!! 
+ +[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"] +===== `connect_retry_interval` + + * Value type is <> + * Default value is `1` + +Time in seconds to wait before retrying a connection + +[id="{version}-plugins-{type}s-{plugin}-connection_timeout"] +===== `connection_timeout` + + * Value type is <> + * There is no default value for this setting. + +The default connection timeout in milliseconds. If not specified the timeout is infinite. + +[id="{version}-plugins-{type}s-{plugin}-durable"] +===== `durable` + + * Value type is <> + * Default value is `false` + +Is this queue durable? (aka; Should it survive a broker restart?) + +[id="{version}-plugins-{type}s-{plugin}-exchange"] +===== `exchange` + + * Value type is <> + * There is no default value for this setting. + +The name of the exchange to bind the queue to. Specify `exchange_type` +as well to declare the exchange if it does not exist + +[id="{version}-plugins-{type}s-{plugin}-exchange_type"] +===== `exchange_type` + + * Value type is <> + * There is no default value for this setting. + +The type of the exchange to bind to. Specifying this will cause this plugin +to declare the exchange if it does not exist. + +[id="{version}-plugins-{type}s-{plugin}-exclusive"] +===== `exclusive` + + * Value type is <> + * Default value is `false` + +Is the queue exclusive? Exclusive queues can only be used by the connection +that declared them and will be deleted when it is closed (e.g. due to a Logstash +restart). + +[id="{version}-plugins-{type}s-{plugin}-heartbeat"] +===== `heartbeat` + + * Value type is <> + * There is no default value for this setting. + +Heartbeat delay in seconds. If unspecified no heartbeats will be sent + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Common functionality for the rabbitmq input/output +RabbitMQ server address(es) +host can either be a single host, or a list of hosts +i.e. + host => "localhost" +or + host => ["host01", "host02] + +if multiple hosts are provided on the initial connection and any subsequent +recovery attempts of the hosts is chosen at random and connected to. +Note that only one host connection is active at a time. + +[id="{version}-plugins-{type}s-{plugin}-key"] +===== `key` + + * Value type is <> + * Default value is `"logstash"` + +The routing key to use when binding a queue to the exchange. +This is only relevant for direct or topic exchanges. + +* Routing keys are ignored on fanout exchanges. +* Wildcards are not valid on direct exchanges. + +[id="{version}-plugins-{type}s-{plugin}-metadata_enabled"] +===== `metadata_enabled` + + * Value type is <> + * Default value is `false` + +Enable the storage of message headers and properties in `@metadata`. This may impact performance + +[id="{version}-plugins-{type}s-{plugin}-passive"] +===== `passive` + + * Value type is <> + * Default value is `false` + +If true the queue will be passively declared, meaning it must +already exist on the server. To have Logstash create the queue +if necessary leave this option as false. If actively declaring +a queue that already exists, the queue options for this plugin +(durable etc) must match those of the existing queue. 
+ +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * Default value is `"guest"` + +RabbitMQ password + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `5672` + +RabbitMQ port to connect on + +[id="{version}-plugins-{type}s-{plugin}-prefetch_count"] +===== `prefetch_count` + + * Value type is <> + * Default value is `256` + +Prefetch count. If acknowledgements are enabled with the `ack` +option, specifies the number of outstanding unacknowledged +messages allowed. + +[id="{version}-plugins-{type}s-{plugin}-queue"] +===== `queue` + + * Value type is <> + * Default value is `""` + +The properties to extract from each message and store in a +@metadata field. + +Technically the exchange, redeliver, and routing-key +properties belong to the envelope and not the message but we +ignore that distinction here. However, we extract the +headers separately via get_headers even though the header +table technically is a message property. + +Freezing all strings so that code modifying the event's +@metadata field can't touch them. + +If updating this list, remember to update the documentation +above too. +The default codec for this plugin is JSON. You can override this to suit your particular needs however. +The name of the queue Logstash will consume events from. If +left empty, a transient queue with an randomly chosen name +will be created. + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * There is no default value for this setting. + +Enable or disable SSL. +Note that by default remote certificate verification is off. +Specify ssl_certificate_path and ssl_certificate_password if you need +certificate verification + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"] +===== `ssl_certificate_password` + + * Value type is <> + * There is no default value for this setting. + +Password for the encrypted PKCS12 (.p12) certificate file specified in ssl_certificate_path + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"] +===== `ssl_certificate_path` + + * Value type is <> + * There is no default value for this setting. + +Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host + +[id="{version}-plugins-{type}s-{plugin}-ssl_version"] +===== `ssl_version` + + * Value type is <> + * Default value is `"TLSv1.2"` + +Version of the SSL protocol to use. + +[id="{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds"] +===== `subscription_retry_interval_seconds` + + * This is a required setting. + * Value type is <> + * Default value is `5` + +Amount of time in seconds to wait after a failed subscription request +before retrying. Subscribes can fail if the server goes away and then comes back. + +[id="{version}-plugins-{type}s-{plugin}-threads"] +===== `threads` + + * Value type is <> + * Default value is `1` + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * Default value is `"guest"` + +RabbitMQ username + +[id="{version}-plugins-{type}s-{plugin}-vhost"] +===== `vhost` + + * Value type is <> + * Default value is `"/"` + +The vhost (virtual host) to use. If you don't know what this +is, leave the default. With the exception of the default +vhost ("/"), names of vhosts should not begin with a forward +slash. 
+ + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/rackspace-index.asciidoc b/docs/versioned-plugins/inputs/rackspace-index.asciidoc new file mode 100644 index 000000000..3314354f0 --- /dev/null +++ b/docs/versioned-plugins/inputs/rackspace-index.asciidoc @@ -0,0 +1,12 @@ +:plugin: rackspace +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-06-23 +|======================================================================= + +include::rackspace-v3.0.1.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/rackspace-v3.0.1.asciidoc b/docs/versioned-plugins/inputs/rackspace-v3.0.1.asciidoc new file mode 100644 index 000000000..2dce67d9b --- /dev/null +++ b/docs/versioned-plugins/inputs/rackspace-v3.0.1.asciidoc @@ -0,0 +1,102 @@ +:plugin: rackspace +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.1 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-input-rackspace/blob/v3.0.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Rackspace input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Rackspace Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-api_key>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-claim>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-region>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ttl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-username>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-api_key"] +===== `api_key` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Rackspace Cloud API Key + +[id="{version}-plugins-{type}s-{plugin}-claim"] +===== `claim` + + * Value type is <> + * Default value is `1` + +number of messages to claim +Min: 1, Max: 10 + +[id="{version}-plugins-{type}s-{plugin}-queue"] +===== `queue` + + * Value type is <> + * Default value is `"logstash"` + +Rackspace Queue Name + +[id="{version}-plugins-{type}s-{plugin}-region"] +===== `region` + + * Value type is <> + * Default value is `"dfw"` + +Rackspace region +`ord, dfw, lon, syd,` etc + +[id="{version}-plugins-{type}s-{plugin}-ttl"] +===== `ttl` + + * Value type is <> + * Default value is `60` + +length of time to hold claim +Min: 60 + +[id="{version}-plugins-{type}s-{plugin}-username"] +===== `username` + + * This is a required setting. 
+  * Value type is <>
+  * There is no default value for this setting.
+
+Rackspace Cloud Username
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/redis-index.asciidoc b/docs/versioned-plugins/inputs/redis-index.asciidoc
new file mode 100644
index 000000000..71634f4b1
--- /dev/null
+++ b/docs/versioned-plugins/inputs/redis-index.asciidoc
@@ -0,0 +1,18 @@
+:plugin: redis
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-09-12
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::redis-v3.1.6.asciidoc[]
+include::redis-v3.1.5.asciidoc[]
+include::redis-v3.1.4.asciidoc[]
+include::redis-v3.1.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/redis-v3.1.3.asciidoc b/docs/versioned-plugins/inputs/redis-v3.1.3.asciidoc
new file mode 100644
index 000000000..0606e9ede
--- /dev/null
+++ b/docs/versioned-plugins/inputs/redis-v3.1.3.asciidoc
@@ -0,0 +1,139 @@
+:plugin: redis
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-redis/blob/v3.1.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Redis input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input will read events from a Redis instance; it supports both Redis channels and lists.
+The list command (BLPOP) used by Logstash is supported in Redis v1.3.1+, and
+the channel commands used by Logstash are found in Redis v1.3.8+.
+While you may be able to make these Redis versions work, the best performance
+and stability will be found in more recent stable versions. Versions 2.6.0+
+are recommended.
+
+For more information about Redis, see http://redis.io/.
+
+`batch_count` note: If you use the `batch_count` setting, you *must* use a Redis version 2.6.0 or
+newer. Anything older does not support the operations used by batching.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Redis Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
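+
+Ahead of the option table below, here is a minimal list-style
+configuration for illustration (the key name is an arbitrary example):
+[source,ruby]
+    input {
+      redis {
+        host => "127.0.0.1"
+        data_type => "list"
+        key => "logstash"
+      }
+    }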
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-batch_count>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-data_type>> |<>, one of `["list", "channel", "pattern_channel"]`|Yes +| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-batch_count"] +===== `batch_count` + + * Value type is <> + * Default value is `125` + +The number of events to return from Redis using EVAL. + +[id="{version}-plugins-{type}s-{plugin}-data_type"] +===== `data_type` + + * This is a required setting. + * Value can be any of: `list`, `channel`, `pattern_channel` + * There is no default value for this setting. + +Specify either list or channel. If `redis\_type` is `list`, then we will BLPOP the +key. If `redis\_type` is `channel`, then we will SUBSCRIBE to the key. +If `redis\_type` is `pattern_channel`, then we will PSUBSCRIBE to the key. + +[id="{version}-plugins-{type}s-{plugin}-db"] +===== `db` + + * Value type is <> + * Default value is `0` + +The Redis database number. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"127.0.0.1"` + +The hostname of your Redis server. + +[id="{version}-plugins-{type}s-{plugin}-key"] +===== `key` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The name of a Redis list or channel. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to authenticate with. There is no authentication by default. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `6379` + +The port to connect on. + +[id="{version}-plugins-{type}s-{plugin}-threads"] +===== `threads` + + * Value type is <> + * Default value is `1` + + + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `5` + +Initial connection timeout in seconds. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/redis-v3.1.4.asciidoc b/docs/versioned-plugins/inputs/redis-v3.1.4.asciidoc new file mode 100644 index 000000000..18e4636d8 --- /dev/null +++ b/docs/versioned-plugins/inputs/redis-v3.1.4.asciidoc @@ -0,0 +1,139 @@ +:plugin: redis +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.4 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-input-redis/blob/v3.1.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Redis input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This input will read events from a Redis instance; it supports both Redis channels and lists. +The list command (BLPOP) used by Logstash is supported in Redis v1.3.1+, and +the channel commands used by Logstash are found in Redis v1.3.8+. +While you may be able to make these Redis versions work, the best performance +and stability will be found in more recent stable versions. Versions 2.6.0+ +are recommended. + +For more information about Redis, see + +`batch_count` note: If you use the `batch_count` setting, you *must* use a Redis version 2.6.0 or +newer. Anything older does not support the operations used by batching. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Redis Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-batch_count>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-data_type>> |<>, one of `["list", "channel", "pattern_channel"]`|Yes +| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-batch_count"] +===== `batch_count` + + * Value type is <> + * Default value is `125` + +The number of events to return from Redis using EVAL. + +[id="{version}-plugins-{type}s-{plugin}-data_type"] +===== `data_type` + + * This is a required setting. + * Value can be any of: `list`, `channel`, `pattern_channel` + * There is no default value for this setting. + +Specify either list or channel. If `redis\_type` is `list`, then we will BLPOP the +key. If `redis\_type` is `channel`, then we will SUBSCRIBE to the key. +If `redis\_type` is `pattern_channel`, then we will PSUBSCRIBE to the key. + +[id="{version}-plugins-{type}s-{plugin}-db"] +===== `db` + + * Value type is <> + * Default value is `0` + +The Redis database number. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"127.0.0.1"` + +The hostname of your Redis server. + +[id="{version}-plugins-{type}s-{plugin}-key"] +===== `key` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The name of a Redis list or channel. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to authenticate with. There is no authentication by default. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `6379` + +The port to connect on. 
+ +[id="{version}-plugins-{type}s-{plugin}-threads"] +===== `threads` + + * Value type is <> + * Default value is `1` + + + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `5` + +Initial connection timeout in seconds. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/redis-v3.1.5.asciidoc b/docs/versioned-plugins/inputs/redis-v3.1.5.asciidoc new file mode 100644 index 000000000..25ee9a42a --- /dev/null +++ b/docs/versioned-plugins/inputs/redis-v3.1.5.asciidoc @@ -0,0 +1,139 @@ +:plugin: redis +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.5 +:release_date: 2017-09-12 +:changelog_url: https://github.com/logstash-plugins/logstash-input-redis/blob/v3.1.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Redis input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This input will read events from a Redis instance; it supports both Redis channels and lists. +The list command (BLPOP) used by Logstash is supported in Redis v1.3.1+, and +the channel commands used by Logstash are found in Redis v1.3.8+. +While you may be able to make these Redis versions work, the best performance +and stability will be found in more recent stable versions. Versions 2.6.0+ +are recommended. + +For more information about Redis, see + +`batch_count` note: If you use the `batch_count` setting, you *must* use a Redis version 2.6.0 or +newer. Anything older does not support the operations used by batching. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Redis Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-batch_count>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-data_type>> |<>, one of `["list", "channel", "pattern_channel"]`|Yes +| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-batch_count"] +===== `batch_count` + + * Value type is <> + * Default value is `125` + +The number of events to return from Redis using EVAL. + +[id="{version}-plugins-{type}s-{plugin}-data_type"] +===== `data_type` + + * This is a required setting. + * Value can be any of: `list`, `channel`, `pattern_channel` + * There is no default value for this setting. + +Specify either list or channel. 
If `data_type` is `list`, then we will BLPOP the
+key. If `data_type` is `channel`, then we will SUBSCRIBE to the key.
+If `data_type` is `pattern_channel`, then we will PSUBSCRIBE to the key.
+
+[id="{version}-plugins-{type}s-{plugin}-db"]
+===== `db`
+
+  * Value type is <>
+  * Default value is `0`
+
+The Redis database number.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * Value type is <>
+  * Default value is `"127.0.0.1"`
+
+The hostname of your Redis server.
+
+[id="{version}-plugins-{type}s-{plugin}-key"]
+===== `key`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The name of a Redis list or channel.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Password to authenticate with. There is no authentication by default.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * Value type is <>
+  * Default value is `6379`
+
+The port to connect on.
+
+[id="{version}-plugins-{type}s-{plugin}-threads"]
+===== `threads`
+
+  * Value type is <>
+  * Default value is `1`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+  * Value type is <>
+  * Default value is `5`
+
+Initial connection timeout in seconds.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/redis-v3.1.6.asciidoc b/docs/versioned-plugins/inputs/redis-v3.1.6.asciidoc
new file mode 100644
index 000000000..d7817f32a
--- /dev/null
+++ b/docs/versioned-plugins/inputs/redis-v3.1.6.asciidoc
@@ -0,0 +1,139 @@
+:plugin: redis
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.6
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-redis/blob/v3.1.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Redis input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This input will read events from a Redis instance; it supports both Redis channels and lists.
+The list command (BLPOP) used by Logstash is supported in Redis v1.3.1+, and
+the channel commands used by Logstash are found in Redis v1.3.8+.
+While you may be able to make these Redis versions work, the best performance
+and stability will be found in more recent stable versions. Versions 2.6.0+
+are recommended.
+
+For more information about Redis, see http://redis.io/.
+
+`batch_count` note: If you use the `batch_count` setting, you *must* use a Redis version 2.6.0 or
+newer. Anything older does not support the operations used by batching.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Redis Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-batch_count>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-data_type>> |<>, one of `["list", "channel", "pattern_channel"]`|Yes +| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-batch_count"] +===== `batch_count` + + * Value type is <> + * Default value is `125` + +The number of events to return from Redis using EVAL. + +[id="{version}-plugins-{type}s-{plugin}-data_type"] +===== `data_type` + + * This is a required setting. + * Value can be any of: `list`, `channel`, `pattern_channel` + * There is no default value for this setting. + +Specify either list or channel. If `redis\_type` is `list`, then we will BLPOP the +key. If `redis\_type` is `channel`, then we will SUBSCRIBE to the key. +If `redis\_type` is `pattern_channel`, then we will PSUBSCRIBE to the key. + +[id="{version}-plugins-{type}s-{plugin}-db"] +===== `db` + + * Value type is <> + * Default value is `0` + +The Redis database number. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"127.0.0.1"` + +The hostname of your Redis server. + +[id="{version}-plugins-{type}s-{plugin}-key"] +===== `key` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The name of a Redis list or channel. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to authenticate with. There is no authentication by default. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `6379` + +The port to connect on. + +[id="{version}-plugins-{type}s-{plugin}-threads"] +===== `threads` + + * Value type is <> + * Default value is `1` + + + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `5` + +Initial connection timeout in seconds. 
+ + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/relp-index.asciidoc b/docs/versioned-plugins/inputs/relp-index.asciidoc new file mode 100644 index 000000000..edd7f331c --- /dev/null +++ b/docs/versioned-plugins/inputs/relp-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: relp +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-16 +| <> | 2017-06-23 +|======================================================================= + +include::relp-v3.0.3.asciidoc[] +include::relp-v3.0.2.asciidoc[] +include::relp-v3.0.1.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/relp-v3.0.1.asciidoc b/docs/versioned-plugins/inputs/relp-v3.0.1.asciidoc new file mode 100644 index 000000000..b6624af26 --- /dev/null +++ b/docs/versioned-plugins/inputs/relp-v3.0.1.asciidoc @@ -0,0 +1,126 @@ +:plugin: relp +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.1 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-input-relp/blob/v3.0.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Relp input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read RELP events over a TCP socket. + +For more information about RELP, see + + +This protocol implements application-level acknowledgements to help protect +against message loss. + +Message acks only function as far as messages being put into the queue for +filters; anything lost after that point will not be retransmitted + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Relp Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-ssl_cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +The address to listen on. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The port to listen on. 
+ +[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"] +===== `ssl_cacert` + + * Value type is <> + * There is no default value for this setting. + +The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. + +[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] +===== `ssl_cert` + + * Value type is <> + * There is no default value for this setting. + +SSL certificate path + +[id="{version}-plugins-{type}s-{plugin}-ssl_enable"] +===== `ssl_enable` + + * Value type is <> + * Default value is `false` + +Enable SSL (must be set for other `ssl_` options to take effect). + +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + + * Value type is <> + * There is no default value for this setting. + +SSL key path + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] +===== `ssl_key_passphrase` + + * Value type is <> + * Default value is `nil` + +SSL key passphrase + +[id="{version}-plugins-{type}s-{plugin}-ssl_verify"] +===== `ssl_verify` + + * Value type is <> + * Default value is `true` + +Verify the identity of the other end of the SSL connection against the CA. +For input, sets the field `sslsubject` to that of the client certificate. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/relp-v3.0.2.asciidoc b/docs/versioned-plugins/inputs/relp-v3.0.2.asciidoc new file mode 100644 index 000000000..f6b536910 --- /dev/null +++ b/docs/versioned-plugins/inputs/relp-v3.0.2.asciidoc @@ -0,0 +1,126 @@ +:plugin: relp +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.2 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-input-relp/blob/v3.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Relp input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read RELP events over a TCP socket. + +For more information about RELP, see + + +This protocol implements application-level acknowledgements to help protect +against message loss. + +Message acks only function as far as messages being put into the queue for +filters; anything lost after that point will not be retransmitted + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Relp Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-ssl_cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +The address to listen on. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The port to listen on. + +[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"] +===== `ssl_cacert` + + * Value type is <> + * There is no default value for this setting. + +The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. + +[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] +===== `ssl_cert` + + * Value type is <> + * There is no default value for this setting. + +SSL certificate path + +[id="{version}-plugins-{type}s-{plugin}-ssl_enable"] +===== `ssl_enable` + + * Value type is <> + * Default value is `false` + +Enable SSL (must be set for other `ssl_` options to take effect). + +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + + * Value type is <> + * There is no default value for this setting. + +SSL key path + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] +===== `ssl_key_passphrase` + + * Value type is <> + * Default value is `nil` + +SSL key passphrase + +[id="{version}-plugins-{type}s-{plugin}-ssl_verify"] +===== `ssl_verify` + + * Value type is <> + * Default value is `true` + +Verify the identity of the other end of the SSL connection against the CA. +For input, sets the field `sslsubject` to that of the client certificate. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/relp-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/relp-v3.0.3.asciidoc new file mode 100644 index 000000000..d6208e93e --- /dev/null +++ b/docs/versioned-plugins/inputs/relp-v3.0.3.asciidoc @@ -0,0 +1,126 @@ +:plugin: relp +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-input-relp/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Relp input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read RELP events over a TCP socket. 
+ +For more information about RELP, see + + +This protocol implements application-level acknowledgements to help protect +against message loss. + +Message acks only function as far as messages being put into the queue for +filters; anything lost after that point will not be retransmitted + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Relp Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-ssl_cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +The address to listen on. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The port to listen on. + +[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"] +===== `ssl_cacert` + + * Value type is <> + * There is no default value for this setting. + +The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. + +[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] +===== `ssl_cert` + + * Value type is <> + * There is no default value for this setting. + +SSL certificate path + +[id="{version}-plugins-{type}s-{plugin}-ssl_enable"] +===== `ssl_enable` + + * Value type is <> + * Default value is `false` + +Enable SSL (must be set for other `ssl_` options to take effect). + +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + + * Value type is <> + * There is no default value for this setting. + +SSL key path + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] +===== `ssl_key_passphrase` + + * Value type is <> + * Default value is `nil` + +SSL key passphrase + +[id="{version}-plugins-{type}s-{plugin}-ssl_verify"] +===== `ssl_verify` + + * Value type is <> + * Default value is `true` + +Verify the identity of the other end of the SSL connection against the CA. +For input, sets the field `sslsubject` to that of the client certificate. 
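+
+Combining the SSL options above, a TLS-enabled listener might be
+sketched as follows (certificate and key paths are placeholders):
+[source,ruby]
+    input {
+      relp {
+        port => 2514
+        ssl_enable => true
+        ssl_cert => "/etc/logstash/relp.crt"
+        ssl_key => "/etc/logstash/relp.key"
+        ssl_verify => true
+      }
+    }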
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/rss-index.asciidoc b/docs/versioned-plugins/inputs/rss-index.asciidoc
new file mode 100644
index 000000000..9dfad7247
--- /dev/null
+++ b/docs/versioned-plugins/inputs/rss-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: rss
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <<v3.0.4-plugins-inputs-rss,v3.0.4>> | 2017-11-07
+| <<v3.0.3-plugins-inputs-rss,v3.0.3>> | 2017-08-16
+| <<v3.0.2-plugins-inputs-rss,v3.0.2>> | 2017-06-23
+|=======================================================================
+
+include::rss-v3.0.4.asciidoc[]
+include::rss-v3.0.3.asciidoc[]
+include::rss-v3.0.2.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/rss-v3.0.2.asciidoc b/docs/versioned-plugins/inputs/rss-v3.0.2.asciidoc
new file mode 100644
index 000000000..ee9a436f3
--- /dev/null
+++ b/docs/versioned-plugins/inputs/rss-v3.0.2.asciidoc
@@ -0,0 +1,70 @@
+:plugin: rss
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-rss/blob/v3.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Rss input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events from an RSS or Atom feed. The feed is polled at a fixed interval,
+and each entry in the feed is emitted as its own event.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Rss Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-interval>> |<<number,number>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-url>> |<<string,string>>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+ * This is a required setting.
+ * Value type is <<number,number>>
+ * There is no default value for this setting.
+
+How often to poll the feed. Value is in seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-url"]
+===== `url`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+RSS/Atom feed URL
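+
+As a usage sketch, the following configuration polls a feed once an hour and
+prints each entry to standard out. The feed URL is a hypothetical placeholder:
+
+[source,ruby]
+----------------------------------
+input {
+  rss {
+    url => "https://example.com/blog/feed"   # hypothetical feed URL
+    interval => 3600                          # poll once an hour
+  }
+}
+
+output {
+  stdout {
+    codec => rubydebug
+  }
+}
+----------------------------------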
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/rss-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/rss-v3.0.3.asciidoc
new file mode 100644
index 000000000..54d360e8e
--- /dev/null
+++ b/docs/versioned-plugins/inputs/rss-v3.0.3.asciidoc
@@ -0,0 +1,70 @@
+:plugin: rss
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-input-rss/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Rss input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events from an RSS or Atom feed. The feed is polled at a fixed interval,
+and each entry in the feed is emitted as its own event.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Rss Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-interval>> |<<number,number>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-url>> |<<string,string>>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+ * This is a required setting.
+ * Value type is <<number,number>>
+ * There is no default value for this setting.
+
+How often to poll the feed. Value is in seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-url"]
+===== `url`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+RSS/Atom feed URL
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/rss-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/rss-v3.0.4.asciidoc
new file mode 100644
index 000000000..0b8046065
--- /dev/null
+++ b/docs/versioned-plugins/inputs/rss-v3.0.4.asciidoc
@@ -0,0 +1,70 @@
+:plugin: rss
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-rss/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Rss input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events from an RSS or Atom feed. The feed is polled at a fixed interval,
+and each entry in the feed is emitted as its own event.
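+
+==== Example
+
+The following is a minimal sketch that reads a feed every five minutes; the
+URL is a hypothetical placeholder:
+
+[source,ruby]
+----------------------------------
+input {
+  rss {
+    url => "https://example.com/news.atom"   # hypothetical feed URL
+    interval => 300                           # fetch every five minutes
+  }
+}
+----------------------------------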
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Rss Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-interval>> |<<number,number>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-url>> |<<string,string>>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-interval"]
+===== `interval`
+
+ * This is a required setting.
+ * Value type is <<number,number>>
+ * There is no default value for this setting.
+
+How often to poll the feed. Value is in seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-url"]
+===== `url`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+RSS/Atom feed URL
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/s3-index.asciidoc b/docs/versioned-plugins/inputs/s3-index.asciidoc
new file mode 100644
index 000000000..f82e887ba
--- /dev/null
+++ b/docs/versioned-plugins/inputs/s3-index.asciidoc
@@ -0,0 +1,22 @@
+:plugin: s3
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <<v3.2.0-plugins-inputs-s3,v3.2.0>> | 2018-01-09
+| <<v3.1.9-plugins-inputs-s3,v3.1.9>> | 2017-12-19
+| <<v3.1.8-plugins-inputs-s3,v3.1.8>> | 2017-11-07
+| <<v3.1.7-plugins-inputs-s3,v3.1.7>> | 2017-10-03
+| <<v3.1.6-plugins-inputs-s3,v3.1.6>> | 2017-08-16
+| <<v3.1.5-plugins-inputs-s3,v3.1.5>> | 2017-06-23
+|=======================================================================
+
+include::s3-v3.2.0.asciidoc[]
+include::s3-v3.1.9.asciidoc[]
+include::s3-v3.1.8.asciidoc[]
+include::s3-v3.1.7.asciidoc[]
+include::s3-v3.1.6.asciidoc[]
+include::s3-v3.1.5.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/s3-v3.1.5.asciidoc b/docs/versioned-plugins/inputs/s3-v3.1.5.asciidoc
new file mode 100644
index 000000000..02ad0778a
--- /dev/null
+++ b/docs/versioned-plugins/inputs/s3-v3.1.5.asciidoc
@@ -0,0 +1,214 @@
+:plugin: s3
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.5
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-s3/blob/v3.1.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== S3 input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Stream events from files in an S3 bucket.
+
+Each line from each file generates an event.
+Files ending in `.gz` are handled as gzip'ed files.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== S3 Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-backup_add_prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_bucket>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_dir>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bucket>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-delete>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclude_pattern>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-access_key_id"] +===== `access_key_id` + + * Value type is <> + * There is no default value for this setting. + +This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: + +1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config +2. External credentials file specified by `aws_credentials_file` +3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` +4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` +5. IAM Instance Profile (available when running inside EC2) + +[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] +===== `aws_credentials_file` + + * Value type is <> + * There is no default value for this setting. + +Path to YAML file containing a hash of AWS credentials. +This file will only be loaded if `access_key_id` and +`secret_access_key` aren't set. The contents of the +file should look like this: + +[source,ruby] +---------------------------------- + :access_key_id: "12345" + :secret_access_key: "54321" +---------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-backup_add_prefix"] +===== `backup_add_prefix` + + * Value type is <> + * Default value is `nil` + +Append a prefix to the key (full path including file name in s3) after processing. +If backing up to another (or the same) bucket, this effectively lets you +choose a new 'folder' to place the files in + +[id="{version}-plugins-{type}s-{plugin}-backup_to_bucket"] +===== `backup_to_bucket` + + * Value type is <> + * Default value is `nil` + +Name of a S3 bucket to backup processed files to. + +[id="{version}-plugins-{type}s-{plugin}-backup_to_dir"] +===== `backup_to_dir` + + * Value type is <> + * Default value is `nil` + +Path of a local directory to backup processed files to. 
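+
+Taken together, the backup options let you archive each object after it has
+been processed. The following is a sketch, with placeholder bucket names:
+
+[source,ruby]
+----------------------------------
+input {
+  s3 {
+    bucket => "my-logs"                     # placeholder source bucket
+    backup_to_bucket => "my-logs-archive"   # placeholder archive bucket
+    backup_add_prefix => "processed/"       # re-key archived objects under this 'folder'
+    delete => true                          # remove the original after a successful backup
+  }
+}
+----------------------------------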
+ +[id="{version}-plugins-{type}s-{plugin}-bucket"] +===== `bucket` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The name of the S3 bucket. + +[id="{version}-plugins-{type}s-{plugin}-delete"] +===== `delete` + + * Value type is <> + * Default value is `false` + +Whether to delete processed files from the original bucket. + +[id="{version}-plugins-{type}s-{plugin}-exclude_pattern"] +===== `exclude_pattern` + + * Value type is <> + * Default value is `nil` + +Ruby style regexp of keys to exclude from the bucket + +[id="{version}-plugins-{type}s-{plugin}-interval"] +===== `interval` + + * Value type is <> + * Default value is `60` + +Interval to wait between to check the file list again after a run is finished. +Value is in seconds. + +[id="{version}-plugins-{type}s-{plugin}-prefix"] +===== `prefix` + + * Value type is <> + * Default value is `nil` + +If specified, the prefix of filenames in the bucket must match (not a regexp) + +[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] +===== `proxy_uri` + + * Value type is <> + * There is no default value for this setting. + +URI to proxy server if required + +[id="{version}-plugins-{type}s-{plugin}-region"] +===== `region` + + * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1` + * Default value is `"us-east-1"` + +The AWS Region + +[id="{version}-plugins-{type}s-{plugin}-secret_access_key"] +===== `secret_access_key` + + * Value type is <> + * There is no default value for this setting. + +The AWS Secret Access Key + +[id="{version}-plugins-{type}s-{plugin}-session_token"] +===== `session_token` + + * Value type is <> + * There is no default value for this setting. + +The AWS Session token for temporary credential + +[id="{version}-plugins-{type}s-{plugin}-sincedb_path"] +===== `sincedb_path` + + * Value type is <> + * Default value is `nil` + +Where to write the since database (keeps track of the date +the last handled file was added to S3). The default will write +sincedb files to some path matching "$HOME/.sincedb*" +Should be a path with filename not just a directory. + +[id="{version}-plugins-{type}s-{plugin}-temporary_directory"] +===== `temporary_directory` + + * Value type is <> + * Default value is `"/tmp/logstash"` + +Set the directory where logstash will store the tmp files before processing them. +default to the current OS temporary directory in linux /tmp/logstash + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/s3-v3.1.6.asciidoc b/docs/versioned-plugins/inputs/s3-v3.1.6.asciidoc new file mode 100644 index 000000000..d5823f473 --- /dev/null +++ b/docs/versioned-plugins/inputs/s3-v3.1.6.asciidoc @@ -0,0 +1,214 @@ +:plugin: s3 +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.6 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-input-s3/blob/v3.1.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== S3 input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Stream events from files from a S3 bucket. + +Each line from each file generates an event. +Files ending in `.gz` are handled as gzip'ed files. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== S3 Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-backup_add_prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_bucket>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_dir>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bucket>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-delete>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclude_pattern>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-access_key_id"] +===== `access_key_id` + + * Value type is <> + * There is no default value for this setting. + +This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: + +1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config +2. External credentials file specified by `aws_credentials_file` +3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` +4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` +5. IAM Instance Profile (available when running inside EC2) + +[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] +===== `aws_credentials_file` + + * Value type is <> + * There is no default value for this setting. + +Path to YAML file containing a hash of AWS credentials. +This file will only be loaded if `access_key_id` and +`secret_access_key` aren't set. The contents of the +file should look like this: + +[source,ruby] +---------------------------------- + :access_key_id: "12345" + :secret_access_key: "54321" +---------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-backup_add_prefix"] +===== `backup_add_prefix` + + * Value type is <> + * Default value is `nil` + +Append a prefix to the key (full path including file name in s3) after processing. 
+If backing up to another (or the same) bucket, this effectively lets you +choose a new 'folder' to place the files in + +[id="{version}-plugins-{type}s-{plugin}-backup_to_bucket"] +===== `backup_to_bucket` + + * Value type is <> + * Default value is `nil` + +Name of a S3 bucket to backup processed files to. + +[id="{version}-plugins-{type}s-{plugin}-backup_to_dir"] +===== `backup_to_dir` + + * Value type is <> + * Default value is `nil` + +Path of a local directory to backup processed files to. + +[id="{version}-plugins-{type}s-{plugin}-bucket"] +===== `bucket` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The name of the S3 bucket. + +[id="{version}-plugins-{type}s-{plugin}-delete"] +===== `delete` + + * Value type is <> + * Default value is `false` + +Whether to delete processed files from the original bucket. + +[id="{version}-plugins-{type}s-{plugin}-exclude_pattern"] +===== `exclude_pattern` + + * Value type is <> + * Default value is `nil` + +Ruby style regexp of keys to exclude from the bucket + +[id="{version}-plugins-{type}s-{plugin}-interval"] +===== `interval` + + * Value type is <> + * Default value is `60` + +Interval to wait between to check the file list again after a run is finished. +Value is in seconds. + +[id="{version}-plugins-{type}s-{plugin}-prefix"] +===== `prefix` + + * Value type is <> + * Default value is `nil` + +If specified, the prefix of filenames in the bucket must match (not a regexp) + +[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] +===== `proxy_uri` + + * Value type is <> + * There is no default value for this setting. + +URI to proxy server if required + +[id="{version}-plugins-{type}s-{plugin}-region"] +===== `region` + + * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1` + * Default value is `"us-east-1"` + +The AWS Region + +[id="{version}-plugins-{type}s-{plugin}-secret_access_key"] +===== `secret_access_key` + + * Value type is <> + * There is no default value for this setting. + +The AWS Secret Access Key + +[id="{version}-plugins-{type}s-{plugin}-session_token"] +===== `session_token` + + * Value type is <> + * There is no default value for this setting. + +The AWS Session token for temporary credential + +[id="{version}-plugins-{type}s-{plugin}-sincedb_path"] +===== `sincedb_path` + + * Value type is <> + * Default value is `nil` + +Where to write the since database (keeps track of the date +the last handled file was added to S3). The default will write +sincedb files to some path matching "$HOME/.sincedb*" +Should be a path with filename not just a directory. + +[id="{version}-plugins-{type}s-{plugin}-temporary_directory"] +===== `temporary_directory` + + * Value type is <> + * Default value is `"/tmp/logstash"` + +Set the directory where logstash will store the tmp files before processing them. 
+default to the current OS temporary directory in linux /tmp/logstash + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/s3-v3.1.7.asciidoc b/docs/versioned-plugins/inputs/s3-v3.1.7.asciidoc new file mode 100644 index 000000000..ecdc5f624 --- /dev/null +++ b/docs/versioned-plugins/inputs/s3-v3.1.7.asciidoc @@ -0,0 +1,214 @@ +:plugin: s3 +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.7 +:release_date: 2017-10-03 +:changelog_url: https://github.com/logstash-plugins/logstash-input-s3/blob/v3.1.7/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== S3 input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Stream events from files from a S3 bucket. + +Each line from each file generates an event. +Files ending in `.gz` are handled as gzip'ed files. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== S3 Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-backup_add_prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_bucket>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_dir>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bucket>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-delete>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclude_pattern>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-access_key_id"] +===== `access_key_id` + + * Value type is <> + * There is no default value for this setting. + +This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: + +1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config +2. External credentials file specified by `aws_credentials_file` +3. 
Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` +4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` +5. IAM Instance Profile (available when running inside EC2) + +[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] +===== `aws_credentials_file` + + * Value type is <> + * There is no default value for this setting. + +Path to YAML file containing a hash of AWS credentials. +This file will only be loaded if `access_key_id` and +`secret_access_key` aren't set. The contents of the +file should look like this: + +[source,ruby] +---------------------------------- + :access_key_id: "12345" + :secret_access_key: "54321" +---------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-backup_add_prefix"] +===== `backup_add_prefix` + + * Value type is <> + * Default value is `nil` + +Append a prefix to the key (full path including file name in s3) after processing. +If backing up to another (or the same) bucket, this effectively lets you +choose a new 'folder' to place the files in + +[id="{version}-plugins-{type}s-{plugin}-backup_to_bucket"] +===== `backup_to_bucket` + + * Value type is <> + * Default value is `nil` + +Name of a S3 bucket to backup processed files to. + +[id="{version}-plugins-{type}s-{plugin}-backup_to_dir"] +===== `backup_to_dir` + + * Value type is <> + * Default value is `nil` + +Path of a local directory to backup processed files to. + +[id="{version}-plugins-{type}s-{plugin}-bucket"] +===== `bucket` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The name of the S3 bucket. + +[id="{version}-plugins-{type}s-{plugin}-delete"] +===== `delete` + + * Value type is <> + * Default value is `false` + +Whether to delete processed files from the original bucket. + +[id="{version}-plugins-{type}s-{plugin}-exclude_pattern"] +===== `exclude_pattern` + + * Value type is <> + * Default value is `nil` + +Ruby style regexp of keys to exclude from the bucket + +[id="{version}-plugins-{type}s-{plugin}-interval"] +===== `interval` + + * Value type is <> + * Default value is `60` + +Interval to wait between to check the file list again after a run is finished. +Value is in seconds. + +[id="{version}-plugins-{type}s-{plugin}-prefix"] +===== `prefix` + + * Value type is <> + * Default value is `nil` + +If specified, the prefix of filenames in the bucket must match (not a regexp) + +[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] +===== `proxy_uri` + + * Value type is <> + * There is no default value for this setting. + +URI to proxy server if required + +[id="{version}-plugins-{type}s-{plugin}-region"] +===== `region` + + * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1` + * Default value is `"us-east-1"` + +The AWS Region + +[id="{version}-plugins-{type}s-{plugin}-secret_access_key"] +===== `secret_access_key` + + * Value type is <> + * There is no default value for this setting. + +The AWS Secret Access Key + +[id="{version}-plugins-{type}s-{plugin}-session_token"] +===== `session_token` + + * Value type is <> + * There is no default value for this setting. 
+ +The AWS Session token for temporary credential + +[id="{version}-plugins-{type}s-{plugin}-sincedb_path"] +===== `sincedb_path` + + * Value type is <> + * Default value is `nil` + +Where to write the since database (keeps track of the date +the last handled file was added to S3). The default will write +sincedb files to some path matching "$HOME/.sincedb*" +Should be a path with filename not just a directory. + +[id="{version}-plugins-{type}s-{plugin}-temporary_directory"] +===== `temporary_directory` + + * Value type is <> + * Default value is `"/tmp/logstash"` + +Set the directory where logstash will store the tmp files before processing them. +default to the current OS temporary directory in linux /tmp/logstash + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/s3-v3.1.8.asciidoc b/docs/versioned-plugins/inputs/s3-v3.1.8.asciidoc new file mode 100644 index 000000000..d251d6064 --- /dev/null +++ b/docs/versioned-plugins/inputs/s3-v3.1.8.asciidoc @@ -0,0 +1,214 @@ +:plugin: s3 +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.8 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-input-s3/blob/v3.1.8/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== S3 input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Stream events from files from a S3 bucket. + +Each line from each file generates an event. +Files ending in `.gz` are handled as gzip'ed files. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== S3 Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-backup_add_prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_bucket>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_dir>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bucket>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-delete>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclude_pattern>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-access_key_id"] +===== `access_key_id` + + * Value type is <> + * There is no default value for this setting. + +This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: + +1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config +2. External credentials file specified by `aws_credentials_file` +3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` +4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` +5. IAM Instance Profile (available when running inside EC2) + +[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] +===== `aws_credentials_file` + + * Value type is <> + * There is no default value for this setting. + +Path to YAML file containing a hash of AWS credentials. +This file will only be loaded if `access_key_id` and +`secret_access_key` aren't set. The contents of the +file should look like this: + +[source,ruby] +---------------------------------- + :access_key_id: "12345" + :secret_access_key: "54321" +---------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-backup_add_prefix"] +===== `backup_add_prefix` + + * Value type is <> + * Default value is `nil` + +Append a prefix to the key (full path including file name in s3) after processing. +If backing up to another (or the same) bucket, this effectively lets you +choose a new 'folder' to place the files in + +[id="{version}-plugins-{type}s-{plugin}-backup_to_bucket"] +===== `backup_to_bucket` + + * Value type is <> + * Default value is `nil` + +Name of a S3 bucket to backup processed files to. + +[id="{version}-plugins-{type}s-{plugin}-backup_to_dir"] +===== `backup_to_dir` + + * Value type is <> + * Default value is `nil` + +Path of a local directory to backup processed files to. 
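+
+For example, here is a sketch that reads credentials from an external file in
+the format shown above, instead of placing keys in the pipeline configuration.
+The bucket name and file path are placeholders:
+
+[source,ruby]
+----------------------------------
+input {
+  s3 {
+    bucket => "my-logs"                               # placeholder bucket name
+    aws_credentials_file => "/etc/logstash/aws.yml"   # hypothetical credentials file
+  }
+}
+----------------------------------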
+ +[id="{version}-plugins-{type}s-{plugin}-bucket"] +===== `bucket` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The name of the S3 bucket. + +[id="{version}-plugins-{type}s-{plugin}-delete"] +===== `delete` + + * Value type is <> + * Default value is `false` + +Whether to delete processed files from the original bucket. + +[id="{version}-plugins-{type}s-{plugin}-exclude_pattern"] +===== `exclude_pattern` + + * Value type is <> + * Default value is `nil` + +Ruby style regexp of keys to exclude from the bucket + +[id="{version}-plugins-{type}s-{plugin}-interval"] +===== `interval` + + * Value type is <> + * Default value is `60` + +Interval to wait between to check the file list again after a run is finished. +Value is in seconds. + +[id="{version}-plugins-{type}s-{plugin}-prefix"] +===== `prefix` + + * Value type is <> + * Default value is `nil` + +If specified, the prefix of filenames in the bucket must match (not a regexp) + +[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] +===== `proxy_uri` + + * Value type is <> + * There is no default value for this setting. + +URI to proxy server if required + +[id="{version}-plugins-{type}s-{plugin}-region"] +===== `region` + + * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1` + * Default value is `"us-east-1"` + +The AWS Region + +[id="{version}-plugins-{type}s-{plugin}-secret_access_key"] +===== `secret_access_key` + + * Value type is <> + * There is no default value for this setting. + +The AWS Secret Access Key + +[id="{version}-plugins-{type}s-{plugin}-session_token"] +===== `session_token` + + * Value type is <> + * There is no default value for this setting. + +The AWS Session token for temporary credential + +[id="{version}-plugins-{type}s-{plugin}-sincedb_path"] +===== `sincedb_path` + + * Value type is <> + * Default value is `nil` + +Where to write the since database (keeps track of the date +the last handled file was added to S3). The default will write +sincedb files to some path matching "$HOME/.sincedb*" +Should be a path with filename not just a directory. + +[id="{version}-plugins-{type}s-{plugin}-temporary_directory"] +===== `temporary_directory` + + * Value type is <> + * Default value is `"/tmp/logstash"` + +Set the directory where logstash will store the tmp files before processing them. +default to the current OS temporary directory in linux /tmp/logstash + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/s3-v3.1.9.asciidoc b/docs/versioned-plugins/inputs/s3-v3.1.9.asciidoc new file mode 100644 index 000000000..7853a8d6f --- /dev/null +++ b/docs/versioned-plugins/inputs/s3-v3.1.9.asciidoc @@ -0,0 +1,215 @@ +:plugin: s3 +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.9 +:release_date: 2017-12-19 +:changelog_url: https://github.com/logstash-plugins/logstash-input-s3/blob/v3.1.9/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== S3 input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Stream events from files from a S3 bucket. + +Each line from each file generates an event. +Files ending in `.gz` are handled as gzip'ed files. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== S3 Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-backup_add_prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_bucket>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_dir>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bucket>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-delete>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclude_pattern>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-access_key_id"] +===== `access_key_id` + + * Value type is <> + * There is no default value for this setting. + +This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: + +1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config +2. External credentials file specified by `aws_credentials_file` +3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` +4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` +5. IAM Instance Profile (available when running inside EC2) + +[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] +===== `aws_credentials_file` + + * Value type is <> + * There is no default value for this setting. + +Path to YAML file containing a hash of AWS credentials. +This file will only be loaded if `access_key_id` and +`secret_access_key` aren't set. The contents of the +file should look like this: + +[source,ruby] +---------------------------------- + :access_key_id: "12345" + :secret_access_key: "54321" +---------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-backup_add_prefix"] +===== `backup_add_prefix` + + * Value type is <> + * Default value is `nil` + +Append a prefix to the key (full path including file name in s3) after processing. 
+If backing up to another (or the same) bucket, this effectively lets you +choose a new 'folder' to place the files in + +[id="{version}-plugins-{type}s-{plugin}-backup_to_bucket"] +===== `backup_to_bucket` + + * Value type is <> + * Default value is `nil` + +Name of a S3 bucket to backup processed files to. + +[id="{version}-plugins-{type}s-{plugin}-backup_to_dir"] +===== `backup_to_dir` + + * Value type is <> + * Default value is `nil` + +Path of a local directory to backup processed files to. + +[id="{version}-plugins-{type}s-{plugin}-bucket"] +===== `bucket` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The name of the S3 bucket. + +[id="{version}-plugins-{type}s-{plugin}-delete"] +===== `delete` + + * Value type is <> + * Default value is `false` + +Whether to delete processed files from the original bucket. + +[id="{version}-plugins-{type}s-{plugin}-exclude_pattern"] +===== `exclude_pattern` + + * Value type is <> + * Default value is `nil` + +Ruby style regexp of keys to exclude from the bucket + +[id="{version}-plugins-{type}s-{plugin}-interval"] +===== `interval` + + * Value type is <> + * Default value is `60` + +Interval to wait between to check the file list again after a run is finished. +Value is in seconds. + +[id="{version}-plugins-{type}s-{plugin}-prefix"] +===== `prefix` + + * Value type is <> + * Default value is `nil` + +If specified, the prefix of filenames in the bucket must match (not a regexp) + +[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] +===== `proxy_uri` + + * Value type is <> + * There is no default value for this setting. + +URI to proxy server if required + +[id="{version}-plugins-{type}s-{plugin}-region"] +===== `region` + + * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1` + * Default value is `"us-east-1"` + +The AWS Region + +[id="{version}-plugins-{type}s-{plugin}-secret_access_key"] +===== `secret_access_key` + + * Value type is <> + * There is no default value for this setting. + +The AWS Secret Access Key + +[id="{version}-plugins-{type}s-{plugin}-session_token"] +===== `session_token` + + * Value type is <> + * There is no default value for this setting. + +The AWS Session token for temporary credential + +[id="{version}-plugins-{type}s-{plugin}-sincedb_path"] +===== `sincedb_path` + + * Value type is <> + * Default value is `nil` + +Where to write the since database (keeps track of the date +the last handled file was added to S3). The default will write +sincedb files to in the directory '{path.data}/plugins/inputs/s3/' + +If specified, this setting must be a filename path and not just a directory. + +[id="{version}-plugins-{type}s-{plugin}-temporary_directory"] +===== `temporary_directory` + + * Value type is <> + * Default value is `"/tmp/logstash"` + +Set the directory where logstash will store the tmp files before processing them. 
+default to the current OS temporary directory in linux /tmp/logstash + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/s3-v3.2.0.asciidoc b/docs/versioned-plugins/inputs/s3-v3.2.0.asciidoc new file mode 100644 index 000000000..3aca2d8c9 --- /dev/null +++ b/docs/versioned-plugins/inputs/s3-v3.2.0.asciidoc @@ -0,0 +1,215 @@ +:plugin: s3 +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.2.0 +:release_date: 2018-01-09 +:changelog_url: https://github.com/logstash-plugins/logstash-input-s3/blob/v3.2.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== S3 input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Stream events from files from a S3 bucket. + +Each line from each file generates an event. +Files ending in `.gz` are handled as gzip'ed files. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== S3 Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-backup_add_prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_bucket>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_dir>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bucket>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-delete>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclude_pattern>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-access_key_id"] +===== `access_key_id` + + * Value type is <> + * There is no default value for this setting. + +This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: + +1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config +2. External credentials file specified by `aws_credentials_file` +3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` +4. 
Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` +5. IAM Instance Profile (available when running inside EC2) + +[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] +===== `aws_credentials_file` + + * Value type is <> + * There is no default value for this setting. + +Path to YAML file containing a hash of AWS credentials. +This file will only be loaded if `access_key_id` and +`secret_access_key` aren't set. The contents of the +file should look like this: + +[source,ruby] +---------------------------------- + :access_key_id: "12345" + :secret_access_key: "54321" +---------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-backup_add_prefix"] +===== `backup_add_prefix` + + * Value type is <> + * Default value is `nil` + +Append a prefix to the key (full path including file name in s3) after processing. +If backing up to another (or the same) bucket, this effectively lets you +choose a new 'folder' to place the files in + +[id="{version}-plugins-{type}s-{plugin}-backup_to_bucket"] +===== `backup_to_bucket` + + * Value type is <> + * Default value is `nil` + +Name of a S3 bucket to backup processed files to. + +[id="{version}-plugins-{type}s-{plugin}-backup_to_dir"] +===== `backup_to_dir` + + * Value type is <> + * Default value is `nil` + +Path of a local directory to backup processed files to. + +[id="{version}-plugins-{type}s-{plugin}-bucket"] +===== `bucket` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The name of the S3 bucket. + +[id="{version}-plugins-{type}s-{plugin}-delete"] +===== `delete` + + * Value type is <> + * Default value is `false` + +Whether to delete processed files from the original bucket. + +[id="{version}-plugins-{type}s-{plugin}-exclude_pattern"] +===== `exclude_pattern` + + * Value type is <> + * Default value is `nil` + +Ruby style regexp of keys to exclude from the bucket + +[id="{version}-plugins-{type}s-{plugin}-interval"] +===== `interval` + + * Value type is <> + * Default value is `60` + +Interval to wait between to check the file list again after a run is finished. +Value is in seconds. + +[id="{version}-plugins-{type}s-{plugin}-prefix"] +===== `prefix` + + * Value type is <> + * Default value is `nil` + +If specified, the prefix of filenames in the bucket must match (not a regexp) + +[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] +===== `proxy_uri` + + * Value type is <> + * There is no default value for this setting. + +URI to proxy server if required + +[id="{version}-plugins-{type}s-{plugin}-region"] +===== `region` + + * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1` + * Default value is `"us-east-1"` + +The AWS Region + +[id="{version}-plugins-{type}s-{plugin}-secret_access_key"] +===== `secret_access_key` + + * Value type is <> + * There is no default value for this setting. + +The AWS Secret Access Key + +[id="{version}-plugins-{type}s-{plugin}-session_token"] +===== `session_token` + + * Value type is <> + * There is no default value for this setting. 
+ +The AWS Session token for temporary credential + +[id="{version}-plugins-{type}s-{plugin}-sincedb_path"] +===== `sincedb_path` + + * Value type is <> + * Default value is `nil` + +Where to write the since database (keeps track of the date +the last handled file was added to S3). The default will write +sincedb files to in the directory '{path.data}/plugins/inputs/s3/' + +If specified, this setting must be a filename path and not just a directory. + +[id="{version}-plugins-{type}s-{plugin}-temporary_directory"] +===== `temporary_directory` + + * Value type is <> + * Default value is `"/tmp/logstash"` + +Set the directory where logstash will store the tmp files before processing them. +default to the current OS temporary directory in linux /tmp/logstash + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/s3sqs-index.asciidoc b/docs/versioned-plugins/inputs/s3sqs-index.asciidoc new file mode 100644 index 000000000..6f9f04b8b --- /dev/null +++ b/docs/versioned-plugins/inputs/s3sqs-index.asciidoc @@ -0,0 +1,10 @@ +:plugin: s3sqs +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +|======================================================================= + + diff --git a/docs/versioned-plugins/inputs/salesforce-index.asciidoc b/docs/versioned-plugins/inputs/salesforce-index.asciidoc new file mode 100644 index 000000000..469202798 --- /dev/null +++ b/docs/versioned-plugins/inputs/salesforce-index.asciidoc @@ -0,0 +1,14 @@ +:plugin: salesforce +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-08-16 +| <> | 2017-06-23 +|======================================================================= + +include::salesforce-v3.0.3.asciidoc[] +include::salesforce-v3.0.2.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/salesforce-v3.0.2.asciidoc b/docs/versioned-plugins/inputs/salesforce-v3.0.2.asciidoc new file mode 100644 index 000000000..3e692140c --- /dev/null +++ b/docs/versioned-plugins/inputs/salesforce-v3.0.2.asciidoc @@ -0,0 +1,199 @@ +:plugin: salesforce +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.2 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-input-salesforce/blob/v3.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Salesforce input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This Logstash input plugin allows you to query Salesforce using SOQL and puts the results +into Logstash, one row per event. You can configure it to pull entire sObjects or only +specific fields. + +NOTE: This input plugin will stop after all the results of the query are processed and will +need to be re-run to fetch new results. It does not utilize the streaming API. + +In order to use this plugin, you will need to create a new SFDC Application using +oauth. 
More details can be found here:
+https://help.salesforce.com/apex/HTViewHelpDoc?id=connected_app_create.htm
+
+You will also need a username, password, and security token for your salesforce instance.
+More details for generating a token can be found here:
+https://help.salesforce.com/apex/HTViewHelpDoc?id=user_security_token.htm
+
+In addition to specifying an sObject, you can also supply a list of API fields
+that will be used in the SOQL query.
+
+==== Example
+This example prints all the Salesforce Opportunities to standard out:
+
+[source,ruby]
+----------------------------------
+input {
+  salesforce {
+    client_id => 'OAUTH CLIENT ID FROM YOUR SFDC APP'
+    client_secret => 'OAUTH CLIENT SECRET FROM YOUR SFDC APP'
+    username => 'email@example.com'
+    password => 'super-secret'
+    security_token => 'SECURITY TOKEN FOR THIS USER'
+    sfdc_object_name => 'Opportunity'
+  }
+}
+
+output {
+  stdout {
+    codec => rubydebug
+  }
+}
+----------------------------------
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Salesforce Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-api_version>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-client_id>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-client_secret>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-password>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-security_token>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-sfdc_fields>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-sfdc_filters>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-sfdc_object_name>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-to_underscores>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-use_test_sandbox>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-username>> |<<string,string>>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-api_version"]
+===== `api_version`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+By default, this uses the default Restforce API version.
+To override this, set this to a version string such as `"32.0"`.
+
+[id="{version}-plugins-{type}s-{plugin}-client_id"]
+===== `client_id`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Consumer Key for authentication. You must set up a new SFDC
+connected app with oauth to use this input. More information
+can be found here:
+https://help.salesforce.com/apex/HTViewHelpDoc?id=connected_app_create.htm
+
+[id="{version}-plugins-{type}s-{plugin}-client_secret"]
+===== `client_secret`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Consumer Secret from your oauth enabled connected app
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The password used to log in to SFDC
+
+[id="{version}-plugins-{type}s-{plugin}-security_token"]
+===== `security_token`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The security token for this account. For more information about
+generating a security token, see:
+https://help.salesforce.com/apex/HTViewHelpDoc?id=user_security_token.htm
+
+[id="{version}-plugins-{type}s-{plugin}-sfdc_fields"]
+===== `sfdc_fields`
+
+  * Value type is <>
+  * Default value is `[]`
+
+These are the field names to return in the Salesforce query.
+If this is empty, all fields are returned.
+
+[id="{version}-plugins-{type}s-{plugin}-sfdc_filters"]
+===== `sfdc_filters`
+
+  * Value type is <>
+  * Default value is `""`
+
+These options will be added to the WHERE clause in the
+SOQL statement. Additional fields can be filtered on by
+adding `field1 = value1 AND field2 = value2 AND ...`, as
+sketched in the example below.
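+
+For instance, a hedged sketch of a filter value (the field names assume
+the standard `Opportunity` object; adjust them to your own schema):
+
+[source,ruby]
+----------------------------------
+  sfdc_filters => "Amount > 10000 AND IsClosed = false"
+----------------------------------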
+
+[id="{version}-plugins-{type}s-{plugin}-sfdc_object_name"]
+===== `sfdc_object_name`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The name of the Salesforce object to query.
+
+[id="{version}-plugins-{type}s-{plugin}-to_underscores"]
+===== `to_underscores`
+
+  * Value type is <>
+  * Default value is `false`
+
+Setting this to true will convert SFDC's NamedFields__c to named_fields__c.
+
+[id="{version}-plugins-{type}s-{plugin}-use_test_sandbox"]
+===== `use_test_sandbox`
+
+  * Value type is <>
+  * Default value is `false`
+
+Set this to true to connect to a sandbox Salesforce instance,
+logging in through test.salesforce.com.
+
+[id="{version}-plugins-{type}s-{plugin}-username"]
+===== `username`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+A valid Salesforce user name, usually your email address.
+This is used for authentication and will be the user that all
+objects are created or modified by.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/salesforce-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/salesforce-v3.0.3.asciidoc
new file mode 100644
index 000000000..0cd53d62d
--- /dev/null
+++ b/docs/versioned-plugins/inputs/salesforce-v3.0.3.asciidoc
@@ -0,0 +1,199 @@
+:plugin: salesforce
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-input-salesforce/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Salesforce input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This Logstash input plugin allows you to query Salesforce using SOQL and put the results
+into Logstash, one event per row. You can configure it to pull entire sObjects or only
+specific fields.
+
+NOTE: This input plugin will stop after all the results of the query are processed and will
+need to be re-run to fetch new results. It does not utilize the streaming API.
+
+In order to use this plugin, you will need to create a new SFDC Application using
+OAuth. More details can be found here:
+https://help.salesforce.com/apex/HTViewHelpDoc?id=connected_app_create.htm
+
+You will also need a username, password, and security token for your Salesforce instance.
+More details for generating a token can be found here:
+https://help.salesforce.com/apex/HTViewHelpDoc?id=user_security_token.htm
+
+In addition to specifying an sObject, you can also supply a list of API fields
+that will be used in the SOQL query.
+
+==== Example
+This example prints all Salesforce Opportunities to standard output:
+
+[source,ruby]
+----------------------------------
+input {
+  salesforce {
+    client_id => 'OAUTH CLIENT ID FROM YOUR SFDC APP'
+    client_secret => 'OAUTH CLIENT SECRET FROM YOUR SFDC APP'
+    username => 'email@example.com'
+    password => 'super-secret'
+    security_token => 'SECURITY TOKEN FOR THIS USER'
+    sfdc_object_name => 'Opportunity'
+  }
+}
+
+output {
+  stdout {
+    codec => rubydebug
+  }
+}
+----------------------------------
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Salesforce Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-api_version>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-client_secret>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-security_token>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-sfdc_fields>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sfdc_filters>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sfdc_object_name>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-to_underscores>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-use_test_sandbox>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-username>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-api_version"]
+===== `api_version`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+By default, this uses the default Restforce API version.
+To override this, set it to something like `"32.0"`.
+
+[id="{version}-plugins-{type}s-{plugin}-client_id"]
+===== `client_id`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+Consumer Key for authentication. You must set up a new SFDC
+connected app with OAuth to use this plugin. More information
+can be found here:
+https://help.salesforce.com/apex/HTViewHelpDoc?id=connected_app_create.htm
+
+[id="{version}-plugins-{type}s-{plugin}-client_secret"]
+===== `client_secret`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+Consumer Secret from your OAuth-enabled connected app.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The password used to log in to Salesforce.
+
+[id="{version}-plugins-{type}s-{plugin}-security_token"]
+===== `security_token`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The security token for this account. For more information about
+generating a security token, see:
+https://help.salesforce.com/apex/HTViewHelpDoc?id=user_security_token.htm
+
+[id="{version}-plugins-{type}s-{plugin}-sfdc_fields"]
+===== `sfdc_fields`
+
+  * Value type is <>
+  * Default value is `[]`
+
+These are the field names to return in the Salesforce query.
+If this is empty, all fields are returned.
+
+[id="{version}-plugins-{type}s-{plugin}-sfdc_filters"]
+===== `sfdc_filters`
+
+  * Value type is <>
+  * Default value is `""`
+
+These options will be added to the WHERE clause in the
+SOQL statement. Additional fields can be filtered on by
+adding `field1 = value1 AND field2 = value2 AND ...`.
+
+[id="{version}-plugins-{type}s-{plugin}-sfdc_object_name"]
+===== `sfdc_object_name`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The name of the Salesforce object to query.
+
+[id="{version}-plugins-{type}s-{plugin}-to_underscores"]
+===== `to_underscores`
+
+  * Value type is <>
+  * Default value is `false`
+
+Setting this to true will convert SFDC's NamedFields__c to named_fields__c.
+
+[id="{version}-plugins-{type}s-{plugin}-use_test_sandbox"]
+===== `use_test_sandbox`
+
+  * Value type is <>
+  * Default value is `false`
+
+Set this to true to connect to a sandbox Salesforce instance,
+logging in through test.salesforce.com.
+
+[id="{version}-plugins-{type}s-{plugin}-username"]
+===== `username`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+A valid Salesforce user name, usually your email address.
+This is used for authentication and will be the user that all
+objects are created or modified by.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/snmptrap-index.asciidoc b/docs/versioned-plugins/inputs/snmptrap-index.asciidoc
new file mode 100644
index 000000000..847b27946
--- /dev/null
+++ b/docs/versioned-plugins/inputs/snmptrap-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: snmptrap
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::snmptrap-v3.0.5.asciidoc[]
+include::snmptrap-v3.0.4.asciidoc[]
+include::snmptrap-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/snmptrap-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/snmptrap-v3.0.3.asciidoc
new file mode 100644
index 000000000..6184ba8b2
--- /dev/null
+++ b/docs/versioned-plugins/inputs/snmptrap-v3.0.3.asciidoc
@@ -0,0 +1,88 @@
+:plugin: snmptrap
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-snmptrap/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="plugins-{type}-{plugin}"]
+
+=== Snmptrap input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read SNMP trap messages as events.
+
+The resulting `@message` looks like:
+[source,ruby]
+    #<SNMP::SNMPv1_Trap:0x6f1a7a4 @varbind_list=[#<SNMP::VarBind:0x2d7bcd8f @name=[1.11.12.13.14.15],
+    @value="teststring">], @timestamp=#<SNMP::TimeTicks:0x1af47e9d @value=55>, @generic_trap=6,
+    @enterprise=[1.2.3.4.5.6], @source_ip="127.0.0.1", @agent_addr=#<SNMP::IpAddress:0x29a4833e @value="\xC0\xC1\xC2\xC3">,
+    @specific_trap=99>
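+
+A minimal sketch of a pipeline using this input (the port shown is this
+plugin's default; community strings and MIB maps are site-specific):
+
+[source,ruby]
+----------------------------------
+input {
+  snmptrap {
+    community => "public"
+    port => 1062
+  }
+}
+
+output {
+  stdout {
+    codec => rubydebug
+  }
+}
+----------------------------------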
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Snmptrap Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-community>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-yamlmibdir>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-community"]
+===== `community`
+
+  * Value type is <>
+  * Default value is `"public"`
+
+SNMP Community String to listen for.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * Value type is <>
+  * Default value is `"0.0.0.0"`
+
+The address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * Value type is <>
+  * Default value is `1062`
+
+The port to listen on. Remember that ports less than 1024 (privileged
+ports) may require root privileges to use, hence the default of 1062.
+
+[id="{version}-plugins-{type}s-{plugin}-yamlmibdir"]
+===== `yamlmibdir`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Directory of YAML MIB maps (same format ruby-snmp uses).
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/snmptrap-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/snmptrap-v3.0.4.asciidoc
new file mode 100644
index 000000000..799f83190
--- /dev/null
+++ b/docs/versioned-plugins/inputs/snmptrap-v3.0.4.asciidoc
@@ -0,0 +1,88 @@
+:plugin: snmptrap
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-input-snmptrap/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Snmptrap input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read SNMP trap messages as events.
+
+The resulting `@message` looks like:
+[source,ruby]
+    #<SNMP::SNMPv1_Trap:0x6f1a7a4 @varbind_list=[#<SNMP::VarBind:0x2d7bcd8f @name=[1.11.12.13.14.15],
+    @value="teststring">], @timestamp=#<SNMP::TimeTicks:0x1af47e9d @value=55>, @generic_trap=6,
+    @enterprise=[1.2.3.4.5.6], @source_ip="127.0.0.1", @agent_addr=#<SNMP::IpAddress:0x29a4833e @value="\xC0\xC1\xC2\xC3">,
+    @specific_trap=99>
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Snmptrap Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-community>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-yamlmibdir>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-community"]
+===== `community`
+
+  * Value type is <>
+  * Default value is `"public"`
+
+SNMP Community String to listen for.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * Value type is <>
+  * Default value is `"0.0.0.0"`
+
+The address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * Value type is <>
+  * Default value is `1062`
+
+The port to listen on. Remember that ports less than 1024 (privileged
+ports) may require root privileges to use, hence the default of 1062.
+
+[id="{version}-plugins-{type}s-{plugin}-yamlmibdir"]
+===== `yamlmibdir`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Directory of YAML MIB maps (same format ruby-snmp uses).
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/snmptrap-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/snmptrap-v3.0.5.asciidoc
new file mode 100644
index 000000000..10f0dfabf
--- /dev/null
+++ b/docs/versioned-plugins/inputs/snmptrap-v3.0.5.asciidoc
@@ -0,0 +1,88 @@
+:plugin: snmptrap
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-snmptrap/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Snmptrap input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read SNMP trap messages as events.
+
+The resulting `@message` looks like:
+[source,ruby]
+    #<SNMP::SNMPv1_Trap:0x6f1a7a4 @varbind_list=[#<SNMP::VarBind:0x2d7bcd8f @name=[1.11.12.13.14.15],
+    @value="teststring">], @timestamp=#<SNMP::TimeTicks:0x1af47e9d @value=55>, @generic_trap=6,
+    @enterprise=[1.2.3.4.5.6], @source_ip="127.0.0.1", @agent_addr=#<SNMP::IpAddress:0x29a4833e @value="\xC0\xC1\xC2\xC3">,
+    @specific_trap=99>
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Snmptrap Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-community>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-yamlmibdir>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-community"]
+===== `community`
+
+  * Value type is <>
+  * Default value is `"public"`
+
+SNMP Community String to listen for.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * Value type is <>
+  * Default value is `"0.0.0.0"`
+
+The address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * Value type is <>
+  * Default value is `1062`
+
+The port to listen on. Remember that ports less than 1024 (privileged
+ports) may require root privileges to use, hence the default of 1062.
+
+[id="{version}-plugins-{type}s-{plugin}-yamlmibdir"]
+===== `yamlmibdir`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Directory of YAML MIB maps (same format ruby-snmp uses).
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/sqlite-index.asciidoc b/docs/versioned-plugins/inputs/sqlite-index.asciidoc
new file mode 100644
index 000000000..e07d34307
--- /dev/null
+++ b/docs/versioned-plugins/inputs/sqlite-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: sqlite
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::sqlite-v3.0.3.asciidoc[]
+include::sqlite-v3.0.2.asciidoc[]
+include::sqlite-v3.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/sqlite-v3.0.1.asciidoc b/docs/versioned-plugins/inputs/sqlite-v3.0.1.asciidoc
new file mode 100644
index 000000000..805744363
--- /dev/null
+++ b/docs/versioned-plugins/inputs/sqlite-v3.0.1.asciidoc
@@ -0,0 +1,124 @@
+:plugin: sqlite
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-sqlite/blob/v3.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="plugins-{type}-{plugin}"]
+
+=== Sqlite input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read rows from an SQLite database.
+
+This is most useful in cases where you are logging directly to a table.
+Any tables being watched must have an `id` column that is monotonically
+increasing.
+
+All tables are read by default except:
+
+* ones matching `sqlite_%` - these are internal/administrative tables for SQLite
+* `since_table` - this is used by this plugin to track state.
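+
+If you need to skip other tables as well, the `exclude_tables` option
+(described below) takes a list of table names; a hypothetical sketch:
+
+[source,ruby]
+----------------------------------
+  exclude_tables => [ "audit_log", "sessions" ]
+----------------------------------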
+
+Example:
+[source,sql]
+    % sqlite /tmp/example.db
+    sqlite> CREATE TABLE weblogs (
+        id INTEGER PRIMARY KEY AUTOINCREMENT,
+        ip STRING,
+        request STRING,
+        response INTEGER);
+    sqlite> INSERT INTO weblogs (ip, request, response)
+        VALUES ("1.2.3.4", "/index.html", 200);
+
+Then, with this Logstash config:
+[source,ruby]
+    input {
+      sqlite {
+        path => "/tmp/example.db"
+        type => weblogs
+      }
+    }
+    output {
+      stdout {
+        codec => rubydebug
+      }
+    }
+
+Sample output:
+[source,ruby]
+    {
+      "@source" => "sqlite://sadness/tmp/x.db",
+      "@tags" => [],
+      "@fields" => {
+        "ip" => "1.2.3.4",
+        "request" => "/index.html",
+        "response" => 200
+      },
+      "@timestamp" => "2013-05-29T06:16:30.850Z",
+      "@source_host" => "sadness",
+      "@source_path" => "/tmp/x.db",
+      "@message" => "",
+      "@type" => "foo"
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Sqlite Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-batch>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-exclude_tables>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-batch"]
+===== `batch`
+
+  * Value type is <>
+  * Default value is `5`
+
+How many rows to fetch at a time from each `SELECT` call.
+
+[id="{version}-plugins-{type}s-{plugin}-exclude_tables"]
+===== `exclude_tables`
+
+  * Value type is <>
+  * Default value is `[]`
+
+Any tables to exclude by name.
+By default, all tables are followed.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The path to the SQLite database file.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/sqlite-v3.0.2.asciidoc b/docs/versioned-plugins/inputs/sqlite-v3.0.2.asciidoc
new file mode 100644
index 000000000..78cbe4ebf
--- /dev/null
+++ b/docs/versioned-plugins/inputs/sqlite-v3.0.2.asciidoc
@@ -0,0 +1,124 @@
+:plugin: sqlite
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.2
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-input-sqlite/blob/v3.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Sqlite input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read rows from an SQLite database.
+
+This is most useful in cases where you are logging directly to a table.
+Any tables being watched must have an `id` column that is monotonically
+increasing.
+
+All tables are read by default except:
+
+* ones matching `sqlite_%` - these are internal/administrative tables for SQLite
+* `since_table` - this is used by this plugin to track state.
+
+Example:
+[source,sql]
+    % sqlite /tmp/example.db
+    sqlite> CREATE TABLE weblogs (
+        id INTEGER PRIMARY KEY AUTOINCREMENT,
+        ip STRING,
+        request STRING,
+        response INTEGER);
+    sqlite> INSERT INTO weblogs (ip, request, response)
+        VALUES ("1.2.3.4", "/index.html", 200);
+
+Then, with this Logstash config:
+[source,ruby]
+    input {
+      sqlite {
+        path => "/tmp/example.db"
+        type => weblogs
+      }
+    }
+    output {
+      stdout {
+        codec => rubydebug
+      }
+    }
+
+Sample output:
+[source,ruby]
+    {
+      "@source" => "sqlite://sadness/tmp/x.db",
+      "@tags" => [],
+      "@fields" => {
+        "ip" => "1.2.3.4",
+        "request" => "/index.html",
+        "response" => 200
+      },
+      "@timestamp" => "2013-05-29T06:16:30.850Z",
+      "@source_host" => "sadness",
+      "@source_path" => "/tmp/x.db",
+      "@message" => "",
+      "@type" => "foo"
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Sqlite Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-batch>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-exclude_tables>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-batch"]
+===== `batch`
+
+  * Value type is <>
+  * Default value is `5`
+
+How many rows to fetch at a time from each `SELECT` call.
+
+[id="{version}-plugins-{type}s-{plugin}-exclude_tables"]
+===== `exclude_tables`
+
+  * Value type is <>
+  * Default value is `[]`
+
+Any tables to exclude by name.
+By default, all tables are followed.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The path to the SQLite database file.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/sqlite-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/sqlite-v3.0.3.asciidoc
new file mode 100644
index 000000000..84e4522db
--- /dev/null
+++ b/docs/versioned-plugins/inputs/sqlite-v3.0.3.asciidoc
@@ -0,0 +1,124 @@
+:plugin: sqlite
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-sqlite/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Sqlite input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read rows from an SQLite database.
+
+This is most useful in cases where you are logging directly to a table.
+Any tables being watched must have an `id` column that is monotonically
+increasing.
+
+All tables are read by default except:
+
+* ones matching `sqlite_%` - these are internal/administrative tables for SQLite
+* `since_table` - this is used by this plugin to track state.
+
+Example:
+[source,sql]
+    % sqlite /tmp/example.db
+    sqlite> CREATE TABLE weblogs (
+        id INTEGER PRIMARY KEY AUTOINCREMENT,
+        ip STRING,
+        request STRING,
+        response INTEGER);
+    sqlite> INSERT INTO weblogs (ip, request, response)
+        VALUES ("1.2.3.4", "/index.html", 200);
+
+Then, with this Logstash config:
+[source,ruby]
+    input {
+      sqlite {
+        path => "/tmp/example.db"
+        type => weblogs
+      }
+    }
+    output {
+      stdout {
+        codec => rubydebug
+      }
+    }
+
+Sample output:
+[source,ruby]
+    {
+      "@source" => "sqlite://sadness/tmp/x.db",
+      "@tags" => [],
+      "@fields" => {
+        "ip" => "1.2.3.4",
+        "request" => "/index.html",
+        "response" => 200
+      },
+      "@timestamp" => "2013-05-29T06:16:30.850Z",
+      "@source_host" => "sadness",
+      "@source_path" => "/tmp/x.db",
+      "@message" => "",
+      "@type" => "foo"
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Sqlite Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-batch>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-exclude_tables>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-batch"]
+===== `batch`
+
+  * Value type is <>
+  * Default value is `5`
+
+How many rows to fetch at a time from each `SELECT` call.
+
+[id="{version}-plugins-{type}s-{plugin}-exclude_tables"]
+===== `exclude_tables`
+
+  * Value type is <>
+  * Default value is `[]`
+
+Any tables to exclude by name.
+By default, all tables are followed.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The path to the SQLite database file.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/sqs-index.asciidoc b/docs/versioned-plugins/inputs/sqs-index.asciidoc
new file mode 100644
index 000000000..560388a05
--- /dev/null
+++ b/docs/versioned-plugins/inputs/sqs-index.asciidoc
@@ -0,0 +1,18 @@
+:plugin: sqs
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-16
+| <> | 2017-06-23
+| <> | 2017-05-04
+|=======================================================================
+
+include::sqs-v3.0.6.asciidoc[]
+include::sqs-v3.0.5.asciidoc[]
+include::sqs-v3.0.4.asciidoc[]
+include::sqs-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/sqs-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/sqs-v3.0.3.asciidoc
new file mode 100644
index 000000000..3665311f0
--- /dev/null
+++ b/docs/versioned-plugins/inputs/sqs-v3.0.3.asciidoc
@@ -0,0 +1,219 @@
+:plugin: sqs
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-05-04
+:changelog_url: https://github.com/logstash-plugins/logstash-input-sqs/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="plugins-{type}-{plugin}"]
+
+=== Sqs input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+
+Pull events from an Amazon Web Services Simple Queue Service (SQS) queue.
+
+SQS is a simple, scalable queue system that is part of the
+Amazon Web Services suite of tools.
+
+Although SQS is similar to other queuing systems like AMQP, it
+uses a custom API and requires that you have an AWS account.
+See http://aws.amazon.com/sqs/ for more details on how SQS works,
+what the pricing schedule looks like and how to set up a queue.
+
+To use this plugin, you *must*:
+
+ * Have an AWS account
+ * Set up an SQS queue
+ * Create an identity that has access to consume messages from the queue.
+
+The "consumer" identity must have the following permissions on the queue:
+
+ * `sqs:ChangeMessageVisibility`
+ * `sqs:ChangeMessageVisibilityBatch`
+ * `sqs:DeleteMessage`
+ * `sqs:DeleteMessageBatch`
+ * `sqs:GetQueueAttributes`
+ * `sqs:GetQueueUrl`
+ * `sqs:ListQueues`
+ * `sqs:ReceiveMessage`
+
+Typically, you should set up an IAM policy, create a user, and apply the IAM policy to the user.
+A sample policy granting the consumer permissions above is as follows:
+[source,json]
+    {
+      "Statement": [
+        {
+          "Action": [
+            "sqs:ChangeMessageVisibility",
+            "sqs:ChangeMessageVisibilityBatch",
+            "sqs:DeleteMessage",
+            "sqs:DeleteMessageBatch",
+            "sqs:GetQueueAttributes",
+            "sqs:GetQueueUrl",
+            "sqs:ListQueues",
+            "sqs:ReceiveMessage"
+          ],
+          "Effect": "Allow",
+          "Resource": [
+            "arn:aws:sqs:us-east-1:123456789012:Logstash"
+          ]
+        }
+      ]
+    }
+
+See http://aws.amazon.com/iam/ for more details on setting up AWS identities.
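+
+As a minimal, hedged sketch of a pipeline using this input (the queue
+name and region below are placeholders for your own values):
+
+[source,ruby]
+----------------------------------
+input {
+  sqs {
+    queue => "Logstash"
+    region => "us-east-1"
+  }
+}
+
+output {
+  stdout {
+    codec => rubydebug
+  }
+}
+----------------------------------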
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Sqs Input Configuration Options
+
+This plugin supports the following configuration options plus the <> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-id_field>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-md5_field>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-polling_frequency>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No
+| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sent_timestamp_field>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No
+|=======================================================================
+
+Also see <> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-access_key_id"]
+===== `access_key_id`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
+
+1. Static configuration, using `access_key_id` and `secret_access_key` params in the Logstash plugin config
+2. External credentials file specified by `aws_credentials_file`
+3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
+4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
+5. IAM Instance Profile (available when running inside EC2)
+
+[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"]
+===== `aws_credentials_file`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Path to a YAML file containing a hash of AWS credentials.
+This file will only be loaded if `access_key_id` and
+`secret_access_key` aren't set. The contents of the
+file should look like this:
+
+[source,ruby]
+----------------------------------
+    :access_key_id: "12345"
+    :secret_access_key: "54321"
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-id_field"]
+===== `id_field`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Name of the event field in which to store the SQS message ID.
+
+[id="{version}-plugins-{type}s-{plugin}-md5_field"]
+===== `md5_field`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Name of the event field in which to store the SQS message MD5 checksum.
+
+[id="{version}-plugins-{type}s-{plugin}-polling_frequency"]
+===== `polling_frequency`
+
+  * Value type is <>
+  * Default value is `20`
+
+Polling frequency, in seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_uri"]
+===== `proxy_uri`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+URI of a proxy server, if required.
+
+[id="{version}-plugins-{type}s-{plugin}-queue"]
+===== `queue`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+Name of the SQS queue to pull messages from. Note that this is just the name of the queue, not the URL or ARN.
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+  * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
+  * Default value is `"us-east-1"`
+
+The AWS Region.
+
+[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
+===== `secret_access_key`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The AWS Secret Access Key.
+
+[id="{version}-plugins-{type}s-{plugin}-sent_timestamp_field"]
+===== `sent_timestamp_field`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Name of the event field in which to store the SQS message Sent Timestamp.
+
+[id="{version}-plugins-{type}s-{plugin}-session_token"]
+===== `session_token`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The AWS Session token for temporary credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-threads"]
+===== `threads`
+
+  * Value type is <>
+  * Default value is `1`
+
+The number of threads to use for polling the queue.
+
+
+
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/sqs-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/sqs-v3.0.4.asciidoc
new file mode 100644
index 000000000..51d7a3b8f
--- /dev/null
+++ b/docs/versioned-plugins/inputs/sqs-v3.0.4.asciidoc
@@ -0,0 +1,220 @@
+:plugin: sqs
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-sqs/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="plugins-{type}-{plugin}"]
+
+=== Sqs input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+
+Pull events from an Amazon Web Services Simple Queue Service (SQS) queue.
+
+SQS is a simple, scalable queue system that is part of the
+Amazon Web Services suite of tools.
+
+Although SQS is similar to other queuing systems like AMQP, it
+uses a custom API and requires that you have an AWS account.
+See http://aws.amazon.com/sqs/ for more details on how SQS works,
+what the pricing schedule looks like and how to set up a queue.
+
+To use this plugin, you *must*:
+
+ * Have an AWS account
+ * Set up an SQS queue
+ * Create an identity that has access to consume messages from the queue.
+
+The "consumer" identity must have the following permissions on the queue:
+
+ * `sqs:ChangeMessageVisibility`
+ * `sqs:ChangeMessageVisibilityBatch`
+ * `sqs:DeleteMessage`
+ * `sqs:DeleteMessageBatch`
+ * `sqs:GetQueueAttributes`
+ * `sqs:GetQueueUrl`
+ * `sqs:ListQueues`
+ * `sqs:ReceiveMessage`
+
+Typically, you should set up an IAM policy, create a user, and apply the IAM policy to the user.
+A sample policy granting the consumer permissions above is as follows:
+[source,json]
+    {
+      "Statement": [
+        {
+          "Action": [
+            "sqs:ChangeMessageVisibility",
+            "sqs:ChangeMessageVisibilityBatch",
+            "sqs:DeleteMessage",
+            "sqs:DeleteMessageBatch",
+            "sqs:GetQueueAttributes",
+            "sqs:GetQueueUrl",
+            "sqs:ListQueues",
+            "sqs:ReceiveMessage"
+          ],
+          "Effect": "Allow",
+          "Resource": [
+            "arn:aws:sqs:us-east-1:123456789012:Logstash"
+          ]
+        }
+      ]
+    }
+
+See http://aws.amazon.com/iam/ for more details on setting up AWS identities.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Sqs Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-id_field>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-md5_field>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-polling_frequency>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No
+| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sent_timestamp_field>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-access_key_id"]
+===== `access_key_id`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
+
+1. Static configuration, using `access_key_id` and `secret_access_key` params in the Logstash plugin config
+2. External credentials file specified by `aws_credentials_file`
+3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
+4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
+5. IAM Instance Profile (available when running inside EC2)
+
+[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"]
+===== `aws_credentials_file`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Path to a YAML file containing a hash of AWS credentials.
+This file will only be loaded if `access_key_id` and
+`secret_access_key` aren't set. The contents of the
+file should look like this:
+
+[source,ruby]
+----------------------------------
+    :access_key_id: "12345"
+    :secret_access_key: "54321"
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-id_field"]
+===== `id_field`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Name of the event field in which to store the SQS message ID.
+
+[id="{version}-plugins-{type}s-{plugin}-md5_field"]
+===== `md5_field`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Name of the event field in which to store the SQS message MD5 checksum.
+
+[id="{version}-plugins-{type}s-{plugin}-polling_frequency"]
+===== `polling_frequency`
+
+  * Value type is <>
+  * Default value is `20`
+
+Polling frequency, in seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_uri"]
+===== `proxy_uri`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+URI of a proxy server, if required.
+
+[id="{version}-plugins-{type}s-{plugin}-queue"]
+===== `queue`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+Name of the SQS queue to pull messages from. Note that this is just the name of the queue, not the URL or ARN.
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+  * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
+  * Default value is `"us-east-1"`
+
+The AWS Region.
+
+[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
+===== `secret_access_key`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The AWS Secret Access Key.
+
+[id="{version}-plugins-{type}s-{plugin}-sent_timestamp_field"]
+===== `sent_timestamp_field`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Name of the event field in which to store the SQS message Sent Timestamp.
+
+[id="{version}-plugins-{type}s-{plugin}-session_token"]
+===== `session_token`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The AWS Session token for temporary credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-threads"]
+===== `threads`
+
+  * Value type is <>
+  * Default value is `1`
+
+The number of threads to use for polling the queue.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/sqs-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/sqs-v3.0.5.asciidoc
new file mode 100644
index 000000000..4a42a2a26
--- /dev/null
+++ b/docs/versioned-plugins/inputs/sqs-v3.0.5.asciidoc
@@ -0,0 +1,220 @@
+:plugin: sqs
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-input-sqs/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Sqs input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+
+Pull events from an Amazon Web Services Simple Queue Service (SQS) queue.
+
+SQS is a simple, scalable queue system that is part of the
+Amazon Web Services suite of tools.
+
+Although SQS is similar to other queuing systems like AMQP, it
+uses a custom API and requires that you have an AWS account.
+See http://aws.amazon.com/sqs/ for more details on how SQS works,
+what the pricing schedule looks like and how to set up a queue.
+
+To use this plugin, you *must*:
+
+ * Have an AWS account
+ * Set up an SQS queue
+ * Create an identity that has access to consume messages from the queue.
+
+The "consumer" identity must have the following permissions on the queue:
+
+ * `sqs:ChangeMessageVisibility`
+ * `sqs:ChangeMessageVisibilityBatch`
+ * `sqs:DeleteMessage`
+ * `sqs:DeleteMessageBatch`
+ * `sqs:GetQueueAttributes`
+ * `sqs:GetQueueUrl`
+ * `sqs:ListQueues`
+ * `sqs:ReceiveMessage`
+
+Typically, you should set up an IAM policy, create a user, and apply the IAM policy to the user.
+A sample policy granting the consumer permissions above is as follows:
+[source,json]
+    {
+      "Statement": [
+        {
+          "Action": [
+            "sqs:ChangeMessageVisibility",
+            "sqs:ChangeMessageVisibilityBatch",
+            "sqs:DeleteMessage",
+            "sqs:DeleteMessageBatch",
+            "sqs:GetQueueAttributes",
+            "sqs:GetQueueUrl",
+            "sqs:ListQueues",
+            "sqs:ReceiveMessage"
+          ],
+          "Effect": "Allow",
+          "Resource": [
+            "arn:aws:sqs:us-east-1:123456789012:Logstash"
+          ]
+        }
+      ]
+    }
+
+See http://aws.amazon.com/iam/ for more details on setting up AWS identities.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Sqs Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-id_field>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-md5_field>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-polling_frequency>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No
+| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sent_timestamp_field>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-access_key_id"]
+===== `access_key_id`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
+
+1. Static configuration, using `access_key_id` and `secret_access_key` params in the Logstash plugin config
+2. External credentials file specified by `aws_credentials_file`
+3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
+4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
+5. IAM Instance Profile (available when running inside EC2)
+
+[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"]
+===== `aws_credentials_file`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Path to a YAML file containing a hash of AWS credentials.
+This file will only be loaded if `access_key_id` and
+`secret_access_key` aren't set. The contents of the
+file should look like this:
+
+[source,ruby]
+----------------------------------
+    :access_key_id: "12345"
+    :secret_access_key: "54321"
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-id_field"]
+===== `id_field`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Name of the event field in which to store the SQS message ID.
+
+[id="{version}-plugins-{type}s-{plugin}-md5_field"]
+===== `md5_field`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Name of the event field in which to store the SQS message MD5 checksum.
+
+[id="{version}-plugins-{type}s-{plugin}-polling_frequency"]
+===== `polling_frequency`
+
+  * Value type is <>
+  * Default value is `20`
+
+Polling frequency, in seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_uri"]
+===== `proxy_uri`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+URI of a proxy server, if required.
+
+[id="{version}-plugins-{type}s-{plugin}-queue"]
+===== `queue`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+Name of the SQS queue to pull messages from. Note that this is just the name of the queue, not the URL or ARN.
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+  * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
+  * Default value is `"us-east-1"`
+
+The AWS Region.
+
+[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
+===== `secret_access_key`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The AWS Secret Access Key.
+
+[id="{version}-plugins-{type}s-{plugin}-sent_timestamp_field"]
+===== `sent_timestamp_field`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Name of the event field in which to store the SQS message Sent Timestamp.
+
+[id="{version}-plugins-{type}s-{plugin}-session_token"]
+===== `session_token`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The AWS Session token for temporary credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-threads"]
+===== `threads`
+
+  * Value type is <>
+  * Default value is `1`
+
+The number of threads to use for polling the queue.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/sqs-v3.0.6.asciidoc b/docs/versioned-plugins/inputs/sqs-v3.0.6.asciidoc
new file mode 100644
index 000000000..e9334aebf
--- /dev/null
+++ b/docs/versioned-plugins/inputs/sqs-v3.0.6.asciidoc
@@ -0,0 +1,220 @@
+:plugin: sqs
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.6
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-sqs/blob/v3.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Sqs input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+
+Pull events from an Amazon Web Services Simple Queue Service (SQS) queue.
+
+SQS is a simple, scalable queue system that is part of the
+Amazon Web Services suite of tools.
+
+Although SQS is similar to other queuing systems like AMQP, it
+uses a custom API and requires that you have an AWS account.
+See http://aws.amazon.com/sqs/ for more details on how SQS works,
+what the pricing schedule looks like and how to set up a queue.
+
+To use this plugin, you *must*:
+
+ * Have an AWS account
+ * Set up an SQS queue
+ * Create an identity that has access to consume messages from the queue.
+ +The "consumer" identity must have the following permissions on the queue: + + * `sqs:ChangeMessageVisibility` + * `sqs:ChangeMessageVisibilityBatch` + * `sqs:DeleteMessage` + * `sqs:DeleteMessageBatch` + * `sqs:GetQueueAttributes` + * `sqs:GetQueueUrl` + * `sqs:ListQueues` + * `sqs:ReceiveMessage` + +Typically, you should setup an IAM policy, create a user and apply the IAM policy to the user. +A sample policy is as follows: +[source,json] + { + "Statement": [ + { + "Action": [ + "sqs:ChangeMessageVisibility", + "sqs:ChangeMessageVisibilityBatch", + "sqs:GetQueueAttributes", + "sqs:GetQueueUrl", + "sqs:ListQueues", + "sqs:SendMessage", + "sqs:SendMessageBatch" + ], + "Effect": "Allow", + "Resource": [ + "arn:aws:sqs:us-east-1:123456789012:Logstash" + ] + } + ] + } + +See http://aws.amazon.com/iam/ for more details on setting up AWS identities. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Sqs Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-id_field>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-md5_field>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-polling_frequency>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sent_timestamp_field>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-access_key_id"] +===== `access_key_id` + + * Value type is <> + * There is no default value for this setting. + +This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: + +1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config +2. External credentials file specified by `aws_credentials_file` +3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` +4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` +5. IAM Instance Profile (available when running inside EC2) + +[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] +===== `aws_credentials_file` + + * Value type is <> + * There is no default value for this setting. + +Path to YAML file containing a hash of AWS credentials. +This file will only be loaded if `access_key_id` and +`secret_access_key` aren't set. 
+
+[id="{version}-plugins-{type}s-{plugin}-id_field"]
+===== `id_field`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Name of the event field in which to store the SQS message ID.
+
+[id="{version}-plugins-{type}s-{plugin}-md5_field"]
+===== `md5_field`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Name of the event field in which to store the SQS message MD5 checksum.
+
+[id="{version}-plugins-{type}s-{plugin}-polling_frequency"]
+===== `polling_frequency`
+
+ * Value type is <<number,number>>
+ * Default value is `20`
+
+Polling frequency, in seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_uri"]
+===== `proxy_uri`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+URI of the proxy server, if required.
+
+[id="{version}-plugins-{type}s-{plugin}-queue"]
+===== `queue`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Name of the SQS queue to pull messages from. Note that this is just the name of the queue, not the URL or ARN.
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+ * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
+ * Default value is `"us-east-1"`
+
+The AWS Region.
+
+[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
+===== `secret_access_key`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The AWS Secret Access Key.
+
+[id="{version}-plugins-{type}s-{plugin}-sent_timestamp_field"]
+===== `sent_timestamp_field`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Name of the event field in which to store the SQS message Sent Timestamp.
+
+[id="{version}-plugins-{type}s-{plugin}-session_token"]
+===== `session_token`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The AWS Session token for temporary credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-threads"]
+===== `threads`
+
+ * Value type is <<number,number>>
+ * Default value is `1`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/stdin-index.asciidoc b/docs/versioned-plugins/inputs/stdin-index.asciidoc
new file mode 100644
index 000000000..18bb84826
--- /dev/null
+++ b/docs/versioned-plugins/inputs/stdin-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: stdin
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <<v3.2.5-plugins-inputs-stdin,v3.2.5>> | 2017-11-07
+| <<v3.2.4-plugins-inputs-stdin,v3.2.4>> | 2017-08-16
+| <<v3.2.3-plugins-inputs-stdin,v3.2.3>> | 2017-06-23
+|=======================================================================
+
+include::stdin-v3.2.5.asciidoc[]
+include::stdin-v3.2.4.asciidoc[]
+include::stdin-v3.2.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/stdin-v3.2.3.asciidoc b/docs/versioned-plugins/inputs/stdin-v3.2.3.asciidoc
new file mode 100644
index 000000000..895ecac1f
--- /dev/null
+++ b/docs/versioned-plugins/inputs/stdin-v3.2.3.asciidoc
@@ -0,0 +1,39 @@
+:plugin: stdin
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.2.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-stdin/blob/v3.2.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Stdin input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events from standard input.
+
+By default, each event is assumed to be one line. If you
+want to join lines, you'll want to use the multiline codec.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Stdin Input Configuration Options
+
+There are no special configuration options for this plugin,
+but it does support the <<{version}-plugins-{type}s-{plugin}-common-options>>.
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/stdin-v3.2.4.asciidoc b/docs/versioned-plugins/inputs/stdin-v3.2.4.asciidoc
new file mode 100644
index 000000000..ab31da691
--- /dev/null
+++ b/docs/versioned-plugins/inputs/stdin-v3.2.4.asciidoc
@@ -0,0 +1,35 @@
+:plugin: stdin
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.2.4
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-input-stdin/blob/v3.2.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Stdin input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events from standard input.
+
+By default, each event is assumed to be one line. If you
+want to join lines, you'll want to use the multiline codec.
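+
+For example, a minimal sketch that uses the multiline codec to fold indented
+continuation lines into the preceding event (the pattern is illustrative):
+
+    input {
+      stdin {
+        codec => multiline {
+          pattern => "^\s"
+          what => "previous"
+        }
+      }
+    }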
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Stdin Input Configuration Options
+
+There are no special configuration options for this plugin,
+but it does support the <<{version}-plugins-{type}s-{plugin}-common-options>>.
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/stdin-v3.2.5.asciidoc b/docs/versioned-plugins/inputs/stdin-v3.2.5.asciidoc
new file mode 100644
index 000000000..33cc8917b
--- /dev/null
+++ b/docs/versioned-plugins/inputs/stdin-v3.2.5.asciidoc
@@ -0,0 +1,35 @@
+:plugin: stdin
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.2.5
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-input-stdin/blob/v3.2.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Stdin input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events from standard input.
+
+By default, each event is assumed to be one line. If you
+want to join lines, you'll want to use the multiline codec.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Stdin Input Configuration Options
+
+There are no special configuration options for this plugin,
+but it does support the <<{version}-plugins-{type}s-{plugin}-common-options>>.
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/stomp-index.asciidoc b/docs/versioned-plugins/inputs/stomp-index.asciidoc
new file mode 100644
index 000000000..2ecffd94b
--- /dev/null
+++ b/docs/versioned-plugins/inputs/stomp-index.asciidoc
@@ -0,0 +1,18 @@
+:plugin: stomp
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <<v3.0.7-plugins-inputs-stomp,v3.0.7>> | 2017-11-07
+| <<v3.0.6-plugins-inputs-stomp,v3.0.6>> | 2017-09-07
+| <<v3.0.5-plugins-inputs-stomp,v3.0.5>> | 2017-08-16
+| <<v3.0.4-plugins-inputs-stomp,v3.0.4>> | 2017-06-23
+|=======================================================================
+
+include::stomp-v3.0.7.asciidoc[]
+include::stomp-v3.0.6.asciidoc[]
+include::stomp-v3.0.5.asciidoc[]
+include::stomp-v3.0.4.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/stomp-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/stomp-v3.0.4.asciidoc
new file mode 100644
index 000000000..4677dff6e
--- /dev/null
+++ b/docs/versioned-plugins/inputs/stomp-v3.0.4.asciidoc
@@ -0,0 +1,119 @@
+:plugin: stomp
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-stomp/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Stomp input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Creates events received with the STOMP protocol.
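+
+For example, a minimal sketch that subscribes to the destination documented
+below (the host is illustrative):
+
+    input {
+      stomp {
+        host => "stomp.example.com"
+        destination => "/topic/logstash"
+      }
+    }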
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Stomp Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-destination>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-reconnect>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-vhost>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-destination"]
+===== `destination`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The destination to read events from.
+
+Example: `/topic/logstash`
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * Default value is `"localhost"`
+
+The address of the STOMP server.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <<password,password>>
+ * Default value is `""`
+
+The password to authenticate with.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <<number,number>>
+ * Default value is `61613`
+
+The port to connect to on your STOMP server.
+
+[id="{version}-plugins-{type}s-{plugin}-reconnect"]
+===== `reconnect`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Automatically reconnect to the server when the connection is lost.
+
+[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"]
+===== `reconnect_interval`
+
+ * Value type is <<number,number>>
+ * Default value is `30`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <<string,string>>
+ * Default value is `""`
+
+The username to authenticate with.
+
+[id="{version}-plugins-{type}s-{plugin}-vhost"]
+===== `vhost`
+
+ * Value type is <<string,string>>
+ * Default value is `nil`
+
+The vhost to use.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/stomp-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/stomp-v3.0.5.asciidoc
new file mode 100644
index 000000000..65ae97efd
--- /dev/null
+++ b/docs/versioned-plugins/inputs/stomp-v3.0.5.asciidoc
@@ -0,0 +1,119 @@
+:plugin: stomp
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-input-stomp/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Stomp input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Creates events received with the STOMP protocol.
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Stomp Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reconnect>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-destination"] +===== `destination` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The destination to read events from. + +Example: `/topic/logstash` + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * Default value is `"localhost"` + +The address of the STOMP server. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * Default value is `""` + +The password to authenticate with. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `61613` + +The port to connet to on your STOMP server. + +[id="{version}-plugins-{type}s-{plugin}-reconnect"] +===== `reconnect` + + * Value type is <> + * Default value is `true` + +Auto reconnect + +[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] +===== `reconnect_interval` + + * Value type is <> + * Default value is `30` + + + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * Default value is `""` + +The username to authenticate with. + +[id="{version}-plugins-{type}s-{plugin}-vhost"] +===== `vhost` + + * Value type is <> + * Default value is `nil` + +The vhost to use + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/stomp-v3.0.6.asciidoc b/docs/versioned-plugins/inputs/stomp-v3.0.6.asciidoc new file mode 100644 index 000000000..27abce30b --- /dev/null +++ b/docs/versioned-plugins/inputs/stomp-v3.0.6.asciidoc @@ -0,0 +1,119 @@ +:plugin: stomp +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.6 +:release_date: 2017-09-07 +:changelog_url: https://github.com/logstash-plugins/logstash-input-stomp/blob/v3.0.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Stomp input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Creates events received with the STOMP protocol. 
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Stomp Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reconnect>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-destination"] +===== `destination` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The destination to read events from. + +Example: `/topic/logstash` + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * Default value is `"localhost"` + +The address of the STOMP server. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * Default value is `""` + +The password to authenticate with. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `61613` + +The port to connet to on your STOMP server. + +[id="{version}-plugins-{type}s-{plugin}-reconnect"] +===== `reconnect` + + * Value type is <> + * Default value is `true` + +Auto reconnect + +[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] +===== `reconnect_interval` + + * Value type is <> + * Default value is `30` + + + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * Default value is `""` + +The username to authenticate with. + +[id="{version}-plugins-{type}s-{plugin}-vhost"] +===== `vhost` + + * Value type is <> + * Default value is `nil` + +The vhost to use + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/stomp-v3.0.7.asciidoc b/docs/versioned-plugins/inputs/stomp-v3.0.7.asciidoc new file mode 100644 index 000000000..1443b72f6 --- /dev/null +++ b/docs/versioned-plugins/inputs/stomp-v3.0.7.asciidoc @@ -0,0 +1,119 @@ +:plugin: stomp +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.7 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-input-stomp/blob/v3.0.7/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Stomp input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Creates events received with the STOMP protocol. 
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Stomp Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-destination>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-reconnect>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-vhost>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-destination"]
+===== `destination`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The destination to read events from.
+
+Example: `/topic/logstash`
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * Default value is `"localhost"`
+
+The address of the STOMP server.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <<password,password>>
+ * Default value is `""`
+
+The password to authenticate with.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <<number,number>>
+ * Default value is `61613`
+
+The port to connect to on your STOMP server.
+
+[id="{version}-plugins-{type}s-{plugin}-reconnect"]
+===== `reconnect`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Automatically reconnect to the server when the connection is lost.
+
+[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"]
+===== `reconnect_interval`
+
+ * Value type is <<number,number>>
+ * Default value is `30`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <<string,string>>
+ * Default value is `""`
+
+The username to authenticate with.
+
+[id="{version}-plugins-{type}s-{plugin}-vhost"]
+===== `vhost`
+
+ * Value type is <<string,string>>
+ * Default value is `nil`
+
+The vhost to use.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/syslog-index.asciidoc b/docs/versioned-plugins/inputs/syslog-index.asciidoc
new file mode 100644
index 000000000..6cb1c6ec2
--- /dev/null
+++ b/docs/versioned-plugins/inputs/syslog-index.asciidoc
@@ -0,0 +1,18 @@
+:plugin: syslog
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <<v3.2.4-plugins-inputs-syslog,v3.2.4>> | 2017-12-06
+| <<v3.2.3-plugins-inputs-syslog,v3.2.3>> | 2017-11-07
+| <<v3.2.2-plugins-inputs-syslog,v3.2.2>> | 2017-08-16
+| <<v3.2.1-plugins-inputs-syslog,v3.2.1>> | 2017-06-23
+|=======================================================================
+
+include::syslog-v3.2.4.asciidoc[]
+include::syslog-v3.2.3.asciidoc[]
+include::syslog-v3.2.2.asciidoc[]
+include::syslog-v3.2.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/syslog-v3.2.1.asciidoc b/docs/versioned-plugins/inputs/syslog-v3.2.1.asciidoc
new file mode 100644
index 000000000..99c01fe15
--- /dev/null
+++ b/docs/versioned-plugins/inputs/syslog-v3.2.1.asciidoc
@@ -0,0 +1,144 @@
+:plugin: syslog
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.2.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-syslog/blob/v3.2.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Syslog input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read syslog messages as events over the network.
+
+This input is a good choice if you already use syslog today.
+It is also a good choice if you want to receive logs from
+appliances and network devices where you cannot run your own
+log collector.
+
+Of course, 'syslog' is a very muddy term. This input only supports `RFC3164`
+syslog with some small modifications. The date format is allowed to be
+`RFC3164` style or `ISO8601`. Otherwise the rest of `RFC3164` must be obeyed.
+If you do not use `RFC3164`, do not use this input.
+
+For more information see the http://www.ietf.org/rfc/rfc3164.txt[RFC3164 page].
+
+Note: This input will start listeners on both TCP and UDP.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Syslog Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-facility_labels>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-locale>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-severity_labels>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-timezone>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-use_labels>> |<<boolean,boolean>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-facility_labels"]
+===== `facility_labels`
+
+ * Value type is <<array,array>>
+ * Default value is `["kernel", "user-level", "mail", "system", "security/authorization", "syslogd", "line printer", "network news", "UUCP", "clock", "security/authorization", "FTP", "NTP", "log audit", "log alert", "clock", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7"]`
+
+Labels for facility levels. These are defined in RFC3164.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <<string,string>>
+ * Default value is `"0.0.0.0"`
+
+The address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-locale"]
+===== `locale`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Specify a locale to be used for date parsing, using either an IETF-BCP47 or POSIX language tag.
+Simple examples are `en`, `en-US` for BCP47, or `en_US` for POSIX.
+If not specified, the platform default will be used.
+
+Setting the locale is mostly necessary for parsing month names (pattern with MMM) and
+weekday names (pattern with EEE).
+
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <<number,number>>
+ * Default value is `514`
+
+The port to listen on. Remember that ports less than 1024 (privileged
+ports) may require root to use.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"]
+===== `proxy_protocol`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Proxy protocol support; only v1 is supported at this time:
+http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt
+
+[id="{version}-plugins-{type}s-{plugin}-severity_labels"]
+===== `severity_labels`
+
+ * Value type is <<array,array>>
+ * Default value is `["Emergency", "Alert", "Critical", "Error", "Warning", "Notice", "Informational", "Debug"]`
+
+Labels for severity levels. These are defined in RFC3164.
+
+[id="{version}-plugins-{type}s-{plugin}-timezone"]
+===== `timezone`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Specify a time zone canonical ID to be used for date parsing.
+The valid IDs are listed on the http://joda-time.sourceforge.net/timezones.html[Joda.org available time zones page].
+This is useful in case the time zone cannot be extracted from the value,
+and is not the platform default.
+If this is not specified, the platform default will be used.
+A canonical ID is good because it takes care of daylight saving time for you.
+For example, `America/Los_Angeles` or `Europe/Paris` are valid IDs.
+
+[id="{version}-plugins-{type}s-{plugin}-use_labels"]
+===== `use_labels`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Use label parsing for severity and facility levels.
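+
+For example, a minimal sketch that listens for syslog traffic on a
+non-privileged port (the port is illustrative):
+
+    input {
+      syslog {
+        port => 5514
+      }
+    }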
+ + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/syslog-v3.2.2.asciidoc b/docs/versioned-plugins/inputs/syslog-v3.2.2.asciidoc new file mode 100644 index 000000000..bd41dc3e5 --- /dev/null +++ b/docs/versioned-plugins/inputs/syslog-v3.2.2.asciidoc @@ -0,0 +1,144 @@ +:plugin: syslog +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.2.2 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-input-syslog/blob/v3.2.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Syslog input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read syslog messages as events over the network. + +This input is a good choice if you already use syslog today. +It is also a good choice if you want to receive logs from +appliances and network devices where you cannot run your own +log collector. + +Of course, 'syslog' is a very muddy term. This input only supports `RFC3164` +syslog with some small modifications. The date format is allowed to be +`RFC3164` style or `ISO8601`. Otherwise the rest of `RFC3164` must be obeyed. +If you do not use `RFC3164`, do not use this input. + +For more information see the http://www.ietf.org/rfc/rfc3164.txt[RFC3164 page]. + +Note: This input will start listeners on both TCP and UDP. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Syslog Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-facility_labels>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-locale>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-severity_labels>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timezone>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-use_labels>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-facility_labels"] +===== `facility_labels` + + * Value type is <> + * Default value is `["kernel", "user-level", "mail", "system", "security/authorization", "syslogd", "line printer", "network news", "UUCP", "clock", "security/authorization", "FTP", "NTP", "log audit", "log alert", "clock", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7"]` + +Labels for facility levels. These are defined in RFC3164. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +The address to listen on. + +[id="{version}-plugins-{type}s-{plugin}-locale"] +===== `locale` + + * Value type is <> + * There is no default value for this setting. 
+ +Specify a locale to be used for date parsing using either IETF-BCP47 or POSIX language tag. +Simple examples are `en`,`en-US` for BCP47 or `en_US` for POSIX. +If not specified, the platform default will be used. + +The locale is mostly necessary to be set for parsing month names (pattern with MMM) and +weekday names (pattern with EEE). + + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `514` + +The port to listen on. Remember that ports less than 1024 (privileged +ports) may require root to use. + +[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"] +===== `proxy_protocol` + + * Value type is <> + * Default value is `false` + +Proxy protocol support, only v1 is supported at this time +http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt + +[id="{version}-plugins-{type}s-{plugin}-severity_labels"] +===== `severity_labels` + + * Value type is <> + * Default value is `["Emergency", "Alert", "Critical", "Error", "Warning", "Notice", "Informational", "Debug"]` + +Labels for severity levels. These are defined in RFC3164. + +[id="{version}-plugins-{type}s-{plugin}-timezone"] +===== `timezone` + + * Value type is <> + * There is no default value for this setting. + +Specify a time zone canonical ID to be used for date parsing. +The valid IDs are listed on the [Joda.org available time zones page](http://joda-time.sourceforge.net/timezones.html). +This is useful in case the time zone cannot be extracted from the value, +and is not the platform default. +If this is not specified the platform default will be used. +Canonical ID is good as it takes care of daylight saving time for you +For example, `America/Los_Angeles` or `Europe/France` are valid IDs. + +[id="{version}-plugins-{type}s-{plugin}-use_labels"] +===== `use_labels` + + * Value type is <> + * Default value is `true` + +Use label parsing for severity and facility levels. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/syslog-v3.2.3.asciidoc b/docs/versioned-plugins/inputs/syslog-v3.2.3.asciidoc new file mode 100644 index 000000000..02f0b3195 --- /dev/null +++ b/docs/versioned-plugins/inputs/syslog-v3.2.3.asciidoc @@ -0,0 +1,144 @@ +:plugin: syslog +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.2.3 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-input-syslog/blob/v3.2.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Syslog input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read syslog messages as events over the network. + +This input is a good choice if you already use syslog today. +It is also a good choice if you want to receive logs from +appliances and network devices where you cannot run your own +log collector. + +Of course, 'syslog' is a very muddy term. This input only supports `RFC3164` +syslog with some small modifications. The date format is allowed to be +`RFC3164` style or `ISO8601`. Otherwise the rest of `RFC3164` must be obeyed. +If you do not use `RFC3164`, do not use this input. 
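+
+For reference, a conforming `RFC3164` message (taken from the examples in the
+RFC itself) looks like this:
+
+    <34>Oct 11 22:14:15 mymachine su: 'su root' failed for lonvick on /dev/pts/8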
+ +For more information see the http://www.ietf.org/rfc/rfc3164.txt[RFC3164 page]. + +Note: This input will start listeners on both TCP and UDP. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Syslog Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-facility_labels>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-locale>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-severity_labels>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timezone>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-use_labels>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-facility_labels"] +===== `facility_labels` + + * Value type is <> + * Default value is `["kernel", "user-level", "mail", "system", "security/authorization", "syslogd", "line printer", "network news", "UUCP", "clock", "security/authorization", "FTP", "NTP", "log audit", "log alert", "clock", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7"]` + +Labels for facility levels. These are defined in RFC3164. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +The address to listen on. + +[id="{version}-plugins-{type}s-{plugin}-locale"] +===== `locale` + + * Value type is <> + * There is no default value for this setting. + +Specify a locale to be used for date parsing using either IETF-BCP47 or POSIX language tag. +Simple examples are `en`,`en-US` for BCP47 or `en_US` for POSIX. +If not specified, the platform default will be used. + +The locale is mostly necessary to be set for parsing month names (pattern with MMM) and +weekday names (pattern with EEE). + + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `514` + +The port to listen on. Remember that ports less than 1024 (privileged +ports) may require root to use. + +[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"] +===== `proxy_protocol` + + * Value type is <> + * Default value is `false` + +Proxy protocol support, only v1 is supported at this time +http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt + +[id="{version}-plugins-{type}s-{plugin}-severity_labels"] +===== `severity_labels` + + * Value type is <> + * Default value is `["Emergency", "Alert", "Critical", "Error", "Warning", "Notice", "Informational", "Debug"]` + +Labels for severity levels. These are defined in RFC3164. + +[id="{version}-plugins-{type}s-{plugin}-timezone"] +===== `timezone` + + * Value type is <> + * There is no default value for this setting. + +Specify a time zone canonical ID to be used for date parsing. +The valid IDs are listed on the [Joda.org available time zones page](http://joda-time.sourceforge.net/timezones.html). +This is useful in case the time zone cannot be extracted from the value, +and is not the platform default. 
+If this is not specified the platform default will be used. +Canonical ID is good as it takes care of daylight saving time for you +For example, `America/Los_Angeles` or `Europe/France` are valid IDs. + +[id="{version}-plugins-{type}s-{plugin}-use_labels"] +===== `use_labels` + + * Value type is <> + * Default value is `true` + +Use label parsing for severity and facility levels. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/syslog-v3.2.4.asciidoc b/docs/versioned-plugins/inputs/syslog-v3.2.4.asciidoc new file mode 100644 index 000000000..faa8a559f --- /dev/null +++ b/docs/versioned-plugins/inputs/syslog-v3.2.4.asciidoc @@ -0,0 +1,144 @@ +:plugin: syslog +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.2.4 +:release_date: 2017-12-06 +:changelog_url: https://github.com/logstash-plugins/logstash-input-syslog/blob/v3.2.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Syslog input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read syslog messages as events over the network. + +This input is a good choice if you already use syslog today. +It is also a good choice if you want to receive logs from +appliances and network devices where you cannot run your own +log collector. + +Of course, 'syslog' is a very muddy term. This input only supports `RFC3164` +syslog with some small modifications. The date format is allowed to be +`RFC3164` style or `ISO8601`. Otherwise the rest of `RFC3164` must be obeyed. +If you do not use `RFC3164`, do not use this input. + +For more information see the http://www.ietf.org/rfc/rfc3164.txt[RFC3164 page]. + +Note: This input will start listeners on both TCP and UDP. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Syslog Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-facility_labels>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-locale>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-severity_labels>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timezone>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-use_labels>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. 
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-facility_labels"]
+===== `facility_labels`
+
+ * Value type is <<array,array>>
+ * Default value is `["kernel", "user-level", "mail", "system", "security/authorization", "syslogd", "line printer", "network news", "UUCP", "clock", "security/authorization", "FTP", "NTP", "log audit", "log alert", "clock", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7"]`
+
+Labels for facility levels. These are defined in RFC3164.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <<string,string>>
+ * Default value is `"0.0.0.0"`
+
+The address to listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-locale"]
+===== `locale`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Specify a locale to be used for date parsing, using either an IETF-BCP47 or POSIX language tag.
+Simple examples are `en`, `en-US` for BCP47, or `en_US` for POSIX.
+If not specified, the platform default will be used.
+
+Setting the locale is mostly necessary for parsing month names (pattern with MMM) and
+weekday names (pattern with EEE).
+
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <<number,number>>
+ * Default value is `514`
+
+The port to listen on. Remember that ports less than 1024 (privileged
+ports) may require root to use.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"]
+===== `proxy_protocol`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Proxy protocol support; only v1 is supported at this time:
+http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt
+
+[id="{version}-plugins-{type}s-{plugin}-severity_labels"]
+===== `severity_labels`
+
+ * Value type is <<array,array>>
+ * Default value is `["Emergency", "Alert", "Critical", "Error", "Warning", "Notice", "Informational", "Debug"]`
+
+Labels for severity levels. These are defined in RFC3164.
+
+[id="{version}-plugins-{type}s-{plugin}-timezone"]
+===== `timezone`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Specify a time zone canonical ID to be used for date parsing.
+The valid IDs are listed on the http://joda-time.sourceforge.net/timezones.html[Joda.org available time zones page].
+This is useful in case the time zone cannot be extracted from the value,
+and is not the platform default.
+If this is not specified, the platform default will be used.
+A canonical ID is good because it takes care of daylight saving time for you.
+For example, `America/Los_Angeles` or `Europe/Paris` are valid IDs.
+
+[id="{version}-plugins-{type}s-{plugin}-use_labels"]
+===== `use_labels`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Use label parsing for severity and facility levels.
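+
+For example, a sketch that combines these options to parse timestamps from
+devices reporting in a fixed, non-default zone and locale (the values are
+illustrative):
+
+    input {
+      syslog {
+        port => 5514
+        timezone => "America/Los_Angeles"
+        locale => "en"
+      }
+    }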
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/tcp-index.asciidoc b/docs/versioned-plugins/inputs/tcp-index.asciidoc
new file mode 100644
index 000000000..270e51dd1
--- /dev/null
+++ b/docs/versioned-plugins/inputs/tcp-index.asciidoc
@@ -0,0 +1,26 @@
+:plugin: tcp
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <<v5.0.3-plugins-inputs-tcp,v5.0.3>> | 2017-12-27
+| <<v5.0.2-plugins-inputs-tcp,v5.0.2>> | 2017-08-30
+| <<v5.0.1-plugins-inputs-tcp,v5.0.1>> | 2017-08-16
+| <<v5.0.0-plugins-inputs-tcp,v5.0.0>> | 2017-08-04
+| <<v4.2.4-plugins-inputs-tcp,v4.2.4>> | 2017-08-30
+| <<v4.2.3-plugins-inputs-tcp,v4.2.3>> | 2017-08-18
+| <<v4.2.2-plugins-inputs-tcp,v4.2.2>> | 2017-06-30
+| <<v4.1.2-plugins-inputs-tcp,v4.1.2>> | 2017-06-23
+|=======================================================================
+
+include::tcp-v5.0.3.asciidoc[]
+include::tcp-v5.0.2.asciidoc[]
+include::tcp-v5.0.1.asciidoc[]
+include::tcp-v5.0.0.asciidoc[]
+include::tcp-v4.2.4.asciidoc[]
+include::tcp-v4.2.3.asciidoc[]
+include::tcp-v4.2.2.asciidoc[]
+include::tcp-v4.1.2.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/tcp-v4.1.2.asciidoc b/docs/versioned-plugins/inputs/tcp-v4.1.2.asciidoc
new file mode 100644
index 000000000..3f504273f
--- /dev/null
+++ b/docs/versioned-plugins/inputs/tcp-v4.1.2.asciidoc
@@ -0,0 +1,205 @@
+:plugin: tcp
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.1.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-tcp/blob/v4.1.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Tcp input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events over a TCP socket.
+
+Like stdin and file inputs, each event is assumed to be one line of text.
+
+Can either accept connections from clients or connect to a server,
+depending on `mode`.
+
+===== Accepting log4j2 logs
+
+Log4j2 can send JSON over a socket, and we can use that combined with our tcp
+input to accept the logs.
+
+First, we need to configure your application to send logs in JSON over a
+socket. The following log4j2.xml accomplishes this task.
+
+Note, you will want to change the `host` and `port` settings in this
+configuration to match your needs.
+
+    <Configuration>
+      <Appenders>
+        <Socket name="Socket" host="localhost" port="12345">
+          <JsonLayout compact="true" eventEol="true" />
+        </Socket>
+      </Appenders>
+      <Loggers>
+        <Root level="info">
+          <AppenderRef ref="Socket"/>
+        </Root>
+      </Loggers>
+    </Configuration>
+
+To accept this in Logstash, you will want a tcp input and a date filter:
+
+    input {
+      tcp {
+        port => 12345
+        codec => json
+      }
+    }
+
+and add a date filter to take log4j2's `timeMillis` field and use it as the
+event timestamp:
+
+    filter {
+      date {
+        match => [ "timeMillis", "UNIX_MS" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Tcp Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-data_timeout"] +===== `data_timeout` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `-1` + + + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +When mode is `server`, the address to listen on. +When mode is `client`, the address to connect to. + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `server`, `client` + * Default value is `"server"` + +Mode to operate in. `server` listens for client connections, +`client` connects to a server. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +When mode is `server`, the port to listen on. +When mode is `client`, the port to connect to. + +[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"] +===== `proxy_protocol` + + * Value type is <> + * Default value is `false` + +Proxy protocol support, only v1 is supported at this time +http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt + +[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"] +===== `ssl_cacert` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + +The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. + +[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] +===== `ssl_cert` + + * Value type is <> + * There is no default value for this setting. + +SSL certificate path + +[id="{version}-plugins-{type}s-{plugin}-ssl_enable"] +===== `ssl_enable` + + * Value type is <> + * Default value is `false` + +Enable SSL (must be set for other `ssl_` options to take effect). + +[id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"] +===== `ssl_extra_chain_certs` + + * Value type is <> + * Default value is `[]` + +An Array of extra X509 certificates to be added to the certificate chain. +Useful when the CA chain is not necessary in the system store. + +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + + * Value type is <> + * There is no default value for this setting. 
+ +SSL key path + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] +===== `ssl_key_passphrase` + + * Value type is <> + * Default value is `nil` + +SSL key passphrase + +[id="{version}-plugins-{type}s-{plugin}-ssl_verify"] +===== `ssl_verify` + + * Value type is <> + * Default value is `true` + +Verify the identity of the other end of the SSL connection against the CA. +For input, sets the field `sslsubject` to that of the client certificate. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/tcp-v4.2.2.asciidoc b/docs/versioned-plugins/inputs/tcp-v4.2.2.asciidoc new file mode 100644 index 000000000..d31d5b8f1 --- /dev/null +++ b/docs/versioned-plugins/inputs/tcp-v4.2.2.asciidoc @@ -0,0 +1,205 @@ +:plugin: tcp +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.2.2 +:release_date: 2017-06-30 +:changelog_url: https://github.com/logstash-plugins/logstash-input-tcp/blob/v4.2.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Tcp input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read events over a TCP socket. + +Like stdin and file inputs, each event is assumed to be one line of text. + +Can either accept connections from clients or connect to a server, +depending on `mode`. + +#### Accepting log4j2 logs + +Log4j2 can send JSON over a socket, and we can use that combined with our tcp +input to accept the logs. + +First, we need to configure your application to send logs in JSON over a +socket. The following log4j2.xml accomplishes this task. + +Note, you will want to change the `host` and `port` settings in this +configuration to match your needs. + + + + + + + + + + + + + + +To accept this in Logstash, you will want tcp input and a date filter: + + input { + tcp { + port => 12345 + codec => json + } + } + +and add a date filter to take log4j2's `timeMillis` field and use it as the +event timestamp + + filter { + date { + match => [ "timeMillis", "UNIX_MS" ] + } + } + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Tcp Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-data_timeout"] +===== `data_timeout` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `-1` + + + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +When mode is `server`, the address to listen on. +When mode is `client`, the address to connect to. + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `server`, `client` + * Default value is `"server"` + +Mode to operate in. `server` listens for client connections, +`client` connects to a server. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +When mode is `server`, the port to listen on. +When mode is `client`, the port to connect to. + +[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"] +===== `proxy_protocol` + + * Value type is <> + * Default value is `false` + +Proxy protocol support, only v1 is supported at this time +http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt + +[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"] +===== `ssl_cacert` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + +The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. + +[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] +===== `ssl_cert` + + * Value type is <> + * There is no default value for this setting. + +SSL certificate path + +[id="{version}-plugins-{type}s-{plugin}-ssl_enable"] +===== `ssl_enable` + + * Value type is <> + * Default value is `false` + +Enable SSL (must be set for other `ssl_` options to take effect). + +[id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"] +===== `ssl_extra_chain_certs` + + * Value type is <> + * Default value is `[]` + +An Array of extra X509 certificates to be added to the certificate chain. +Useful when the CA chain is not necessary in the system store. + +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + + * Value type is <> + * There is no default value for this setting. 
+
+SSL key path
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is <<password,password>>
+ * Default value is `nil`
+
+SSL key passphrase
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify"]
+===== `ssl_verify`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Verify the identity of the other end of the SSL connection against the CA.
+For input, sets the field `sslsubject` to that of the client certificate.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/tcp-v4.2.3.asciidoc b/docs/versioned-plugins/inputs/tcp-v4.2.3.asciidoc
new file mode 100644
index 000000000..3e204aee5
--- /dev/null
+++ b/docs/versioned-plugins/inputs/tcp-v4.2.3.asciidoc
@@ -0,0 +1,205 @@
+:plugin: tcp
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.2.3
+:release_date: 2017-08-18
+:changelog_url: https://github.com/logstash-plugins/logstash-input-tcp/blob/v4.2.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Tcp input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events over a TCP socket.
+
+As with the stdin and file inputs, each event is assumed to be one line of text.
+
+It can either accept connections from clients or connect to a server,
+depending on `mode`.
+
+==== Accepting log4j2 logs
+
+Log4j2 can send JSON over a socket, and we can use that combined with our tcp
+input to accept the logs.
+
+First, we need to configure your application to send logs in JSON over a
+socket. The following log4j2.xml accomplishes this task.
+
+Note that you will want to change the `host` and `port` settings in this
+configuration to match your needs.
+
+    <Configuration>
+      <Appenders>
+        <Socket name="Socket" host="localhost" port="12345">
+          <JsonLayout compact="true" eventEol="true" />
+        </Socket>
+      </Appenders>
+      <Loggers>
+        <Root level="info">
+          <AppenderRef ref="Socket"/>
+        </Root>
+      </Loggers>
+    </Configuration>
+
+To accept this in Logstash, you will want a tcp input and a date filter:
+
+    input {
+      tcp {
+        port => 12345
+        codec => json
+      }
+    }
+
+and add a date filter to take log4j2's `timeMillis` field and use it as the
+event timestamp:
+
+    filter {
+      date {
+        match => [ "timeMillis", "UNIX_MS" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Tcp Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-data_timeout"] +===== `data_timeout` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `-1` + + + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +When mode is `server`, the address to listen on. +When mode is `client`, the address to connect to. + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `server`, `client` + * Default value is `"server"` + +Mode to operate in. `server` listens for client connections, +`client` connects to a server. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +When mode is `server`, the port to listen on. +When mode is `client`, the port to connect to. + +[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"] +===== `proxy_protocol` + + * Value type is <> + * Default value is `false` + +Proxy protocol support, only v1 is supported at this time +http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt + +[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"] +===== `ssl_cacert` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + +The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. + +[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] +===== `ssl_cert` + + * Value type is <> + * There is no default value for this setting. + +SSL certificate path + +[id="{version}-plugins-{type}s-{plugin}-ssl_enable"] +===== `ssl_enable` + + * Value type is <> + * Default value is `false` + +Enable SSL (must be set for other `ssl_` options to take effect). + +[id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"] +===== `ssl_extra_chain_certs` + + * Value type is <> + * Default value is `[]` + +An Array of extra X509 certificates to be added to the certificate chain. +Useful when the CA chain is not necessary in the system store. + +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + + * Value type is <> + * There is no default value for this setting. 
+
+SSL key path
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is <<password,password>>
+ * Default value is `nil`
+
+SSL key passphrase
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify"]
+===== `ssl_verify`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Verify the identity of the other end of the SSL connection against the CA.
+For input, sets the field `sslsubject` to that of the client certificate.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/tcp-v4.2.4.asciidoc b/docs/versioned-plugins/inputs/tcp-v4.2.4.asciidoc
new file mode 100644
index 000000000..91a9a7b55
--- /dev/null
+++ b/docs/versioned-plugins/inputs/tcp-v4.2.4.asciidoc
@@ -0,0 +1,205 @@
+:plugin: tcp
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.2.4
+:release_date: 2017-08-30
+:changelog_url: https://github.com/logstash-plugins/logstash-input-tcp/blob/v4.2.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Tcp input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events over a TCP socket.
+
+As with the stdin and file inputs, each event is assumed to be one line of text.
+
+It can either accept connections from clients or connect to a server,
+depending on `mode`.
+
+==== Accepting log4j2 logs
+
+Log4j2 can send JSON over a socket, and we can use that combined with our tcp
+input to accept the logs.
+
+First, we need to configure your application to send logs in JSON over a
+socket. The following log4j2.xml accomplishes this task.
+
+Note that you will want to change the `host` and `port` settings in this
+configuration to match your needs.
+
+    <Configuration>
+      <Appenders>
+        <Socket name="Socket" host="localhost" port="12345">
+          <JsonLayout compact="true" eventEol="true" />
+        </Socket>
+      </Appenders>
+      <Loggers>
+        <Root level="info">
+          <AppenderRef ref="Socket"/>
+        </Root>
+      </Loggers>
+    </Configuration>
+
+To accept this in Logstash, you will want a tcp input and a date filter:
+
+    input {
+      tcp {
+        port => 12345
+        codec => json
+      }
+    }
+
+and add a date filter to take log4j2's `timeMillis` field and use it as the
+event timestamp:
+
+    filter {
+      date {
+        match => [ "timeMillis", "UNIX_MS" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Tcp Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-data_timeout"] +===== `data_timeout` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `-1` + + + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +When mode is `server`, the address to listen on. +When mode is `client`, the address to connect to. + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `server`, `client` + * Default value is `"server"` + +Mode to operate in. `server` listens for client connections, +`client` connects to a server. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +When mode is `server`, the port to listen on. +When mode is `client`, the port to connect to. + +[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"] +===== `proxy_protocol` + + * Value type is <> + * Default value is `false` + +Proxy protocol support, only v1 is supported at this time +http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt + +[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"] +===== `ssl_cacert` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + +The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. + +[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] +===== `ssl_cert` + + * Value type is <> + * There is no default value for this setting. + +SSL certificate path + +[id="{version}-plugins-{type}s-{plugin}-ssl_enable"] +===== `ssl_enable` + + * Value type is <> + * Default value is `false` + +Enable SSL (must be set for other `ssl_` options to take effect). + +[id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"] +===== `ssl_extra_chain_certs` + + * Value type is <> + * Default value is `[]` + +An Array of extra X509 certificates to be added to the certificate chain. +Useful when the CA chain is not necessary in the system store. + +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + + * Value type is <> + * There is no default value for this setting. 
+
+SSL key path
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is <<password,password>>
+ * Default value is `nil`
+
+SSL key passphrase
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify"]
+===== `ssl_verify`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Verify the identity of the other end of the SSL connection against the CA.
+For input, sets the field `sslsubject` to that of the client certificate.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/tcp-v5.0.0.asciidoc b/docs/versioned-plugins/inputs/tcp-v5.0.0.asciidoc
new file mode 100644
index 000000000..9876455c5
--- /dev/null
+++ b/docs/versioned-plugins/inputs/tcp-v5.0.0.asciidoc
@@ -0,0 +1,187 @@
+:plugin: tcp
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.0
+:release_date: 2017-08-04
+:changelog_url: https://github.com/logstash-plugins/logstash-input-tcp/blob/v5.0.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Tcp input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events over a TCP socket.
+
+As with the stdin and file inputs, each event is assumed to be one line of text.
+
+It can either accept connections from clients or connect to a server,
+depending on `mode`.
+
+==== Accepting log4j2 logs
+
+Log4j2 can send JSON over a socket, and we can use that combined with our tcp
+input to accept the logs.
+
+First, we need to configure your application to send logs in JSON over a
+socket. The following log4j2.xml accomplishes this task.
+
+Note that you will want to change the `host` and `port` settings in this
+configuration to match your needs.
+
+    <Configuration>
+      <Appenders>
+        <Socket name="Socket" host="localhost" port="12345">
+          <JsonLayout compact="true" eventEol="true" />
+        </Socket>
+      </Appenders>
+      <Loggers>
+        <Root level="info">
+          <AppenderRef ref="Socket"/>
+        </Root>
+      </Loggers>
+    </Configuration>
+
+To accept this in Logstash, you will want a tcp input and a date filter:
+
+    input {
+      tcp {
+        port => 12345
+        codec => json
+      }
+    }
+
+and add a date filter to take log4j2's `timeMillis` field and use it as the
+event timestamp:
+
+    filter {
+      date {
+        match => [ "timeMillis", "UNIX_MS" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Tcp Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +When mode is `server`, the address to listen on. +When mode is `client`, the address to connect to. + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `server`, `client` + * Default value is `"server"` + +Mode to operate in. `server` listens for client connections, +`client` connects to a server. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +When mode is `server`, the port to listen on. +When mode is `client`, the port to connect to. + +[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"] +===== `proxy_protocol` + + * Value type is <> + * Default value is `false` + +Proxy protocol support, only v1 is supported at this time +http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt + +[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] +===== `ssl_cert` + + * Value type is <> + * There is no default value for this setting. + +SSL certificate path + +[id="{version}-plugins-{type}s-{plugin}-ssl_enable"] +===== `ssl_enable` + + * Value type is <> + * Default value is `false` + +Enable SSL (must be set for other `ssl_` options to take effect). + +[id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"] +===== `ssl_extra_chain_certs` + + * Value type is <> + * Default value is `[]` + +An Array of extra X509 certificates to be added to the certificate chain. +Useful when the CA chain is not necessary in the system store. + +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + + * Value type is <> + * There is no default value for this setting. + +SSL key path + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] +===== `ssl_key_passphrase` + + * Value type is <> + * Default value is `nil` + +SSL key passphrase + +[id="{version}-plugins-{type}s-{plugin}-ssl_verify"] +===== `ssl_verify` + + * Value type is <> + * Default value is `true` + +Verify the identity of the other end of the SSL connection against the CA. +For input, sets the field `sslsubject` to that of the client certificate. 
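+
+For example, a minimal SSL-enabled server sketch might tie these options
+together as follows. This is illustrative only: the port, certificate, and
+key paths are placeholders, not files shipped with the plugin.
+
+    input {
+      tcp {
+        port => 6514
+        ssl_enable => true                           # required for the other ssl_ options
+        ssl_cert   => "/etc/logstash/tls/server.crt" # placeholder certificate path
+        ssl_key    => "/etc/logstash/tls/server.key" # placeholder key path
+        ssl_verify => true                           # also sets `sslsubject` on events
+      }
+    }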
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/tcp-v5.0.1.asciidoc b/docs/versioned-plugins/inputs/tcp-v5.0.1.asciidoc
new file mode 100644
index 000000000..dbccd54b2
--- /dev/null
+++ b/docs/versioned-plugins/inputs/tcp-v5.0.1.asciidoc
@@ -0,0 +1,187 @@
+:plugin: tcp
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.1
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-input-tcp/blob/v5.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Tcp input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events over a TCP socket.
+
+As with the stdin and file inputs, each event is assumed to be one line of text.
+
+It can either accept connections from clients or connect to a server,
+depending on `mode`.
+
+==== Accepting log4j2 logs
+
+Log4j2 can send JSON over a socket, and we can use that combined with our tcp
+input to accept the logs.
+
+First, we need to configure your application to send logs in JSON over a
+socket. The following log4j2.xml accomplishes this task.
+
+Note that you will want to change the `host` and `port` settings in this
+configuration to match your needs.
+
+    <Configuration>
+      <Appenders>
+        <Socket name="Socket" host="localhost" port="12345">
+          <JsonLayout compact="true" eventEol="true" />
+        </Socket>
+      </Appenders>
+      <Loggers>
+        <Root level="info">
+          <AppenderRef ref="Socket"/>
+        </Root>
+      </Loggers>
+    </Configuration>
+
+To accept this in Logstash, you will want a tcp input and a date filter:
+
+    input {
+      tcp {
+        port => 12345
+        codec => json
+      }
+    }
+
+and add a date filter to take log4j2's `timeMillis` field and use it as the
+event timestamp:
+
+    filter {
+      date {
+        match => [ "timeMillis", "UNIX_MS" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Tcp Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-mode>> |<<string,string>>, one of `["server", "client"]`|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<<boolean,boolean>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <<string,string>>
+ * Default value is `"0.0.0.0"`
+
+When mode is `server`, the address to listen on.
+When mode is `client`, the address to connect to.
+
+[id="{version}-plugins-{type}s-{plugin}-mode"]
+===== `mode`
+
+ * Value can be any of: `server`, `client`
+ * Default value is `"server"`
+
+Mode to operate in. `server` listens for client connections,
+`client` connects to a server.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * This is a required setting.
+ * Value type is <<number,number>>
+ * There is no default value for this setting.
+
+When mode is `server`, the port to listen on.
+When mode is `client`, the port to connect to.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"]
+===== `proxy_protocol`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Proxy protocol support; only v1 is supported at this time.
+http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_cert"]
+===== `ssl_cert`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+SSL certificate path
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_enable"]
+===== `ssl_enable`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Enable SSL (must be set for other `ssl_` options to take effect).
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"]
+===== `ssl_extra_chain_certs`
+
+ * Value type is <<array,array>>
+ * Default value is `[]`
+
+An Array of extra X509 certificates to be added to the certificate chain.
+Useful when the CA chain is not necessary in the system store.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+SSL key path
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is <<password,password>>
+ * Default value is `nil`
+
+SSL key passphrase
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify"]
+===== `ssl_verify`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Verify the identity of the other end of the SSL connection against the CA.
+For input, sets the field `sslsubject` to that of the client certificate.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/tcp-v5.0.2.asciidoc b/docs/versioned-plugins/inputs/tcp-v5.0.2.asciidoc
new file mode 100644
index 000000000..7ec7440c3
--- /dev/null
+++ b/docs/versioned-plugins/inputs/tcp-v5.0.2.asciidoc
@@ -0,0 +1,187 @@
+:plugin: tcp
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.2
+:release_date: 2017-08-30
+:changelog_url: https://github.com/logstash-plugins/logstash-input-tcp/blob/v5.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Tcp input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events over a TCP socket.
+
+As with the stdin and file inputs, each event is assumed to be one line of text.
+
+It can either accept connections from clients or connect to a server,
+depending on `mode`.
+
+==== Accepting log4j2 logs
+
+Log4j2 can send JSON over a socket, and we can use that combined with our tcp
+input to accept the logs.
+
+First, we need to configure your application to send logs in JSON over a
+socket. The following log4j2.xml accomplishes this task.
+
+Note that you will want to change the `host` and `port` settings in this
+configuration to match your needs.
+
+    <Configuration>
+      <Appenders>
+        <Socket name="Socket" host="localhost" port="12345">
+          <JsonLayout compact="true" eventEol="true" />
+        </Socket>
+      </Appenders>
+      <Loggers>
+        <Root level="info">
+          <AppenderRef ref="Socket"/>
+        </Root>
+      </Loggers>
+    </Configuration>
+
+To accept this in Logstash, you will want a tcp input and a date filter:
+
+    input {
+      tcp {
+        port => 12345
+        codec => json
+      }
+    }
+
+and add a date filter to take log4j2's `timeMillis` field and use it as the
+event timestamp:
+
+    filter {
+      date {
+        match => [ "timeMillis", "UNIX_MS" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Tcp Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-mode>> |<<string,string>>, one of `["server", "client"]`|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<<boolean,boolean>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <<string,string>>
+ * Default value is `"0.0.0.0"`
+
+When mode is `server`, the address to listen on.
+When mode is `client`, the address to connect to.
+
+[id="{version}-plugins-{type}s-{plugin}-mode"]
+===== `mode`
+
+ * Value can be any of: `server`, `client`
+ * Default value is `"server"`
+
+Mode to operate in. `server` listens for client connections,
+`client` connects to a server.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * This is a required setting.
+ * Value type is <<number,number>>
+ * There is no default value for this setting.
+
+When mode is `server`, the port to listen on.
+When mode is `client`, the port to connect to.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"]
+===== `proxy_protocol`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Proxy protocol support; only v1 is supported at this time.
+http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_cert"]
+===== `ssl_cert`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+SSL certificate path
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_enable"]
+===== `ssl_enable`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Enable SSL (must be set for other `ssl_` options to take effect).
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"]
+===== `ssl_extra_chain_certs`
+
+ * Value type is <<array,array>>
+ * Default value is `[]`
+
+An Array of extra X509 certificates to be added to the certificate chain.
+Useful when the CA chain is not necessary in the system store.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+SSL key path
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
+===== `ssl_key_passphrase`
+
+ * Value type is <<password,password>>
+ * Default value is `nil`
+
+SSL key passphrase
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_verify"]
+===== `ssl_verify`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Verify the identity of the other end of the SSL connection against the CA.
+For input, sets the field `sslsubject` to that of the client certificate.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/tcp-v5.0.3.asciidoc b/docs/versioned-plugins/inputs/tcp-v5.0.3.asciidoc
new file mode 100644
index 000000000..9ae075697
--- /dev/null
+++ b/docs/versioned-plugins/inputs/tcp-v5.0.3.asciidoc
@@ -0,0 +1,187 @@
+:plugin: tcp
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.3
+:release_date: 2017-12-27
+:changelog_url: https://github.com/logstash-plugins/logstash-input-tcp/blob/v5.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Tcp input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read events over a TCP socket.
+
+As with the stdin and file inputs, each event is assumed to be one line of text.
+
+It can either accept connections from clients or connect to a server,
+depending on `mode`.
+
+==== Accepting log4j2 logs
+
+Log4j2 can send JSON over a socket, and we can use that combined with our tcp
+input to accept the logs.
+
+First, we need to configure your application to send logs in JSON over a
+socket. The following log4j2.xml accomplishes this task.
+
+Note that you will want to change the `host` and `port` settings in this
+configuration to match your needs.
+
+    <Configuration>
+      <Appenders>
+        <Socket name="Socket" host="localhost" port="12345">
+          <JsonLayout compact="true" eventEol="true" />
+        </Socket>
+      </Appenders>
+      <Loggers>
+        <Root level="info">
+          <AppenderRef ref="Socket"/>
+        </Root>
+      </Loggers>
+    </Configuration>
+
+To accept this in Logstash, you will want a tcp input and a date filter:
+
+    input {
+      tcp {
+        port => 12345
+        codec => json
+      }
+    }
+
+and add a date filter to take log4j2's `timeMillis` field and use it as the
+event timestamp:
+
+    filter {
+      date {
+        match => [ "timeMillis", "UNIX_MS" ]
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Tcp Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +When mode is `server`, the address to listen on. +When mode is `client`, the address to connect to. + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `server`, `client` + * Default value is `"server"` + +Mode to operate in. `server` listens for client connections, +`client` connects to a server. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +When mode is `server`, the port to listen on. +When mode is `client`, the port to connect to. + +[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"] +===== `proxy_protocol` + + * Value type is <> + * Default value is `false` + +Proxy protocol support, only v1 is supported at this time +http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt + +[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] +===== `ssl_cert` + + * Value type is <> + * There is no default value for this setting. + +SSL certificate path + +[id="{version}-plugins-{type}s-{plugin}-ssl_enable"] +===== `ssl_enable` + + * Value type is <> + * Default value is `false` + +Enable SSL (must be set for other `ssl_` options to take effect). + +[id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"] +===== `ssl_extra_chain_certs` + + * Value type is <> + * Default value is `[]` + +An Array of extra X509 certificates to be added to the certificate chain. +Useful when the CA chain is not necessary in the system store. + +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + + * Value type is <> + * There is no default value for this setting. + +SSL key path + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] +===== `ssl_key_passphrase` + + * Value type is <> + * Default value is `nil` + +SSL key passphrase + +[id="{version}-plugins-{type}s-{plugin}-ssl_verify"] +===== `ssl_verify` + + * Value type is <> + * Default value is `true` + +Verify the identity of the other end of the SSL connection against the CA. +For input, sets the field `sslsubject` to that of the client certificate. 
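+
+As a sketch of `client` mode (the address and port here are illustrative
+placeholders), the input connects out to a remote listener instead of
+accepting connections:
+
+    input {
+      tcp {
+        mode => "client"     # connect to a server instead of listening
+        host => "10.0.0.5"   # placeholder address of the remote listener
+        port => 3333
+      }
+    }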
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/twitter-index.asciidoc b/docs/versioned-plugins/inputs/twitter-index.asciidoc
new file mode 100644
index 000000000..e2387b6f8
--- /dev/null
+++ b/docs/versioned-plugins/inputs/twitter-index.asciidoc
@@ -0,0 +1,18 @@
+:plugin: twitter
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <<v3.0.7-plugins-inputs-twitter,v3.0.7>> | 2017-11-07
+| <<v3.0.6-plugins-inputs-twitter,v3.0.6>> | 2017-08-16
+| <<v3.0.5-plugins-inputs-twitter,v3.0.5>> | 2017-06-23
+| <<v3.0.4-plugins-inputs-twitter,v3.0.4>> | 2017-05-08
+|=======================================================================
+
+include::twitter-v3.0.7.asciidoc[]
+include::twitter-v3.0.6.asciidoc[]
+include::twitter-v3.0.5.asciidoc[]
+include::twitter-v3.0.4.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/twitter-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/twitter-v3.0.4.asciidoc
new file mode 100644
index 000000000..dc5207e0b
--- /dev/null
+++ b/docs/versioned-plugins/inputs/twitter-v3.0.4.asciidoc
@@ -0,0 +1,225 @@
+:plugin: twitter
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-05-08
+:changelog_url: https://github.com/logstash-plugins/logstash-input-twitter/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Twitter input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Ingest events from the Twitter Streaming API.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Twitter Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-consumer_key>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-consumer_secret>> |<<password,password>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-follows>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-full_tweet>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ignore_retweets>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-keywords>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-languages>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-locations>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-oauth_token>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-oauth_token_secret>> |<<password,password>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-proxy_address>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy_port>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-rate_limit_reset_in>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-use_proxy>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-use_samples>> |<<boolean,boolean>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-consumer_key"]
+===== `consumer_key`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Your Twitter App's consumer key
+
+Don't know what this is? You need to create an "application"
+on Twitter, see this url: https://dev.twitter.com/apps/new
+
+[id="{version}-plugins-{type}s-{plugin}-consumer_secret"]
+===== `consumer_secret`
+
+ * This is a required setting.
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+Your Twitter App's consumer secret
+
+If you don't have one of these, you can create one by
+registering a new application with Twitter:
+https://dev.twitter.com/apps/new
+
+[id="{version}-plugins-{type}s-{plugin}-follows"]
+===== `follows`
+
+ * Value type is <<array,array>>
+ * There is no default value for this setting.
+
+A comma-separated list of user IDs, indicating the users to
+return statuses for in the Twitter stream.
+See https://dev.twitter.com/streaming/overview/request-parameters#follow
+for more details.
+
+[id="{version}-plugins-{type}s-{plugin}-full_tweet"]
+===== `full_tweet`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Record the full tweet object as given to us by the Twitter Streaming API.
+
+[id="{version}-plugins-{type}s-{plugin}-ignore_retweets"]
+===== `ignore_retweets`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Lets you ignore the retweets coming out of the Twitter API.
+
+[id="{version}-plugins-{type}s-{plugin}-keywords"]
+===== `keywords`
+
+ * Value type is <<array,array>>
+ * There is no default value for this setting.
+
+Any keywords to track in the Twitter stream. For multiple keywords, use
+the syntax ["foo", "bar"]. There's a logical OR between each keyword
+string listed and a logical AND between words separated by spaces per
+keyword string.
+See https://dev.twitter.com/streaming/overview/request-parameters#track
+for more details.
+
+The wildcard "*" option is not supported. To ingest a sample stream of
+all tweets, the use_samples option is recommended.
+
+[id="{version}-plugins-{type}s-{plugin}-languages"]
+===== `languages`
+
+ * Value type is <<array,array>>
+ * There is no default value for this setting.
+
+A list of BCP 47 language identifiers, corresponding to any of the languages
+listed on Twitter's advanced search page. Only tweets detected as written in
+the specified languages will be returned.
+
+[id="{version}-plugins-{type}s-{plugin}-locations"]
+===== `locations`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+A comma-separated list of longitude, latitude pairs specifying a set
+of bounding boxes to filter tweets by.
+See https://dev.twitter.com/streaming/overview/request-parameters#locations
+for more details.
+
+[id="{version}-plugins-{type}s-{plugin}-oauth_token"]
+===== `oauth_token`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Your oauth token.
+
+To get this, log in to Twitter with whatever account you want,
+then visit https://dev.twitter.com/apps
+
+Click on your app (used with the consumer_key and consumer_secret settings),
+then at the bottom of the page, click 'Create my access token', which
+will create an oauth token and secret bound to your account and that
+application.
+
+[id="{version}-plugins-{type}s-{plugin}-oauth_token_secret"]
+===== `oauth_token_secret`
+
+ * This is a required setting.
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+Your oauth token secret.
+
+To get this, log in to Twitter with whatever account you want,
+then visit https://dev.twitter.com/apps
+
+Click on your app (used with the consumer_key and consumer_secret settings),
+then at the bottom of the page, click 'Create my access token', which
+will create an oauth token and secret bound to your account and that
+application.
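+
+Putting the four required credentials together, a minimal configuration
+might look like the following sketch (all values are placeholders for your
+own application's credentials):
+
+    input {
+      twitter {
+        consumer_key       => "<your consumer key>"
+        consumer_secret    => "<your consumer secret>"
+        oauth_token        => "<your access token>"
+        oauth_token_secret => "<your access token secret>"
+        keywords           => ["logstash"]   # track at least one keyword
+      }
+    }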
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_address"]
+===== `proxy_address`
+
+ * Value type is <<string,string>>
+ * Default value is `"127.0.0.1"`
+
+Location of the proxy; by default, the same machine as the one running this Logstash instance.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_port"]
+===== `proxy_port`
+
+ * Value type is <<number,number>>
+ * Default value is `3128`
+
+Port where the proxy is listening; by default, 3128 (squid).
+
+[id="{version}-plugins-{type}s-{plugin}-rate_limit_reset_in"]
+===== `rate_limit_reset_in`
+
+ * Value type is <<number,number>>
+ * Default value is `300`
+
+Duration in seconds to wait before retrying a connection when Twitter responds with a 429 TooManyRequests.
+In some cases the 'x-rate-limit-reset' header is not set in the response and .rate_limit.reset_in
+is nil. If this occurs then we use the integer specified here. The default is 5 minutes.
+
+[id="{version}-plugins-{type}s-{plugin}-use_proxy"]
+===== `use_proxy`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Whether to use a proxy to handle the connections.
+
+[id="{version}-plugins-{type}s-{plugin}-use_samples"]
+===== `use_samples`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Returns a small random sample of all public statuses. The tweets returned
+by the default access level are the same, so if two different clients connect
+to this endpoint, they will see the same tweets. If set to true, the keywords,
+follows, locations, and languages options will be ignored.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/twitter-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/twitter-v3.0.5.asciidoc
new file mode 100644
index 000000000..92caaee25
--- /dev/null
+++ b/docs/versioned-plugins/inputs/twitter-v3.0.5.asciidoc
@@ -0,0 +1,226 @@
+:plugin: twitter
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-twitter/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Twitter input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Ingest events from the Twitter Streaming API.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Twitter Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-consumer_key>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-consumer_secret>> |<<password,password>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-follows>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-full_tweet>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ignore_retweets>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-keywords>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-languages>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-locations>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-oauth_token>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-oauth_token_secret>> |<<password,password>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-proxy_address>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy_port>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-rate_limit_reset_in>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-use_proxy>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-use_samples>> |<<boolean,boolean>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-consumer_key"]
+===== `consumer_key`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Your Twitter App's consumer key
+
+Don't know what this is? You need to create an "application"
+on Twitter, see this url: https://dev.twitter.com/apps/new
+
+[id="{version}-plugins-{type}s-{plugin}-consumer_secret"]
+===== `consumer_secret`
+
+ * This is a required setting.
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+Your Twitter App's consumer secret
+
+If you don't have one of these, you can create one by
+registering a new application with Twitter:
+https://dev.twitter.com/apps/new
+
+[id="{version}-plugins-{type}s-{plugin}-follows"]
+===== `follows`
+
+ * Value type is <<array,array>>
+ * There is no default value for this setting.
+
+A comma-separated list of user IDs, indicating the users to
+return statuses for in the Twitter stream.
+See https://dev.twitter.com/streaming/overview/request-parameters#follow
+for more details.
+
+[id="{version}-plugins-{type}s-{plugin}-full_tweet"]
+===== `full_tweet`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Record the full tweet object as given to us by the Twitter Streaming API.
+
+[id="{version}-plugins-{type}s-{plugin}-ignore_retweets"]
+===== `ignore_retweets`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Lets you ignore the retweets coming out of the Twitter API.
+
+[id="{version}-plugins-{type}s-{plugin}-keywords"]
+===== `keywords`
+
+ * Value type is <<array,array>>
+ * There is no default value for this setting.
+
+Any keywords to track in the Twitter stream. For multiple keywords, use
+the syntax ["foo", "bar"]. There's a logical OR between each keyword
+string listed and a logical AND between words separated by spaces per
+keyword string.
+See https://dev.twitter.com/streaming/overview/request-parameters#track
+for more details.
+
+The wildcard "*" option is not supported. To ingest a sample stream of
+all tweets, the use_samples option is recommended.
+
+[id="{version}-plugins-{type}s-{plugin}-languages"]
+===== `languages`
+
+ * Value type is <<array,array>>
+ * There is no default value for this setting.
+ +A list of BCP 47 language identifiers corresponding to any of the languages listed +on Twitter’s advanced search page will only return tweets that have been detected +as being written in the specified languages. + +[id="{version}-plugins-{type}s-{plugin}-locations"] +===== `locations` + + * Value type is <> + * There is no default value for this setting. + +A comma-separated list of longitude, latitude pairs specifying a set +of bounding boxes to filter tweets by. +See https://dev.twitter.com/streaming/overview/request-parameters#locations +for more details. + +[id="{version}-plugins-{type}s-{plugin}-oauth_token"] +===== `oauth_token` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Your oauth token. + +To get this, login to Twitter with whatever account you want, +then visit + +Click on your app (used with the consumer_key and consumer_secret settings) +Then at the bottom of the page, click 'Create my access token' which +will create an oauth token and secret bound to your account and that +application. + +[id="{version}-plugins-{type}s-{plugin}-oauth_token_secret"] +===== `oauth_token_secret` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Your oauth token secret. + +To get this, login to Twitter with whatever account you want, +then visit + +Click on your app (used with the consumer_key and consumer_secret settings) +Then at the bottom of the page, click 'Create my access token' which +will create an oauth token and secret bound to your account and that +application. + +[id="{version}-plugins-{type}s-{plugin}-proxy_address"] +===== `proxy_address` + + * Value type is <> + * Default value is `"127.0.0.1"` + +Location of the proxy, by default the same machine as the one running this LS instance + +[id="{version}-plugins-{type}s-{plugin}-proxy_port"] +===== `proxy_port` + + * Value type is <> + * Default value is `3128` + +Port where the proxy is listening, by default 3128 (squid) + +[id="{version}-plugins-{type}s-{plugin}-rate_limit_reset_in"] +===== `rate_limit_reset_in` + + * Value type is <> + * Default value is `300` + +Duration in seconds to wait before retrying a connection when twitter responds with a 429 TooManyRequests +In some cases the 'x-rate-limit-reset' header is not set in the response and .rate_limit.reset_in +is nil. If this occurs then we use the integer specified here. The default is 5 minutes. + +[id="{version}-plugins-{type}s-{plugin}-use_proxy"] +===== `use_proxy` + + * Value type is <> + * Default value is `false` + +When to use a proxy to handle the connections + +[id="{version}-plugins-{type}s-{plugin}-use_samples"] +===== `use_samples` + + * Value type is <> + * Default value is `false` + +Returns a small random sample of all public statuses. The tweets returned +by the default access level are the same, so if two different clients connect +to this endpoint, they will see the same tweets. If set to true, the keywords, +follows, locations, and languages options will be ignored. 
Default => false + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/twitter-v3.0.6.asciidoc b/docs/versioned-plugins/inputs/twitter-v3.0.6.asciidoc new file mode 100644 index 000000000..ecddbfc70 --- /dev/null +++ b/docs/versioned-plugins/inputs/twitter-v3.0.6.asciidoc @@ -0,0 +1,226 @@ +:plugin: twitter +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.6 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-input-twitter/blob/v3.0.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Twitter input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Ingest events from the Twitter Streaming API. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Twitter Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-consumer_key>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-consumer_secret>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-follows>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-full_tweet>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ignore_retweets>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keywords>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-languages>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-locations>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-oauth_token>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-oauth_token_secret>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-proxy_address>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-rate_limit_reset_in>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-use_proxy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-use_samples>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-consumer_key"] +===== `consumer_key` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Your Twitter App's consumer key + +Don't know what this is? You need to create an "application" +on Twitter, see this url: + +[id="{version}-plugins-{type}s-{plugin}-consumer_secret"] +===== `consumer_secret` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Your Twitter App's consumer secret + +If you don't have one of these, you can create one by +registering a new application with Twitter: + + +[id="{version}-plugins-{type}s-{plugin}-follows"] +===== `follows` + + * Value type is <> + * There is no default value for this setting. + +A comma separated list of user IDs, indicating the users to +return statuses for in the Twitter stream. 
+See https://dev.twitter.com/streaming/overview/request-parameters#follow +for more details. + +[id="{version}-plugins-{type}s-{plugin}-full_tweet"] +===== `full_tweet` + + * Value type is <> + * Default value is `false` + +Record full tweet object as given to us by the Twitter Streaming API. + +[id="{version}-plugins-{type}s-{plugin}-ignore_retweets"] +===== `ignore_retweets` + + * Value type is <> + * Default value is `false` + +Lets you ignore the retweets coming out of the Twitter API. Default => false + +[id="{version}-plugins-{type}s-{plugin}-keywords"] +===== `keywords` + + * Value type is <> + * There is no default value for this setting. + +Any keywords to track in the Twitter stream. For multiple keywords, use +the syntax ["foo", "bar"]. There's a logical OR between each keyword +string listed and a logical AND between words separated by spaces per +keyword string. +See https://dev.twitter.com/streaming/overview/request-parameters#track +for more details. + +The wildcard "*" option is not supported. To ingest a sample stream of +all tweets, the use_samples option is recommended. + +[id="{version}-plugins-{type}s-{plugin}-languages"] +===== `languages` + + * Value type is <> + * There is no default value for this setting. + +A list of BCP 47 language identifiers corresponding to any of the languages listed +on Twitter’s advanced search page will only return tweets that have been detected +as being written in the specified languages. + +[id="{version}-plugins-{type}s-{plugin}-locations"] +===== `locations` + + * Value type is <> + * There is no default value for this setting. + +A comma-separated list of longitude, latitude pairs specifying a set +of bounding boxes to filter tweets by. +See https://dev.twitter.com/streaming/overview/request-parameters#locations +for more details. + +[id="{version}-plugins-{type}s-{plugin}-oauth_token"] +===== `oauth_token` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Your oauth token. + +To get this, login to Twitter with whatever account you want, +then visit + +Click on your app (used with the consumer_key and consumer_secret settings) +Then at the bottom of the page, click 'Create my access token' which +will create an oauth token and secret bound to your account and that +application. + +[id="{version}-plugins-{type}s-{plugin}-oauth_token_secret"] +===== `oauth_token_secret` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Your oauth token secret. + +To get this, login to Twitter with whatever account you want, +then visit + +Click on your app (used with the consumer_key and consumer_secret settings) +Then at the bottom of the page, click 'Create my access token' which +will create an oauth token and secret bound to your account and that +application. 
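+
+For example, a sketch that tracks a couple of keywords but keeps only tweets
+detected as English (the credential values are placeholders):
+
+    input {
+      twitter {
+        consumer_key       => "<your consumer key>"
+        consumer_secret    => "<your consumer secret>"
+        oauth_token        => "<your access token>"
+        oauth_token_secret => "<your access token secret>"
+        keywords           => ["logstash", "elasticsearch"]
+        languages          => ["en"]   # BCP 47 language identifier
+      }
+    }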
+ +[id="{version}-plugins-{type}s-{plugin}-proxy_address"] +===== `proxy_address` + + * Value type is <> + * Default value is `"127.0.0.1"` + +Location of the proxy, by default the same machine as the one running this LS instance + +[id="{version}-plugins-{type}s-{plugin}-proxy_port"] +===== `proxy_port` + + * Value type is <> + * Default value is `3128` + +Port where the proxy is listening, by default 3128 (squid) + +[id="{version}-plugins-{type}s-{plugin}-rate_limit_reset_in"] +===== `rate_limit_reset_in` + + * Value type is <> + * Default value is `300` + +Duration in seconds to wait before retrying a connection when twitter responds with a 429 TooManyRequests +In some cases the 'x-rate-limit-reset' header is not set in the response and .rate_limit.reset_in +is nil. If this occurs then we use the integer specified here. The default is 5 minutes. + +[id="{version}-plugins-{type}s-{plugin}-use_proxy"] +===== `use_proxy` + + * Value type is <> + * Default value is `false` + +When to use a proxy to handle the connections + +[id="{version}-plugins-{type}s-{plugin}-use_samples"] +===== `use_samples` + + * Value type is <> + * Default value is `false` + +Returns a small random sample of all public statuses. The tweets returned +by the default access level are the same, so if two different clients connect +to this endpoint, they will see the same tweets. If set to true, the keywords, +follows, locations, and languages options will be ignored. Default => false + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/twitter-v3.0.7.asciidoc b/docs/versioned-plugins/inputs/twitter-v3.0.7.asciidoc new file mode 100644 index 000000000..db4273926 --- /dev/null +++ b/docs/versioned-plugins/inputs/twitter-v3.0.7.asciidoc @@ -0,0 +1,226 @@ +:plugin: twitter +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.7 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-input-twitter/blob/v3.0.7/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Twitter input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Ingest events from the Twitter Streaming API. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Twitter Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-consumer_key>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-consumer_secret>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-follows>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-full_tweet>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ignore_retweets>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keywords>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-languages>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-locations>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-oauth_token>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-oauth_token_secret>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-proxy_address>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy_port>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-rate_limit_reset_in>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-use_proxy>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-use_samples>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-consumer_key"]
+===== `consumer_key`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Your Twitter App's consumer key.
+
+Don't know what this is? You need to create an "application"
+on Twitter; see this URL: https://dev.twitter.com/apps/new
+
+[id="{version}-plugins-{type}s-{plugin}-consumer_secret"]
+===== `consumer_secret`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Your Twitter App's consumer secret.
+
+If you don't have one of these, you can create one by
+registering a new application with Twitter:
+https://dev.twitter.com/apps/new
+
+[id="{version}-plugins-{type}s-{plugin}-follows"]
+===== `follows`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A comma-separated list of user IDs, indicating the users whose
+statuses should be returned in the Twitter stream.
+See https://dev.twitter.com/streaming/overview/request-parameters#follow
+for more details.
+
+[id="{version}-plugins-{type}s-{plugin}-full_tweet"]
+===== `full_tweet`
+
+ * Value type is <>
+ * Default value is `false`
+
+Record the full tweet object as given to us by the Twitter Streaming API.
+
+[id="{version}-plugins-{type}s-{plugin}-ignore_retweets"]
+===== `ignore_retweets`
+
+ * Value type is <>
+ * Default value is `false`
+
+Lets you ignore retweets coming out of the Twitter API.
+
+[id="{version}-plugins-{type}s-{plugin}-keywords"]
+===== `keywords`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Any keywords to track in the Twitter stream. For multiple keywords, use
+the syntax `["foo", "bar"]`. There is a logical OR between the keyword
+strings listed and a logical AND between the words within each keyword
+string.
+See https://dev.twitter.com/streaming/overview/request-parameters#track
+for more details.
+
+The wildcard "*" option is not supported. To ingest a sample stream of
+all tweets, the `use_samples` option is recommended instead.
+
+[id="{version}-plugins-{type}s-{plugin}-languages"]
+===== `languages`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A list of BCP 47 language identifiers. When set, only tweets that have
+been detected as being written in one of the specified languages (any of
+the languages listed on Twitter’s advanced search page) are returned.
+
+[id="{version}-plugins-{type}s-{plugin}-locations"]
+===== `locations`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A comma-separated list of longitude, latitude pairs specifying a set
+of bounding boxes to filter tweets by.
+See https://dev.twitter.com/streaming/overview/request-parameters#locations
+for more details.
+
+[id="{version}-plugins-{type}s-{plugin}-oauth_token"]
+===== `oauth_token`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Your OAuth token.
+
+To get this, log in to Twitter with whatever account you want and visit
+https://dev.twitter.com/apps.
+
+Click on your app (the one used with the `consumer_key` and `consumer_secret`
+settings), then, at the bottom of the page, click 'Create my access token'.
+This creates an OAuth token and secret bound to your account and that
+application.
+
+[id="{version}-plugins-{type}s-{plugin}-oauth_token_secret"]
+===== `oauth_token_secret`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Your OAuth token secret.
+
+To get this, log in to Twitter with whatever account you want and visit
+https://dev.twitter.com/apps.
+
+Click on your app (the one used with the `consumer_key` and `consumer_secret`
+settings), then, at the bottom of the page, click 'Create my access token'.
+This creates an OAuth token and secret bound to your account and that
+application.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_address"]
+===== `proxy_address`
+
+ * Value type is <>
+ * Default value is `"127.0.0.1"`
+
+Location of the proxy. By default, this is the same machine as the one
+running this Logstash instance.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_port"]
+===== `proxy_port`
+
+ * Value type is <>
+ * Default value is `3128`
+
+Port where the proxy is listening. The default, 3128, is the standard Squid port.
+
+[id="{version}-plugins-{type}s-{plugin}-rate_limit_reset_in"]
+===== `rate_limit_reset_in`
+
+ * Value type is <>
+ * Default value is `300`
+
+Duration in seconds to wait before retrying a connection when Twitter responds
+with a 429 "Too Many Requests" error. In some cases the 'x-rate-limit-reset'
+header is not set in the response and `.rate_limit.reset_in` is nil. If this
+occurs, the integer specified here is used instead. The default is 5 minutes
+(300 seconds).
+
+[id="{version}-plugins-{type}s-{plugin}-use_proxy"]
+===== `use_proxy`
+
+ * Value type is <>
+ * Default value is `false`
+
+Whether to use a proxy to handle the connections.
+
+[id="{version}-plugins-{type}s-{plugin}-use_samples"]
+===== `use_samples`
+
+ * Value type is <>
+ * Default value is `false`
+
+Returns a small random sample of all public statuses. The tweets returned
+by the default access level are the same, so if two different clients connect
+to this endpoint, they will see the same tweets. If set to `true`, the
+`keywords`, `follows`, `locations`, and `languages` options are ignored.
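+
+For example, a sketch that ingests the sample stream instead of tracking
+keywords might look like this (the credential settings are required but
+omitted here for brevity):
+
+[source,ruby]
+    input {
+      twitter {
+        # consumer_key, consumer_secret, oauth_token, and
+        # oauth_token_secret must also be set here
+        use_samples => true
+      }
+    }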
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/udp-index.asciidoc b/docs/versioned-plugins/inputs/udp-index.asciidoc
new file mode 100644
index 000000000..298415f5e
--- /dev/null
+++ b/docs/versioned-plugins/inputs/udp-index.asciidoc
@@ -0,0 +1,20 @@
+:plugin: udp
+:type: input
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-12-19
+| <> | 2017-12-15
+| <> | 2017-11-07
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::udp-v3.2.1.asciidoc[]
+include::udp-v3.2.0.asciidoc[]
+include::udp-v3.1.3.asciidoc[]
+include::udp-v3.1.2.asciidoc[]
+include::udp-v3.1.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/inputs/udp-v3.1.1.asciidoc b/docs/versioned-plugins/inputs/udp-v3.1.1.asciidoc
new file mode 100644
index 000000000..2d7f97091
--- /dev/null
+++ b/docs/versioned-plugins/inputs/udp-v3.1.1.asciidoc
@@ -0,0 +1,106 @@
+:plugin: udp
+:type: input
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-input-udp/blob/v3.1.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="plugins-{type}-{plugin}"]
+
+=== Udp input plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Read messages as events over the network via udp. The only required
+configuration item is `port`, which specifies the udp port logstash
+will listen on for event streams.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Udp Input Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-buffer_size>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-queue_size>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-workers>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+input plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-buffer_size"]
+===== `buffer_size`
+
+ * Value type is <>
+ * Default value is `65536`
+
+The maximum packet size to read from the network
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * Default value is `"0.0.0.0"`
+
+The address which logstash will listen on.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The port which logstash will listen on. Remember that ports less
+than 1024 (privileged ports) may require root or elevated privileges to use.
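+
+For example, a minimal sketch that listens for UDP datagrams (the port
+number is an arbitrary unprivileged choice) might be:
+
+[source,ruby]
+    input {
+      udp {
+        port => 5000
+      }
+    }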
+ +[id="{version}-plugins-{type}s-{plugin}-queue_size"] +===== `queue_size` + + * Value type is <> + * Default value is `2000` + +This is the number of unprocessed UDP packets you can hold in memory +before packets will start dropping. + +[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] +===== `receive_buffer_bytes` + + * Value type is <> + * There is no default value for this setting. + +The socket receive buffer size in bytes. +If option is not set, the operating system default is used. +The operating system will use the max allowed value if receive_buffer_bytes is larger than allowed. +Consult your operating system documentation if you need to increase this max allowed value. + +[id="{version}-plugins-{type}s-{plugin}-workers"] +===== `workers` + + * Value type is <> + * Default value is `2` + +Number of threads processing packets + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/udp-v3.1.2.asciidoc b/docs/versioned-plugins/inputs/udp-v3.1.2.asciidoc new file mode 100644 index 000000000..c5ddc7554 --- /dev/null +++ b/docs/versioned-plugins/inputs/udp-v3.1.2.asciidoc @@ -0,0 +1,106 @@ +:plugin: udp +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.2 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-input-udp/blob/v3.1.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Udp input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read messages as events over the network via udp. The only required +configuration item is `port`, which specifies the udp port logstash +will listen on for event streams. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Udp Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-buffer_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-queue_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-workers>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-buffer_size"] +===== `buffer_size` + + * Value type is <> + * Default value is `65536` + +The maximum packet size to read from the network + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +The address which logstash will listen on. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The port which logstash will listen on. 
Remember that ports less +than 1024 (privileged ports) may require root or elevated privileges to use. + +[id="{version}-plugins-{type}s-{plugin}-queue_size"] +===== `queue_size` + + * Value type is <> + * Default value is `2000` + +This is the number of unprocessed UDP packets you can hold in memory +before packets will start dropping. + +[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] +===== `receive_buffer_bytes` + + * Value type is <> + * There is no default value for this setting. + +The socket receive buffer size in bytes. +If option is not set, the operating system default is used. +The operating system will use the max allowed value if receive_buffer_bytes is larger than allowed. +Consult your operating system documentation if you need to increase this max allowed value. + +[id="{version}-plugins-{type}s-{plugin}-workers"] +===== `workers` + + * Value type is <> + * Default value is `2` + +Number of threads processing packets + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/udp-v3.1.3.asciidoc b/docs/versioned-plugins/inputs/udp-v3.1.3.asciidoc new file mode 100644 index 000000000..3cab2a60f --- /dev/null +++ b/docs/versioned-plugins/inputs/udp-v3.1.3.asciidoc @@ -0,0 +1,106 @@ +:plugin: udp +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.3 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-input-udp/blob/v3.1.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Udp input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read messages as events over the network via udp. The only required +configuration item is `port`, which specifies the udp port logstash +will listen on for event streams. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Udp Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-buffer_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-queue_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-workers>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-buffer_size"] +===== `buffer_size` + + * Value type is <> + * Default value is `65536` + +The maximum packet size to read from the network + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +The address which logstash will listen on. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. 
+ * Value type is <> + * There is no default value for this setting. + +The port which logstash will listen on. Remember that ports less +than 1024 (privileged ports) may require root or elevated privileges to use. + +[id="{version}-plugins-{type}s-{plugin}-queue_size"] +===== `queue_size` + + * Value type is <> + * Default value is `2000` + +This is the number of unprocessed UDP packets you can hold in memory +before packets will start dropping. + +[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] +===== `receive_buffer_bytes` + + * Value type is <> + * There is no default value for this setting. + +The socket receive buffer size in bytes. +If option is not set, the operating system default is used. +The operating system will use the max allowed value if receive_buffer_bytes is larger than allowed. +Consult your operating system documentation if you need to increase this max allowed value. + +[id="{version}-plugins-{type}s-{plugin}-workers"] +===== `workers` + + * Value type is <> + * Default value is `2` + +Number of threads processing packets + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/udp-v3.2.0.asciidoc b/docs/versioned-plugins/inputs/udp-v3.2.0.asciidoc new file mode 100644 index 000000000..b1c2cea0e --- /dev/null +++ b/docs/versioned-plugins/inputs/udp-v3.2.0.asciidoc @@ -0,0 +1,106 @@ +:plugin: udp +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.2.0 +:release_date: 2017-12-15 +:changelog_url: https://github.com/logstash-plugins/logstash-input-udp/blob/v3.2.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Udp input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read messages as events over the network via udp. The only required +configuration item is `port`, which specifies the udp port logstash +will listen on for event streams. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Udp Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-buffer_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-queue_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-workers>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-buffer_size"] +===== `buffer_size` + + * Value type is <> + * Default value is `65536` + +The maximum packet size to read from the network + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +The address which logstash will listen on. 
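+
+For example, a sketch that binds explicitly to all interfaces on an
+arbitrary unprivileged port (both values shown are examples) might be:
+
+[source,ruby]
+    input {
+      udp {
+        host => "0.0.0.0"
+        port => 5000
+      }
+    }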
+ +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The port which logstash will listen on. Remember that ports less +than 1024 (privileged ports) may require root or elevated privileges to use. + +[id="{version}-plugins-{type}s-{plugin}-queue_size"] +===== `queue_size` + + * Value type is <> + * Default value is `2000` + +This is the number of unprocessed UDP packets you can hold in memory +before packets will start dropping. + +[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] +===== `receive_buffer_bytes` + + * Value type is <> + * There is no default value for this setting. + +The socket receive buffer size in bytes. +If option is not set, the operating system default is used. +The operating system will use the max allowed value if receive_buffer_bytes is larger than allowed. +Consult your operating system documentation if you need to increase this max allowed value. + +[id="{version}-plugins-{type}s-{plugin}-workers"] +===== `workers` + + * Value type is <> + * Default value is `2` + +Number of threads processing packets + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/udp-v3.2.1.asciidoc b/docs/versioned-plugins/inputs/udp-v3.2.1.asciidoc new file mode 100644 index 000000000..39707b01e --- /dev/null +++ b/docs/versioned-plugins/inputs/udp-v3.2.1.asciidoc @@ -0,0 +1,106 @@ +:plugin: udp +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.2.1 +:release_date: 2017-12-19 +:changelog_url: https://github.com/logstash-plugins/logstash-input-udp/blob/v3.2.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Udp input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read messages as events over the network via udp. The only required +configuration item is `port`, which specifies the udp port logstash +will listen on for event streams. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Udp Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-buffer_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-queue_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-workers>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. 
+ +  + +[id="{version}-plugins-{type}s-{plugin}-buffer_size"] +===== `buffer_size` + + * Value type is <> + * Default value is `65536` + +The maximum packet size to read from the network + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"0.0.0.0"` + +The address which logstash will listen on. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The port which logstash will listen on. Remember that ports less +than 1024 (privileged ports) may require root or elevated privileges to use. + +[id="{version}-plugins-{type}s-{plugin}-queue_size"] +===== `queue_size` + + * Value type is <> + * Default value is `2000` + +This is the number of unprocessed UDP packets you can hold in memory +before packets will start dropping. + +[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] +===== `receive_buffer_bytes` + + * Value type is <> + * There is no default value for this setting. + +The socket receive buffer size in bytes. +If option is not set, the operating system default is used. +The operating system will use the max allowed value if receive_buffer_bytes is larger than allowed. +Consult your operating system documentation if you need to increase this max allowed value. + +[id="{version}-plugins-{type}s-{plugin}-workers"] +===== `workers` + + * Value type is <> + * Default value is `2` + +Number of threads processing packets + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/unix-index.asciidoc b/docs/versioned-plugins/inputs/unix-index.asciidoc new file mode 100644 index 000000000..69c5c0aba --- /dev/null +++ b/docs/versioned-plugins/inputs/unix-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: unix +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-16 +| <> | 2017-06-23 +|======================================================================= + +include::unix-v3.0.6.asciidoc[] +include::unix-v3.0.5.asciidoc[] +include::unix-v3.0.4.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/unix-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/unix-v3.0.4.asciidoc new file mode 100644 index 000000000..5e4f10445 --- /dev/null +++ b/docs/versioned-plugins/inputs/unix-v3.0.4.asciidoc @@ -0,0 +1,103 @@ +:plugin: unix +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-input-unix/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Unix input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read events over a UNIX socket. + +Like `stdin` and `file` inputs, each event is assumed to be one line of text. + +Can either accept connections from clients or connect to a server, +depending on `mode`. 
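+
+As a sketch, a server-mode configuration that listens on a socket file
+(the path is an arbitrary example) might look like:
+
+[source,ruby]
+    input {
+      unix {
+        path => "/tmp/logstash.sock"
+      }
+    }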
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Unix Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-data_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-force_unlink>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-socket_not_present_retry_interval_seconds>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-data_timeout"] +===== `data_timeout` + + * Value type is <> + * Default value is `-1` + +The 'read' timeout in seconds. If a particular connection is idle for +more than this timeout period, we will assume it is dead and close it. + +If you never want to timeout, use -1. + +[id="{version}-plugins-{type}s-{plugin}-force_unlink"] +===== `force_unlink` + + * Value type is <> + * Default value is `false` + +Remove socket file in case of EADDRINUSE failure + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `server`, `client` + * Default value is `"server"` + +Mode to operate in. `server` listens for client connections, +`client` connects to a server. + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +When mode is `server`, the path to listen on. +When mode is `client`, the path to connect to. + +[id="{version}-plugins-{type}s-{plugin}-socket_not_present_retry_interval_seconds"] +===== `socket_not_present_retry_interval_seconds` + + * This is a required setting. + * Value type is <> + * Default value is `5` + +Amount of time in seconds to wait if the socket file is not present, before retrying. +Only positive values are allowed. + +This setting is only used if `mode` is `client`. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/unix-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/unix-v3.0.5.asciidoc new file mode 100644 index 000000000..66a633053 --- /dev/null +++ b/docs/versioned-plugins/inputs/unix-v3.0.5.asciidoc @@ -0,0 +1,103 @@ +:plugin: unix +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-input-unix/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Unix input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read events over a UNIX socket. + +Like `stdin` and `file` inputs, each event is assumed to be one line of text. 
+ +Can either accept connections from clients or connect to a server, +depending on `mode`. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Unix Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-data_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-force_unlink>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-socket_not_present_retry_interval_seconds>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-data_timeout"] +===== `data_timeout` + + * Value type is <> + * Default value is `-1` + +The 'read' timeout in seconds. If a particular connection is idle for +more than this timeout period, we will assume it is dead and close it. + +If you never want to timeout, use -1. + +[id="{version}-plugins-{type}s-{plugin}-force_unlink"] +===== `force_unlink` + + * Value type is <> + * Default value is `false` + +Remove socket file in case of EADDRINUSE failure + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `server`, `client` + * Default value is `"server"` + +Mode to operate in. `server` listens for client connections, +`client` connects to a server. + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +When mode is `server`, the path to listen on. +When mode is `client`, the path to connect to. + +[id="{version}-plugins-{type}s-{plugin}-socket_not_present_retry_interval_seconds"] +===== `socket_not_present_retry_interval_seconds` + + * This is a required setting. + * Value type is <> + * Default value is `5` + +Amount of time in seconds to wait if the socket file is not present, before retrying. +Only positive values are allowed. + +This setting is only used if `mode` is `client`. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/unix-v3.0.6.asciidoc b/docs/versioned-plugins/inputs/unix-v3.0.6.asciidoc new file mode 100644 index 000000000..f54e49fcf --- /dev/null +++ b/docs/versioned-plugins/inputs/unix-v3.0.6.asciidoc @@ -0,0 +1,103 @@ +:plugin: unix +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.6 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-input-unix/blob/v3.0.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Unix input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read events over a UNIX socket. 
+ +Like `stdin` and `file` inputs, each event is assumed to be one line of text. + +Can either accept connections from clients or connect to a server, +depending on `mode`. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Unix Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-data_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-force_unlink>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-socket_not_present_retry_interval_seconds>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-data_timeout"] +===== `data_timeout` + + * Value type is <> + * Default value is `-1` + +The 'read' timeout in seconds. If a particular connection is idle for +more than this timeout period, we will assume it is dead and close it. + +If you never want to timeout, use -1. + +[id="{version}-plugins-{type}s-{plugin}-force_unlink"] +===== `force_unlink` + + * Value type is <> + * Default value is `false` + +Remove socket file in case of EADDRINUSE failure + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `server`, `client` + * Default value is `"server"` + +Mode to operate in. `server` listens for client connections, +`client` connects to a server. + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +When mode is `server`, the path to listen on. +When mode is `client`, the path to connect to. + +[id="{version}-plugins-{type}s-{plugin}-socket_not_present_retry_interval_seconds"] +===== `socket_not_present_retry_interval_seconds` + + * This is a required setting. + * Value type is <> + * Default value is `5` + +Amount of time in seconds to wait if the socket file is not present, before retrying. +Only positive values are allowed. + +This setting is only used if `mode` is `client`. 
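+
+For instance, a client-mode sketch that connects to an existing socket and
+retries every 10 seconds while the socket file is absent (both values are
+arbitrary examples) might be:
+
+[source,ruby]
+    input {
+      unix {
+        mode => "client"
+        path => "/tmp/logstash.sock"
+        socket_not_present_retry_interval_seconds => 10
+      }
+    }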
+ + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/varnishlog-index.asciidoc b/docs/versioned-plugins/inputs/varnishlog-index.asciidoc new file mode 100644 index 000000000..3cc768a03 --- /dev/null +++ b/docs/versioned-plugins/inputs/varnishlog-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: varnishlog +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-16 +| <> | 2017-06-23 +|======================================================================= + +include::varnishlog-v3.0.3.asciidoc[] +include::varnishlog-v3.0.2.asciidoc[] +include::varnishlog-v3.0.1.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/varnishlog-v3.0.1.asciidoc b/docs/versioned-plugins/inputs/varnishlog-v3.0.1.asciidoc new file mode 100644 index 000000000..54139c101 --- /dev/null +++ b/docs/versioned-plugins/inputs/varnishlog-v3.0.1.asciidoc @@ -0,0 +1,52 @@ +:plugin: varnishlog +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.1 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-input-varnishlog/blob/v3.0.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Varnishlog input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read from varnish cache's shared memory log + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Varnishlog Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-threads"] +===== `threads` + + * Value type is <> + * Default value is `1` + + + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/varnishlog-v3.0.2.asciidoc b/docs/versioned-plugins/inputs/varnishlog-v3.0.2.asciidoc new file mode 100644 index 000000000..a0e44363d --- /dev/null +++ b/docs/versioned-plugins/inputs/varnishlog-v3.0.2.asciidoc @@ -0,0 +1,52 @@ +:plugin: varnishlog +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.2 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-input-varnishlog/blob/v3.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Varnishlog input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read from varnish cache's shared memory log + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Varnishlog Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-threads"] +===== `threads` + + * Value type is <> + * Default value is `1` + + + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/varnishlog-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/varnishlog-v3.0.3.asciidoc new file mode 100644 index 000000000..72847a673 --- /dev/null +++ b/docs/versioned-plugins/inputs/varnishlog-v3.0.3.asciidoc @@ -0,0 +1,52 @@ +:plugin: varnishlog +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-input-varnishlog/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Varnishlog input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read from varnish cache's shared memory log + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Varnishlog Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. 
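+
+As a sketch, enabling the plugin with two reader threads might look like
+this (the thread count is an arbitrary example, and Varnish must be running
+on the same host for the shared memory log to be readable):
+
+[source,ruby]
+    input {
+      varnishlog {
+        threads => 2
+      }
+    }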
+ +  + +[id="{version}-plugins-{type}s-{plugin}-threads"] +===== `threads` + + * Value type is <> + * Default value is `1` + + + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/websocket-index.asciidoc b/docs/versioned-plugins/inputs/websocket-index.asciidoc new file mode 100644 index 000000000..265130c2f --- /dev/null +++ b/docs/versioned-plugins/inputs/websocket-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: websocket +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-16 +| <> | 2017-06-23 +|======================================================================= + +include::websocket-v4.0.3.asciidoc[] +include::websocket-v4.0.2.asciidoc[] +include::websocket-v4.0.1.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/websocket-v4.0.1.asciidoc b/docs/versioned-plugins/inputs/websocket-v4.0.1.asciidoc new file mode 100644 index 000000000..cca9d95da --- /dev/null +++ b/docs/versioned-plugins/inputs/websocket-v4.0.1.asciidoc @@ -0,0 +1,64 @@ +:plugin: websocket +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.0.1 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-input-websocket/blob/v4.0.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Websocket input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read events over the websocket protocol. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Websocket Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["client"]`|No +| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `client` + * Default value is `"client"` + +Select the plugin's mode of operation. Right now only client mode +is supported, i.e. this plugin connects to a websocket server and +receives events from the server as websocket messages. + +[id="{version}-plugins-{type}s-{plugin}-url"] +===== `url` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The URL to connect to. 
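+
+A minimal client sketch (the URL is a placeholder for your own websocket
+server) might be:
+
+[source,ruby]
+    input {
+      websocket {
+        url => "ws://127.0.0.1:3232/"
+      }
+    }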
+ + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/websocket-v4.0.2.asciidoc b/docs/versioned-plugins/inputs/websocket-v4.0.2.asciidoc new file mode 100644 index 000000000..6035637fd --- /dev/null +++ b/docs/versioned-plugins/inputs/websocket-v4.0.2.asciidoc @@ -0,0 +1,64 @@ +:plugin: websocket +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.0.2 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-input-websocket/blob/v4.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Websocket input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read events over the websocket protocol. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Websocket Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["client"]`|No +| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `client` + * Default value is `"client"` + +Select the plugin's mode of operation. Right now only client mode +is supported, i.e. this plugin connects to a websocket server and +receives events from the server as websocket messages. + +[id="{version}-plugins-{type}s-{plugin}-url"] +===== `url` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The URL to connect to. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/websocket-v4.0.3.asciidoc b/docs/versioned-plugins/inputs/websocket-v4.0.3.asciidoc new file mode 100644 index 000000000..ec144ad32 --- /dev/null +++ b/docs/versioned-plugins/inputs/websocket-v4.0.3.asciidoc @@ -0,0 +1,64 @@ +:plugin: websocket +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.0.3 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-input-websocket/blob/v4.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Websocket input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read events over the websocket protocol. 
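+
+As with the earlier versions, a minimal client sketch (placeholder URL)
+might look like:
+
+[source,ruby]
+    input {
+      websocket {
+        url => "ws://127.0.0.1:3232/"
+      }
+    }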
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Websocket Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["client"]`|No +| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `client` + * Default value is `"client"` + +Select the plugin's mode of operation. Right now only client mode +is supported, i.e. this plugin connects to a websocket server and +receives events from the server as websocket messages. + +[id="{version}-plugins-{type}s-{plugin}-url"] +===== `url` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The URL to connect to. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/wmi-index.asciidoc b/docs/versioned-plugins/inputs/wmi-index.asciidoc new file mode 100644 index 000000000..8587aba71 --- /dev/null +++ b/docs/versioned-plugins/inputs/wmi-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: wmi +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-16 +| <> | 2017-06-23 +|======================================================================= + +include::wmi-v3.0.3.asciidoc[] +include::wmi-v3.0.2.asciidoc[] +include::wmi-v3.0.1.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/wmi-v3.0.1.asciidoc b/docs/versioned-plugins/inputs/wmi-v3.0.1.asciidoc new file mode 100644 index 000000000..9ead53012 --- /dev/null +++ b/docs/versioned-plugins/inputs/wmi-v3.0.1.asciidoc @@ -0,0 +1,119 @@ +:plugin: wmi +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.1 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-input-wmi/blob/v3.0.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Wmi input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Collect data from WMI query + +This is useful for collecting performance metrics and other data +which is accessible via WMI on a Windows host + +Example: +[source,ruby] + input { + wmi { + query => "select * from Win32_Process" + interval => 10 + } + wmi { + query => "select PercentProcessorTime from Win32_PerfFormattedData_PerfOS_Processor where name = '_Total'" + } + wmi { # Connect to a remote host + query => "select * from Win32_Process" + host => "MyRemoteHost" + user => "mydomain\myuser" + password => "Password" + } + } + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Wmi Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-namespace>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-query>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"localhost"` + +Host to connect to ( Defaults to localhost ) + +[id="{version}-plugins-{type}s-{plugin}-interval"] +===== `interval` + + * Value type is <> + * Default value is `10` + +Polling interval + +[id="{version}-plugins-{type}s-{plugin}-namespace"] +===== `namespace` + + * Value type is <> + * Default value is `"root\\cimv2"` + +Namespace when doing remote connections + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password when doing remote connections + +[id="{version}-plugins-{type}s-{plugin}-query"] +===== `query` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +WMI query + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * There is no default value for this setting. + +Username when doing remote connections + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/wmi-v3.0.2.asciidoc b/docs/versioned-plugins/inputs/wmi-v3.0.2.asciidoc new file mode 100644 index 000000000..8405a91cf --- /dev/null +++ b/docs/versioned-plugins/inputs/wmi-v3.0.2.asciidoc @@ -0,0 +1,119 @@ +:plugin: wmi +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.2 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-input-wmi/blob/v3.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Wmi input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Collect data from WMI query + +This is useful for collecting performance metrics and other data +which is accessible via WMI on a Windows host + +Example: +[source,ruby] + input { + wmi { + query => "select * from Win32_Process" + interval => 10 + } + wmi { + query => "select PercentProcessorTime from Win32_PerfFormattedData_PerfOS_Processor where name = '_Total'" + } + wmi { # Connect to a remote host + query => "select * from Win32_Process" + host => "MyRemoteHost" + user => "mydomain\myuser" + password => "Password" + } + } + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Wmi Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-namespace>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-query>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"localhost"` + +Host to connect to ( Defaults to localhost ) + +[id="{version}-plugins-{type}s-{plugin}-interval"] +===== `interval` + + * Value type is <> + * Default value is `10` + +Polling interval + +[id="{version}-plugins-{type}s-{plugin}-namespace"] +===== `namespace` + + * Value type is <> + * Default value is `"root\\cimv2"` + +Namespace when doing remote connections + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password when doing remote connections + +[id="{version}-plugins-{type}s-{plugin}-query"] +===== `query` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +WMI query + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * There is no default value for this setting. + +Username when doing remote connections + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/wmi-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/wmi-v3.0.3.asciidoc new file mode 100644 index 000000000..a16498907 --- /dev/null +++ b/docs/versioned-plugins/inputs/wmi-v3.0.3.asciidoc @@ -0,0 +1,119 @@ +:plugin: wmi +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-input-wmi/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Wmi input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Collect data from WMI query + +This is useful for collecting performance metrics and other data +which is accessible via WMI on a Windows host + +Example: +[source,ruby] + input { + wmi { + query => "select * from Win32_Process" + interval => 10 + } + wmi { + query => "select PercentProcessorTime from Win32_PerfFormattedData_PerfOS_Processor where name = '_Total'" + } + wmi { # Connect to a remote host + query => "select * from Win32_Process" + host => "MyRemoteHost" + user => "mydomain\myuser" + password => "Password" + } + } + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Wmi Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-namespace>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-query>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"localhost"` + +Host to connect to ( Defaults to localhost ) + +[id="{version}-plugins-{type}s-{plugin}-interval"] +===== `interval` + + * Value type is <> + * Default value is `10` + +Polling interval + +[id="{version}-plugins-{type}s-{plugin}-namespace"] +===== `namespace` + + * Value type is <> + * Default value is `"root\\cimv2"` + +Namespace when doing remote connections + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password when doing remote connections + +[id="{version}-plugins-{type}s-{plugin}-query"] +===== `query` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +WMI query + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * There is no default value for this setting. 
+ +Username when doing remote connections + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/xmpp-index.asciidoc b/docs/versioned-plugins/inputs/xmpp-index.asciidoc new file mode 100644 index 000000000..32f60ecf4 --- /dev/null +++ b/docs/versioned-plugins/inputs/xmpp-index.asciidoc @@ -0,0 +1,18 @@ +:plugin: xmpp +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-16 +| <> | 2017-06-27 +| <> | 2017-06-23 +|======================================================================= + +include::xmpp-v3.1.6.asciidoc[] +include::xmpp-v3.1.5.asciidoc[] +include::xmpp-v3.1.4.asciidoc[] +include::xmpp-v3.1.3.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/xmpp-v3.1.3.asciidoc b/docs/versioned-plugins/inputs/xmpp-v3.1.3.asciidoc new file mode 100644 index 000000000..c12466803 --- /dev/null +++ b/docs/versioned-plugins/inputs/xmpp-v3.1.3.asciidoc @@ -0,0 +1,87 @@ +:plugin: xmpp +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.3 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-input-xmpp/blob/v3.1.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Xmpp input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This input allows you to receive events over XMPP/Jabber. + +This plugin can be used for accepting events from humans or applications +over XMPP, or for PubSub or general message passing between Logstash +instances. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Xmpp Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-rooms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +&nbsp; + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * There is no default value for this setting. + +The XMPP server to connect to. This is optional. If you omit this setting, +the host part of the user/identity is used. (`foo.com` for `user@foo.com`) + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The XMPP password for the user/identity. + +[id="{version}-plugins-{type}s-{plugin}-rooms"] +===== `rooms` + + * Value type is <> + * There is no default value for this setting.
+ +If multi-user chat (MUC) is required, give the name of the room that +you want to join: `room@conference.domain/nick` + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The user or resource ID, like `foo@example.com`. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/xmpp-v3.1.4.asciidoc b/docs/versioned-plugins/inputs/xmpp-v3.1.4.asciidoc new file mode 100644 index 000000000..e32b0cb01 --- /dev/null +++ b/docs/versioned-plugins/inputs/xmpp-v3.1.4.asciidoc @@ -0,0 +1,87 @@ +:plugin: xmpp +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.4 +:release_date: 2017-06-27 +:changelog_url: https://github.com/logstash-plugins/logstash-input-xmpp/blob/v3.1.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Xmpp input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This input allows you to receive events over XMPP/Jabber. + +This plugin can be used for accepting events from humans or applications +over XMPP, or for PubSub or general message passing between Logstash +instances. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Xmpp Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-rooms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +&nbsp; + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * There is no default value for this setting. + +The XMPP server to connect to. This is optional. If you omit this setting, +the host part of the user/identity is used. (`foo.com` for `user@foo.com`) + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The XMPP password for the user/identity. + +[id="{version}-plugins-{type}s-{plugin}-rooms"] +===== `rooms` + + * Value type is <> + * There is no default value for this setting. + +If multi-user chat (MUC) is required, give the name of the room that +you want to join: `room@conference.domain/nick` + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The user or resource ID, like `foo@example.com`.
+ + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/xmpp-v3.1.5.asciidoc b/docs/versioned-plugins/inputs/xmpp-v3.1.5.asciidoc new file mode 100644 index 000000000..813db700f --- /dev/null +++ b/docs/versioned-plugins/inputs/xmpp-v3.1.5.asciidoc @@ -0,0 +1,87 @@ +:plugin: xmpp +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.5 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-input-xmpp/blob/v3.1.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Xmpp input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This input allows you to receive events over XMPP/Jabber. + +This plugin can be used for accepting events from humans or applications +over XMPP, or for PubSub or general message passing between Logstash +instances. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Xmpp Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-rooms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +&nbsp; + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * There is no default value for this setting. + +The XMPP server to connect to. This is optional. If you omit this setting, +the host part of the user/identity is used. (`foo.com` for `user@foo.com`) + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The XMPP password for the user/identity. + +[id="{version}-plugins-{type}s-{plugin}-rooms"] +===== `rooms` + + * Value type is <> + * There is no default value for this setting. + +If multi-user chat (MUC) is required, give the name of the room that +you want to join: `room@conference.domain/nick` + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The user or resource ID, like `foo@example.com`. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/xmpp-v3.1.6.asciidoc b/docs/versioned-plugins/inputs/xmpp-v3.1.6.asciidoc new file mode 100644 index 000000000..6dafde59c --- /dev/null +++ b/docs/versioned-plugins/inputs/xmpp-v3.1.6.asciidoc @@ -0,0 +1,87 @@ +:plugin: xmpp +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT!
+/////////////////////////////////////////// +:version: v3.1.6 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-input-xmpp/blob/v3.1.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Xmpp input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This input allows you to receive events over XMPP/Jabber. + +This plugin can be used for accepting events from humans or applications +over XMPP, or for PubSub or general message passing between Logstash +instances. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Xmpp Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-rooms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +&nbsp; + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * There is no default value for this setting. + +The XMPP server to connect to. This is optional. If you omit this setting, +the host part of the user/identity is used. (`foo.com` for `user@foo.com`) + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The XMPP password for the user/identity. + +[id="{version}-plugins-{type}s-{plugin}-rooms"] +===== `rooms` + + * Value type is <> + * There is no default value for this setting. + +If multi-user chat (MUC) is required, give the name of the room that +you want to join: `room@conference.domain/nick` + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The user or resource ID, like `foo@example.com`.
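+ +For illustration, here is a minimal sketch of this input that joins a +chat room; the identity, password, and room shown are hypothetical +placeholders, not defaults: +[source,ruby] + input { + xmpp { + user => "logstash@example.com" + password => "secret" + rooms => ["logs@conference.example.com/logstash"] + } + }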
+ + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/zenoss-index.asciidoc b/docs/versioned-plugins/inputs/zenoss-index.asciidoc new file mode 100644 index 000000000..1a283f2e5 --- /dev/null +++ b/docs/versioned-plugins/inputs/zenoss-index.asciidoc @@ -0,0 +1,14 @@ +:plugin: zenoss +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-08-16 +| <> | 2017-06-23 +|======================================================================= + +include::zenoss-v2.0.6.asciidoc[] +include::zenoss-v2.0.5.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/zenoss-v2.0.5.asciidoc b/docs/versioned-plugins/inputs/zenoss-v2.0.5.asciidoc new file mode 100644 index 000000000..5fa3a3778 --- /dev/null +++ b/docs/versioned-plugins/inputs/zenoss-v2.0.5.asciidoc @@ -0,0 +1,398 @@ +:plugin: zenoss +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v2.0.5 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-input-zenoss/blob/v2.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Zenoss input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read Zenoss events from the zenoss.zenevents fanout exchange.
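+ +Example (a minimal sketch for pulling events from a remote broker; the +host shown is a hypothetical placeholder): +[source,ruby] + input { + zenoss { + host => "rabbitmq.example.com" + metadata_enabled => true + } + }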
+ + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Zenoss Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-ack>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-auto_delete>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclusive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_enabled>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-prefetch_count>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +&nbsp; + +[id="{version}-plugins-{type}s-{plugin}-ack"] +===== `ack` + + * Value type is <> + * Default value is `true` + +Enable message acknowledgements. With acknowledgements, +messages fetched by Logstash but not yet sent into the +Logstash pipeline will be requeued by the server if Logstash +shuts down. Acknowledgements will, however, hurt the message +throughput. + +This will only send an ack back every `prefetch_count` messages. +Working in batches provides a performance boost here. + +[id="{version}-plugins-{type}s-{plugin}-arguments"] +===== `arguments` + + * Value type is <> + * Default value is `{}` + +Extra queue arguments. +To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}` + +[id="{version}-plugins-{type}s-{plugin}-auto_delete"] +===== `auto_delete` + + * Value type is <> + * Default value is `false` + +Should the queue be deleted on the broker when the last consumer +disconnects? Set this option to `false` if you want the queue to remain +on the broker, queueing up messages until a consumer comes along to +consume them. + +[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"] +===== `automatic_recovery` + + * Value type is <> + * Default value is `true` + +Set this to automatically recover from a broken connection. You almost certainly don't want to override this.
+ +[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"] +===== `connect_retry_interval` + + * Value type is <> + * Default value is `1` + +Time in seconds to wait before retrying a connection. + +[id="{version}-plugins-{type}s-{plugin}-connection_timeout"] +===== `connection_timeout` + + * Value type is <> + * There is no default value for this setting. + +The default connection timeout in milliseconds. If not specified, the timeout is infinite. + +[id="{version}-plugins-{type}s-{plugin}-durable"] +===== `durable` + + * Value type is <> + * Default value is `false` + +Is this queue durable? (That is, should it survive a broker restart?) + +[id="{version}-plugins-{type}s-{plugin}-exchange"] +===== `exchange` + + * Value type is <> + * Default value is `"zenoss.zenevents"` + +The name of the exchange to bind the queue to. This is analogous to the +`name` option of the rabbitmq output. + +[id="{version}-plugins-{type}s-{plugin}-exchange_type"] +===== `exchange_type` + + * Value type is <> + * There is no default value for this setting. + +The type of the exchange to bind to. Specifying this will cause this plugin +to declare the exchange if it does not exist. + +[id="{version}-plugins-{type}s-{plugin}-exclusive"] +===== `exclusive` + + * Value type is <> + * Default value is `false` + +Is the queue exclusive? Exclusive queues can only be used by the connection +that declared them and will be deleted when it is closed (e.g. due to a Logstash +restart). + +[id="{version}-plugins-{type}s-{plugin}-heartbeat"] +===== `heartbeat` + + * Value type is <> + * There is no default value for this setting. + +Heartbeat delay in seconds. If unspecified, no heartbeats will be sent. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"localhost"` + +Your RabbitMQ server address. + +[id="{version}-plugins-{type}s-{plugin}-key"] +===== `key` + + * Value type is <> + * Default value is `"zenoss.zenevent.#"` + +The routing key to use. This is only relevant for direct or topic exchanges. + +* Routing keys are ignored on fanout exchanges. +* Wildcards are not valid on direct exchanges. + +[id="{version}-plugins-{type}s-{plugin}-metadata_enabled"] +===== `metadata_enabled` + + * Value type is <> + * Default value is `false` + +Enable the storage of message headers and properties in `@metadata`. This may impact performance. + +[id="{version}-plugins-{type}s-{plugin}-passive"] +===== `passive` + + * Value type is <> + * Default value is `false` + +Passive queue creation? Useful for checking queue existence without modifying server state. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * Default value is `"zenoss"` + +Your RabbitMQ password. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `5672` + +RabbitMQ port to connect on. + +[id="{version}-plugins-{type}s-{plugin}-prefetch_count"] +===== `prefetch_count` + + * Value type is <> + * Default value is `256` + +Prefetch count. If acknowledgements are enabled with the `ack` +option, specifies the number of outstanding unacknowledged +messages allowed. + +[id="{version}-plugins-{type}s-{plugin}-queue"] +===== `queue` + + * Value type is <> + * Default value is `""` + +Pull events from a http://www.rabbitmq.com/[RabbitMQ] queue. + +The default settings will create an entirely transient queue and listen for all messages.
+If you need durability or any other advanced settings, please set the appropriate options. + +This plugin uses the http://rubymarchhare.info/[March Hare] library +for interacting with the RabbitMQ server. Most configuration options +map directly to standard RabbitMQ and AMQP concepts. The +https://www.rabbitmq.com/amqp-0-9-1-reference.html[AMQP 0-9-1 reference guide] +and other parts of the RabbitMQ documentation are useful for deeper +understanding. + +The properties of messages received will be stored in the +`[@metadata][rabbitmq_properties]` field if the `metadata_enabled` setting is enabled. +Note that storing metadata may degrade performance. +The following properties may be available (in most cases dependent on whether +they were set by the sender): + +* app-id +* cluster-id +* consumer-tag +* content-encoding +* content-type +* correlation-id +* delivery-mode +* exchange +* expiration +* message-id +* priority +* redeliver +* reply-to +* routing-key +* timestamp +* type +* user-id + +For example, to get the RabbitMQ message's timestamp property +into the Logstash event's `@timestamp` field, use the date +filter to parse the `[@metadata][rabbitmq_properties][timestamp]` +field: +[source,ruby] + filter { + if [@metadata][rabbitmq_properties][timestamp] { + date { + match => ["[@metadata][rabbitmq_properties][timestamp]", "UNIX"] + } + } + } + +Additionally, any message headers will be saved in the +`[@metadata][rabbitmq_headers]` field. Technically, the exchange, +redeliver, and routing-key properties belong to the envelope rather +than the message, but we ignore that distinction here. + +The default codec for this plugin is JSON. You can override this to suit your particular needs, however. + +The name of the queue Logstash will consume events from. If +left empty, a transient queue with a randomly chosen name +will be created. + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * There is no default value for this setting. + +Enable or disable SSL. +Note that by default remote certificate verification is off. +Specify `ssl_certificate_path` and `ssl_certificate_password` if you need +certificate verification. + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"] +===== `ssl_certificate_password` + + * Value type is <> + * There is no default value for this setting. + +Password for the encrypted PKCS12 (.p12) certificate file specified in `ssl_certificate_path`. + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"] +===== `ssl_certificate_path` + + * Value type is <> + * There is no default value for this setting. + +Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host. + +[id="{version}-plugins-{type}s-{plugin}-ssl_version"] +===== `ssl_version` + + * Value type is <> + * Default value is `"TLSv1.2"` + +Version of the SSL protocol to use. + +[id="{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds"] +===== `subscription_retry_interval_seconds` + + * This is a required setting. + * Value type is <> + * Default value is `5` + +Amount of time in seconds to wait after a failed subscription request +before retrying.
Subscriptions can fail if the server goes away and then comes back. + +[id="{version}-plugins-{type}s-{plugin}-threads"] +===== `threads` + + * Value type is <> + * Default value is `1` + + + +[id="{version}-plugins-{type}s-{plugin}-tls_certificate_password"] +===== `tls_certificate_password` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + +TLS certificate password. + +[id="{version}-plugins-{type}s-{plugin}-tls_certificate_path"] +===== `tls_certificate_path` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + +TLS certificate path. + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * Default value is `"zenoss"` + +Your RabbitMQ username. + +[id="{version}-plugins-{type}s-{plugin}-vhost"] +===== `vhost` + + * Value type is <> + * Default value is `"/zenoss"` + +The vhost to use. If you don't know what this is, leave the default. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/zenoss-v2.0.6.asciidoc b/docs/versioned-plugins/inputs/zenoss-v2.0.6.asciidoc new file mode 100644 index 000000000..8a7f9609d --- /dev/null +++ b/docs/versioned-plugins/inputs/zenoss-v2.0.6.asciidoc @@ -0,0 +1,398 @@ +:plugin: zenoss +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v2.0.6 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-input-zenoss/blob/v2.0.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Zenoss input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read Zenoss events from the zenoss.zenevents fanout exchange.
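+ +Example (a minimal sketch for pulling events from a remote broker; the +host shown is a hypothetical placeholder): +[source,ruby] + input { + zenoss { + host => "rabbitmq.example.com" + metadata_enabled => true + } + }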
+ + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Zenoss Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-ack>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-auto_delete>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclusive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_enabled>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-prefetch_count>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +&nbsp; + +[id="{version}-plugins-{type}s-{plugin}-ack"] +===== `ack` + + * Value type is <> + * Default value is `true` + +Enable message acknowledgements. With acknowledgements, +messages fetched by Logstash but not yet sent into the +Logstash pipeline will be requeued by the server if Logstash +shuts down. Acknowledgements will, however, hurt the message +throughput. + +This will only send an ack back every `prefetch_count` messages. +Working in batches provides a performance boost here. + +[id="{version}-plugins-{type}s-{plugin}-arguments"] +===== `arguments` + + * Value type is <> + * Default value is `{}` + +Extra queue arguments. +To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}` + +[id="{version}-plugins-{type}s-{plugin}-auto_delete"] +===== `auto_delete` + + * Value type is <> + * Default value is `false` + +Should the queue be deleted on the broker when the last consumer +disconnects? Set this option to `false` if you want the queue to remain +on the broker, queueing up messages until a consumer comes along to +consume them. + +[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"] +===== `automatic_recovery` + + * Value type is <> + * Default value is `true` + +Set this to automatically recover from a broken connection. You almost certainly don't want to override this.
+ +[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"] +===== `connect_retry_interval` + + * Value type is <> + * Default value is `1` + +Time in seconds to wait before retrying a connection. + +[id="{version}-plugins-{type}s-{plugin}-connection_timeout"] +===== `connection_timeout` + + * Value type is <> + * There is no default value for this setting. + +The default connection timeout in milliseconds. If not specified, the timeout is infinite. + +[id="{version}-plugins-{type}s-{plugin}-durable"] +===== `durable` + + * Value type is <> + * Default value is `false` + +Is this queue durable? (That is, should it survive a broker restart?) + +[id="{version}-plugins-{type}s-{plugin}-exchange"] +===== `exchange` + + * Value type is <> + * Default value is `"zenoss.zenevents"` + +The name of the exchange to bind the queue to. This is analogous to the +`name` option of the rabbitmq output. + +[id="{version}-plugins-{type}s-{plugin}-exchange_type"] +===== `exchange_type` + + * Value type is <> + * There is no default value for this setting. + +The type of the exchange to bind to. Specifying this will cause this plugin +to declare the exchange if it does not exist. + +[id="{version}-plugins-{type}s-{plugin}-exclusive"] +===== `exclusive` + + * Value type is <> + * Default value is `false` + +Is the queue exclusive? Exclusive queues can only be used by the connection +that declared them and will be deleted when it is closed (e.g. due to a Logstash +restart). + +[id="{version}-plugins-{type}s-{plugin}-heartbeat"] +===== `heartbeat` + + * Value type is <> + * There is no default value for this setting. + +Heartbeat delay in seconds. If unspecified, no heartbeats will be sent. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"localhost"` + +Your RabbitMQ server address. + +[id="{version}-plugins-{type}s-{plugin}-key"] +===== `key` + + * Value type is <> + * Default value is `"zenoss.zenevent.#"` + +The routing key to use. This is only relevant for direct or topic exchanges. + +* Routing keys are ignored on fanout exchanges. +* Wildcards are not valid on direct exchanges. + +[id="{version}-plugins-{type}s-{plugin}-metadata_enabled"] +===== `metadata_enabled` + + * Value type is <> + * Default value is `false` + +Enable the storage of message headers and properties in `@metadata`. This may impact performance. + +[id="{version}-plugins-{type}s-{plugin}-passive"] +===== `passive` + + * Value type is <> + * Default value is `false` + +Passive queue creation? Useful for checking queue existence without modifying server state. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * Default value is `"zenoss"` + +Your RabbitMQ password. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `5672` + +RabbitMQ port to connect on. + +[id="{version}-plugins-{type}s-{plugin}-prefetch_count"] +===== `prefetch_count` + + * Value type is <> + * Default value is `256` + +Prefetch count. If acknowledgements are enabled with the `ack` +option, specifies the number of outstanding unacknowledged +messages allowed. + +[id="{version}-plugins-{type}s-{plugin}-queue"] +===== `queue` + + * Value type is <> + * Default value is `""` + +Pull events from a http://www.rabbitmq.com/[RabbitMQ] queue. + +The default settings will create an entirely transient queue and listen for all messages.
+If you need durability or any other advanced settings, please set the appropriate options. + +This plugin uses the http://rubymarchhare.info/[March Hare] library +for interacting with the RabbitMQ server. Most configuration options +map directly to standard RabbitMQ and AMQP concepts. The +https://www.rabbitmq.com/amqp-0-9-1-reference.html[AMQP 0-9-1 reference guide] +and other parts of the RabbitMQ documentation are useful for deeper +understanding. + +The properties of messages received will be stored in the +`[@metadata][rabbitmq_properties]` field if the `metadata_enabled` setting is enabled. +Note that storing metadata may degrade performance. +The following properties may be available (in most cases dependent on whether +they were set by the sender): + +* app-id +* cluster-id +* consumer-tag +* content-encoding +* content-type +* correlation-id +* delivery-mode +* exchange +* expiration +* message-id +* priority +* redeliver +* reply-to +* routing-key +* timestamp +* type +* user-id + +For example, to get the RabbitMQ message's timestamp property +into the Logstash event's `@timestamp` field, use the date +filter to parse the `[@metadata][rabbitmq_properties][timestamp]` +field: +[source,ruby] + filter { + if [@metadata][rabbitmq_properties][timestamp] { + date { + match => ["[@metadata][rabbitmq_properties][timestamp]", "UNIX"] + } + } + } + +Additionally, any message headers will be saved in the +`[@metadata][rabbitmq_headers]` field. Technically, the exchange, +redeliver, and routing-key properties belong to the envelope rather +than the message, but we ignore that distinction here. + +The default codec for this plugin is JSON. You can override this to suit your particular needs, however. + +The name of the queue Logstash will consume events from. If +left empty, a transient queue with a randomly chosen name +will be created. + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * There is no default value for this setting. + +Enable or disable SSL. +Note that by default remote certificate verification is off. +Specify `ssl_certificate_path` and `ssl_certificate_password` if you need +certificate verification. + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"] +===== `ssl_certificate_password` + + * Value type is <> + * There is no default value for this setting. + +Password for the encrypted PKCS12 (.p12) certificate file specified in `ssl_certificate_path`. + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"] +===== `ssl_certificate_path` + + * Value type is <> + * There is no default value for this setting. + +Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host. + +[id="{version}-plugins-{type}s-{plugin}-ssl_version"] +===== `ssl_version` + + * Value type is <> + * Default value is `"TLSv1.2"` + +Version of the SSL protocol to use. + +[id="{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds"] +===== `subscription_retry_interval_seconds` + + * This is a required setting. + * Value type is <> + * Default value is `5` + +Amount of time in seconds to wait after a failed subscription request +before retrying.
Subscriptions can fail if the server goes away and then comes back. + +[id="{version}-plugins-{type}s-{plugin}-threads"] +===== `threads` + + * Value type is <> + * Default value is `1` + + + +[id="{version}-plugins-{type}s-{plugin}-tls_certificate_password"] +===== `tls_certificate_password` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + +TLS certificate password. + +[id="{version}-plugins-{type}s-{plugin}-tls_certificate_path"] +===== `tls_certificate_path` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + +TLS certificate path. + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * Default value is `"zenoss"` + +Your RabbitMQ username. + +[id="{version}-plugins-{type}s-{plugin}-vhost"] +===== `vhost` + + * Value type is <> + * Default value is `"/zenoss"` + +The vhost to use. If you don't know what this is, leave the default. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/zeromq-index.asciidoc b/docs/versioned-plugins/inputs/zeromq-index.asciidoc new file mode 100644 index 000000000..703501002 --- /dev/null +++ b/docs/versioned-plugins/inputs/zeromq-index.asciidoc @@ -0,0 +1,12 @@ +:plugin: zeromq +:type: input + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-06-23 +|======================================================================= + +include::zeromq-v3.0.3.asciidoc[] + diff --git a/docs/versioned-plugins/inputs/zeromq-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/zeromq-v3.0.3.asciidoc new file mode 100644 index 000000000..d4416b778 --- /dev/null +++ b/docs/versioned-plugins/inputs/zeromq-v3.0.3.asciidoc @@ -0,0 +1,159 @@ +:plugin: zeromq +:type: input + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-input-zeromq/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Zeromq input plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Read events over a 0MQ SUB socket. + +You need to have the 0mq 2.1.x library installed to be able to use +this input plugin. + +The default settings will create a subscriber binding to `tcp://127.0.0.1:2120` +waiting for connecting publishers. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Zeromq Input Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-address>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-sender>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sockopt>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-topic>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-topic_field>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-topology>> |<>, one of `["pushpull", "pubsub", "pair"]`|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +input plugins. + +&nbsp; + +[id="{version}-plugins-{type}s-{plugin}-address"] +===== `address` + + * Value type is <> + * Default value is `["tcp://*:2120"]` + +The 0mq socket address to connect or bind to. Note that `inproc://` will +not work with Logstash, as we use a context per thread. +By default, inputs bind/listen +and outputs connect. + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `server`, `client` + * Default value is `"server"` + +The connection mode: `server` mode binds/listens, `client` mode connects. + +[id="{version}-plugins-{type}s-{plugin}-sender"] +===== `sender` + + * Value type is <> + * There is no default value for this setting. + +Overrides the sender to set the source of the event. +The default is `zmq+topology://type/`. + +[id="{version}-plugins-{type}s-{plugin}-sockopt"] +===== `sockopt` + + * Value type is <> + * Default value is `{"ZMQ::RCVTIMEO"=>"1000"}` + +0mq socket options. +This exposes `zmq_setsockopt` for advanced tuning; +see http://api.zeromq.org/2-1:zmq-setsockopt for details. + +This is where you would set values like: + + * `ZMQ::HWM` - high water mark + * `ZMQ::IDENTITY` - named queues + * `ZMQ::SWAP_SIZE` - space for disk overflow + +Example: +[source,ruby] + sockopt => { + "ZMQ::HWM" => 50 + "ZMQ::IDENTITY" => "my_named_queue" + } + +This defaults to `sockopt => { "ZMQ::RCVTIMEO" => "1000" }`, which has the effect of "interrupting" +the recv operation at least once every second to allow for proper shutdown handling. + +[id="{version}-plugins-{type}s-{plugin}-topic"] +===== `topic` + + * Value type is <> + * There is no default value for this setting. + +The 0mq topic. This is used for the `pubsub` topology only. +On inputs, this allows you to filter messages by topic. +On outputs, this allows you to tag a message for routing. +NOTE: ZeroMQ does subscriber-side filtering. +NOTE: All topics have an implicit wildcard at the end. +You can specify multiple topics here. + +[id="{version}-plugins-{type}s-{plugin}-topic_field"] +===== `topic_field` + + * Value type is <> + * Default value is `"topic"` + +Event topic field. This is used for the `pubsub` topology only. +When a message is received on a topic, the topic name on which +the message was received will be saved in this field. + +[id="{version}-plugins-{type}s-{plugin}-topology"] +===== `topology` + + * This is a required setting. + * Value can be any of: `pushpull`, `pubsub`, `pair` + * There is no default value for this setting. + +The 0mq topology. The default Logstash topologies work as follows: + +* pushpull - inputs are pull, outputs are push +* pubsub - inputs are subscribers, outputs are publishers +* pair - inputs are clients, outputs are servers + +If the predefined topology flows don't work for you, +you can change the `mode` setting.
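+ +For illustration, a minimal subscriber sketch that connects out to a +remote publisher; the address and topic shown are hypothetical +placeholders: +[source,ruby] + input { + zeromq { + topology => "pubsub" + mode => "client" + address => ["tcp://pub.example.com:2120"] + topic => ["logs"] + } + }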
+ + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs-index.asciidoc b/docs/versioned-plugins/outputs-index.asciidoc new file mode 100644 index 000000000..91a197d6e --- /dev/null +++ b/docs/versioned-plugins/outputs-index.asciidoc @@ -0,0 +1,76 @@ +:type: output +:type_uc: Output + +include::include/plugin-intro.asciidoc[] + +include::outputs/beats-index.asciidoc[] +include::outputs/boundary-index.asciidoc[] +include::outputs/circonus-index.asciidoc[] +include::outputs/cloudwatch-index.asciidoc[] +include::outputs/csv-index.asciidoc[] +include::outputs/datadog-index.asciidoc[] +include::outputs/datadog_metrics-index.asciidoc[] +include::outputs/elasticsearch-index.asciidoc[] +include::outputs/elasticsearch_java-index.asciidoc[] +include::outputs/email-index.asciidoc[] +include::outputs/example-index.asciidoc[] +include::outputs/exec-index.asciidoc[] +include::outputs/file-index.asciidoc[] +include::outputs/firehose-index.asciidoc[] +include::outputs/ganglia-index.asciidoc[] +include::outputs/gelf-index.asciidoc[] +include::outputs/gemfire-index.asciidoc[] +include::outputs/google_bigquery-index.asciidoc[] +include::outputs/google_cloud_storage-index.asciidoc[] +include::outputs/graphite-index.asciidoc[] +include::outputs/graphtastic-index.asciidoc[] +include::outputs/hipchat-index.asciidoc[] +include::outputs/http-index.asciidoc[] +include::outputs/icinga-index.asciidoc[] +include::outputs/influxdb-index.asciidoc[] +include::outputs/irc-index.asciidoc[] +include::outputs/jira-index.asciidoc[] +include::outputs/jms-index.asciidoc[] +include::outputs/juggernaut-index.asciidoc[] +include::outputs/kafka-index.asciidoc[] +include::outputs/librato-index.asciidoc[] +include::outputs/logentries-index.asciidoc[] +include::outputs/loggly-index.asciidoc[] +include::outputs/lumberjack-index.asciidoc[] +include::outputs/metriccatcher-index.asciidoc[] +include::outputs/monasca_log_api-index.asciidoc[] +include::outputs/mongodb-index.asciidoc[] +include::outputs/nagios-index.asciidoc[] +include::outputs/nagios_nsca-index.asciidoc[] +include::outputs/neo4j-index.asciidoc[] +include::outputs/newrelic-index.asciidoc[] +include::outputs/null-index.asciidoc[] +include::outputs/opentsdb-index.asciidoc[] +include::outputs/pagerduty-index.asciidoc[] +include::outputs/pipe-index.asciidoc[] +include::outputs/rabbitmq-index.asciidoc[] +include::outputs/rackspace-index.asciidoc[] +include::outputs/rados-index.asciidoc[] +include::outputs/redis-index.asciidoc[] +include::outputs/redmine-index.asciidoc[] +include::outputs/riak-index.asciidoc[] +include::outputs/riemann-index.asciidoc[] +include::outputs/s3-index.asciidoc[] +include::outputs/slack-index.asciidoc[] +include::outputs/sns-index.asciidoc[] +include::outputs/solr_http-index.asciidoc[] +include::outputs/sqs-index.asciidoc[] +include::outputs/statsd-index.asciidoc[] +include::outputs/stdout-index.asciidoc[] +include::outputs/stomp-index.asciidoc[] +include::outputs/syslog-index.asciidoc[] +include::outputs/tcp-index.asciidoc[] +include::outputs/timber-index.asciidoc[] +include::outputs/udp-index.asciidoc[]
+include::outputs/webhdfs-index.asciidoc[] +include::outputs/websocket-index.asciidoc[] +include::outputs/xmpp-index.asciidoc[] +include::outputs/zabbix-index.asciidoc[] +include::outputs/zeromq-index.asciidoc[] +include::outputs/zookeeper-index.asciidoc[] + diff --git a/docs/versioned-plugins/outputs/beats-index.asciidoc b/docs/versioned-plugins/outputs/beats-index.asciidoc new file mode 100644 index 000000000..1083feaf5 --- /dev/null +++ b/docs/versioned-plugins/outputs/beats-index.asciidoc @@ -0,0 +1,10 @@ +:plugin: beats +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +|======================================================================= + + diff --git a/docs/versioned-plugins/outputs/boundary-index.asciidoc b/docs/versioned-plugins/outputs/boundary-index.asciidoc new file mode 100644 index 000000000..b05e4a56d --- /dev/null +++ b/docs/versioned-plugins/outputs/boundary-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: boundary +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-16 +| <> | 2017-06-23 +|======================================================================= + +include::boundary-v3.0.4.asciidoc[] +include::boundary-v3.0.3.asciidoc[] +include::boundary-v3.0.2.asciidoc[] + diff --git a/docs/versioned-plugins/outputs/boundary-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/boundary-v3.0.2.asciidoc new file mode 100644 index 000000000..f69f950a6 --- /dev/null +++ b/docs/versioned-plugins/outputs/boundary-v3.0.2.asciidoc @@ -0,0 +1,139 @@ +:plugin: boundary +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.2 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-output-boundary/blob/v3.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Boundary output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output lets you send annotations to +Boundary based on Logstash events. + +Note that since Logstash maintains no state, +these will be one-shot events. + +By default, the start and stop time will be +the event timestamp. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Boundary Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-api_key>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-auto>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bsubtype>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-btags>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-btype>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-end_time>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-org_id>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-start_time>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-api_key"] +===== `api_key` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Your Boundary API key + +[id="{version}-plugins-{type}s-{plugin}-auto"] +===== `auto` + + * Value type is <> + * Default value is `false` + +Auto +If set to true, logstash will try to pull boundary fields out +of the event. Any field explicitly set by config options will +override these. +`['type', 'subtype', 'creation_time', 'end_time', 'links', 'tags', 'loc']` + +[id="{version}-plugins-{type}s-{plugin}-bsubtype"] +===== `bsubtype` + + * Value type is <> + * There is no default value for this setting. + +Sub-Type + +[id="{version}-plugins-{type}s-{plugin}-btags"] +===== `btags` + + * Value type is <> + * There is no default value for this setting. + +Tags +Set any custom tags for this event +Default are the Logstash tags if any + +[id="{version}-plugins-{type}s-{plugin}-btype"] +===== `btype` + + * Value type is <> + * There is no default value for this setting. + +Type + +[id="{version}-plugins-{type}s-{plugin}-end_time"] +===== `end_time` + + * Value type is <> + * There is no default value for this setting. + +End time +Override the stop time +Note that Boundary requires this to be seconds since epoch +If overriding, it is your responsibility to type this correctly +By default this is set to `event.get("@timestamp").to_i` + +[id="{version}-plugins-{type}s-{plugin}-org_id"] +===== `org_id` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Your Boundary Org ID + +[id="{version}-plugins-{type}s-{plugin}-start_time"] +===== `start_time` + + * Value type is <> + * There is no default value for this setting. + +Start time +Override the start time +Note that Boundary requires this to be seconds since epoch +If overriding, it is your responsibility to type this correctly +By default this is set to `event.get("@timestamp").to_i` + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/boundary-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/boundary-v3.0.3.asciidoc new file mode 100644 index 000000000..bcc2a3ab7 --- /dev/null +++ b/docs/versioned-plugins/outputs/boundary-v3.0.3.asciidoc @@ -0,0 +1,139 @@ +:plugin: boundary +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-boundary/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Boundary output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output lets you send annotations to +Boundary based on Logstash events + +Note that since Logstash maintains no state +these will be one-shot events + +By default the start and stop time will be +the event timestamp + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Boundary Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-api_key>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-auto>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bsubtype>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-btags>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-btype>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-end_time>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-org_id>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-start_time>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-api_key"] +===== `api_key` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Your Boundary API key + +[id="{version}-plugins-{type}s-{plugin}-auto"] +===== `auto` + + * Value type is <> + * Default value is `false` + +Auto +If set to true, logstash will try to pull boundary fields out +of the event. Any field explicitly set by config options will +override these. +`['type', 'subtype', 'creation_time', 'end_time', 'links', 'tags', 'loc']` + +[id="{version}-plugins-{type}s-{plugin}-bsubtype"] +===== `bsubtype` + + * Value type is <> + * There is no default value for this setting. + +Sub-Type + +[id="{version}-plugins-{type}s-{plugin}-btags"] +===== `btags` + + * Value type is <> + * There is no default value for this setting. + +Tags +Set any custom tags for this event +Default are the Logstash tags if any + +[id="{version}-plugins-{type}s-{plugin}-btype"] +===== `btype` + + * Value type is <> + * There is no default value for this setting. + +Type + +[id="{version}-plugins-{type}s-{plugin}-end_time"] +===== `end_time` + + * Value type is <> + * There is no default value for this setting. + +End time +Override the stop time +Note that Boundary requires this to be seconds since epoch +If overriding, it is your responsibility to type this correctly +By default this is set to `event.get("@timestamp").to_i` + +[id="{version}-plugins-{type}s-{plugin}-org_id"] +===== `org_id` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Your Boundary Org ID + +[id="{version}-plugins-{type}s-{plugin}-start_time"] +===== `start_time` + + * Value type is <> + * There is no default value for this setting. 
+ +Start time +Override the start time +Note that Boundary requires this to be seconds since epoch +If overriding, it is your responsibility to type this correctly +By default this is set to `event.get("@timestamp").to_i` + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/boundary-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/boundary-v3.0.4.asciidoc new file mode 100644 index 000000000..86783ab71 --- /dev/null +++ b/docs/versioned-plugins/outputs/boundary-v3.0.4.asciidoc @@ -0,0 +1,139 @@ +:plugin: boundary +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-output-boundary/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Boundary output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output lets you send annotations to +Boundary based on Logstash events + +Note that since Logstash maintains no state +these will be one-shot events + +By default the start and stop time will be +the event timestamp + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Boundary Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-api_key>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-auto>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bsubtype>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-btags>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-btype>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-end_time>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-org_id>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-start_time>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-api_key"] +===== `api_key` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Your Boundary API key + +[id="{version}-plugins-{type}s-{plugin}-auto"] +===== `auto` + + * Value type is <> + * Default value is `false` + +Auto +If set to true, logstash will try to pull boundary fields out +of the event. Any field explicitly set by config options will +override these. +`['type', 'subtype', 'creation_time', 'end_time', 'links', 'tags', 'loc']` + +[id="{version}-plugins-{type}s-{plugin}-bsubtype"] +===== `bsubtype` + + * Value type is <> + * There is no default value for this setting. + +Sub-Type + +[id="{version}-plugins-{type}s-{plugin}-btags"] +===== `btags` + + * Value type is <> + * There is no default value for this setting. 
+
+Tags
+Set any custom tags for this event.
+Defaults to the Logstash tags, if any.
+
+[id="{version}-plugins-{type}s-{plugin}-btype"]
+===== `btype`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Type
+
+[id="{version}-plugins-{type}s-{plugin}-end_time"]
+===== `end_time`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+End time
+Override the stop time.
+Note that Boundary requires this to be seconds since epoch.
+If overriding, you are responsible for supplying a correctly formatted value.
+By default this is set to `event.get("@timestamp").to_i`.
+
+[id="{version}-plugins-{type}s-{plugin}-org_id"]
+===== `org_id`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Your Boundary Org ID
+
+[id="{version}-plugins-{type}s-{plugin}-start_time"]
+===== `start_time`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Start time
+Override the start time.
+Note that Boundary requires this to be seconds since epoch.
+If overriding, you are responsible for supplying a correctly formatted value.
+By default this is set to `event.get("@timestamp").to_i`.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/circonus-index.asciidoc b/docs/versioned-plugins/outputs/circonus-index.asciidoc
new file mode 100644
index 000000000..b5daf84a5
--- /dev/null
+++ b/docs/versioned-plugins/outputs/circonus-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: circonus
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::circonus-v3.0.4.asciidoc[]
+include::circonus-v3.0.3.asciidoc[]
+include::circonus-v3.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/circonus-v3.0.1.asciidoc b/docs/versioned-plugins/outputs/circonus-v3.0.1.asciidoc
new file mode 100644
index 000000000..1d963d5bf
--- /dev/null
+++ b/docs/versioned-plugins/outputs/circonus-v3.0.1.asciidoc
@@ -0,0 +1,93 @@
+:plugin: circonus
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-circonus/blob/v3.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Circonus output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output sends annotations to Circonus based on Logstash events.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Circonus Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
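+
+As a minimal sketch (placeholder token and app name), an annotation-sending
+configuration might look like this; note that Logstash config hashes separate
+entries with whitespace rather than commas:
+
+[source,ruby]
+----------------------------------
+output {
+  circonus {
+    api_token  => "YOUR_CIRCONUS_API_TOKEN"   # required
+    app_name   => "logstash"                  # required
+    annotation => {
+      "title"       => "Logstash event"
+      "description" => "Logstash event for %{host}"
+    }
+  }
+}
+----------------------------------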
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-annotation>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-api_token>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-app_name>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-annotation"]
+===== `annotation`
+
+ * This is a required setting.
+ * Value type is <>
+ * Default value is `{}`
+
+Annotations
+Registers an annotation with Circonus.
+The only required fields are `title` and `description`.
+`start` and `stop` will be set to the event timestamp.
+You can add any other optional annotation values as well.
+All values will be passed through `event.sprintf`.
+
+Example:
+[source,ruby]
+    {"title" => "Logstash event" "description" => "Logstash event for %{host}"}
+or
+[source,ruby]
+    {"title" => "Logstash event" "description" => "Logstash event for %{host}" "parent_id" => "1"}
+
+[id="{version}-plugins-{type}s-{plugin}-api_token"]
+===== `api_token`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Your Circonus API Token
+
+[id="{version}-plugins-{type}s-{plugin}-app_name"]
+===== `app_name`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Your Circonus App name.
+This will be passed through `event.sprintf`,
+so variables are allowed here.
+
+Example:
+    `app_name => "%{myappname}"`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/circonus-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/circonus-v3.0.3.asciidoc
new file mode 100644
index 000000000..349aa09d8
--- /dev/null
+++ b/docs/versioned-plugins/outputs/circonus-v3.0.3.asciidoc
@@ -0,0 +1,91 @@
+:plugin: circonus
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-output-circonus/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Circonus output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output sends annotations to Circonus based on Logstash events.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Circonus Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-annotation>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-api_token>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-app_name>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-annotation"]
+===== `annotation`
+
+ * This is a required setting.
+ * Value type is <>
+ * Default value is `{}`
+
+Annotations
+Registers an annotation with Circonus.
+The only required fields are `title` and `description`.
+`start` and `stop` will be set to the event timestamp.
+You can add any other optional annotation values as well.
+All values will be passed through `event.sprintf`.
+
+Example:
+[source,ruby]
+    {"title" => "Logstash event" "description" => "Logstash event for %{host}"}
+or
+[source,ruby]
+    {"title" => "Logstash event" "description" => "Logstash event for %{host}" "parent_id" => "1"}
+
+[id="{version}-plugins-{type}s-{plugin}-api_token"]
+===== `api_token`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Your Circonus API Token
+
+[id="{version}-plugins-{type}s-{plugin}-app_name"]
+===== `app_name`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Your Circonus App name.
+This will be passed through `event.sprintf`,
+so variables are allowed here.
+
+Example:
+    `app_name => "%{myappname}"`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/circonus-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/circonus-v3.0.4.asciidoc
new file mode 100644
index 000000000..d99c5bae8
--- /dev/null
+++ b/docs/versioned-plugins/outputs/circonus-v3.0.4.asciidoc
@@ -0,0 +1,91 @@
+:plugin: circonus
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-output-circonus/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Circonus output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output sends annotations to Circonus based on Logstash events.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Circonus Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-annotation>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-api_token>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-app_name>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-annotation"]
+===== `annotation`
+
+ * This is a required setting.
+ * Value type is <>
+ * Default value is `{}`
+
+Annotations
+Registers an annotation with Circonus.
+The only required fields are `title` and `description`.
+`start` and `stop` will be set to the event timestamp.
+You can add any other optional annotation values as well.
+All values will be passed through `event.sprintf`.
+
+Example:
+[source,ruby]
+    {"title" => "Logstash event" "description" => "Logstash event for %{host}"}
+or
+[source,ruby]
+    {"title" => "Logstash event" "description" => "Logstash event for %{host}" "parent_id" => "1"}
+
+[id="{version}-plugins-{type}s-{plugin}-api_token"]
+===== `api_token`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Your Circonus API Token
+
+[id="{version}-plugins-{type}s-{plugin}-app_name"]
+===== `app_name`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Your Circonus App name.
+This will be passed through `event.sprintf`,
+so variables are allowed here.
+
+Example:
+    `app_name => "%{myappname}"`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/cloudwatch-index.asciidoc b/docs/versioned-plugins/outputs/cloudwatch-index.asciidoc
new file mode 100644
index 000000000..df1cfed83
--- /dev/null
+++ b/docs/versioned-plugins/outputs/cloudwatch-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: cloudwatch
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::cloudwatch-v3.0.7.asciidoc[]
+include::cloudwatch-v3.0.6.asciidoc[]
+include::cloudwatch-v3.0.5.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/cloudwatch-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/cloudwatch-v3.0.5.asciidoc
new file mode 100644
index 000000000..b66c5f10a
--- /dev/null
+++ b/docs/versioned-plugins/outputs/cloudwatch-v3.0.5.asciidoc
@@ -0,0 +1,317 @@
+:plugin: cloudwatch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-cloudwatch/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Cloudwatch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output lets you aggregate and send metric data to AWS CloudWatch.
+
+==== Summary:
+This plugin is intended to be used on a logstash indexer agent (but that
+is not the only way; see below). In the intended scenario, one cloudwatch
+output plugin is configured, on the logstash indexer node, with just AWS API
+credentials, and possibly a region and/or a namespace. The output looks
+for fields present in events, and when it finds them, it uses them to
+calculate aggregate statistics.
If the `metricname` option is set in this
+output, then any events which pass through it will be aggregated & sent to
+CloudWatch, but that is not recommended. The intended use is to NOT set the
+metricname option here, and instead to add a `CW_metricname` field (and other
+fields) to only the events you want sent to CloudWatch.
+
+When events pass through this output they are queued for background
+aggregation and sending, which happens every minute by default. The
+queue has a maximum size, and when it is full, aggregated statistics will be
+sent to CloudWatch ahead of schedule. Whenever this happens, a warning
+message is written to logstash's log. If you see this, you should increase
+the `queue_size` configuration option to avoid the extra API calls. The queue
+is emptied every time we send data to CloudWatch.
+
+Note: when logstash is stopped, the queue is destroyed before it can be processed.
+This is a known limitation of logstash and will hopefully be addressed in a
+future version.
+
+==== Details:
+There are two ways to configure this plugin, and they can be used in
+combination: event fields & per-output defaults.
+
+Event Field configuration...
+You add fields to your events in inputs & filters and this output reads
+those fields to aggregate events. The names of the fields read are
+configurable via the `field_*` options.
+
+Per-output defaults...
+You set universal defaults in this output plugin's configuration, and
+if an event does not have a field for that option then the default is
+used.
+
+Note that the event fields take precedence over the per-output defaults.
+
+At a minimum, events must have a "metric name" to be sent to CloudWatch.
+This can be achieved either by providing a default here OR by adding a
+`CW_metricname` field. By default, if no other configuration is provided
+besides a metric name, then events will be counted (Unit: Count, Value: 1)
+by their metric name (either a default or from their `CW_metricname` field).
+
+Other fields which can be added to events to modify the behavior of this
+plugin are `CW_namespace`, `CW_unit`, `CW_value`, and
+`CW_dimensions`. All of these field names are configurable in
+this output. You can also set per-output defaults for any of them.
+See below for details.
+
+Read more about http://aws.amazon.com/cloudwatch/[AWS CloudWatch],
+and the specifics of the API endpoint this output uses,
+http://docs.amazonwebservices.com/AmazonCloudWatch/latest/APIReference/API_PutMetricData.html[PutMetricData].
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Cloudwatch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
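+
+To make the event-field approach concrete, here is a hedged sketch that tags
+only matching events for CloudWatch using the default `CW_*` field names (the
+`nginx` type and `Web` namespace are illustrative placeholders):
+
+[source,ruby]
+----------------------------------
+filter {
+  if [type] == "nginx" {
+    mutate {
+      add_field => { "CW_metricname" => "requests" }   # counted as Unit: Count, Value: 1
+    }
+  }
+}
+output {
+  cloudwatch {
+    region    => "us-east-1"
+    namespace => "Web"   # default for events without a CW_namespace field
+  }
+}
+----------------------------------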
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-dimensions>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-field_dimensions>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-field_metricname>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-field_namespace>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-field_unit>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-field_value>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metricname>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-namespace>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-queue_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeframe>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-unit>> |<>, one of `["Seconds", "Microseconds", "Milliseconds", "Bytes", "Kilobytes", "Megabytes", "Gigabytes", "Terabytes", "Bits", "Kilobits", "Megabits", "Gigabits", "Terabits", "Percent", "Count", "Bytes/Second", "Kilobytes/Second", "Megabytes/Second", "Gigabytes/Second", "Terabytes/Second", "Bits/Second", "Kilobits/Second", "Megabits/Second", "Gigabits/Second", "Terabits/Second", "Count/Second", "None"]`|No +| <<{version}-plugins-{type}s-{plugin}-value>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-access_key_id"] +===== `access_key_id` + + * Value type is <> + * There is no default value for this setting. + +This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: + +1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config +2. External credentials file specified by `aws_credentials_file` +3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` +4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` +5. IAM Instance Profile (available when running inside EC2) + +[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] +===== `aws_credentials_file` + + * Value type is <> + * There is no default value for this setting. + +Path to YAML file containing a hash of AWS credentials. +This file will only be loaded if `access_key_id` and +`secret_access_key` aren't set. 
The contents of the +file should look like this: + +[source,ruby] +---------------------------------- + :access_key_id: "12345" + :secret_access_key: "54321" +---------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-batch_size"] +===== `batch_size` + + * Value type is <> + * Default value is `20` + +How many data points can be given in one call to the CloudWatch API + +[id="{version}-plugins-{type}s-{plugin}-dimensions"] +===== `dimensions` + + * Value type is <> + * There is no default value for this setting. + +The default dimensions [ name, value, ... ] to use for events which do not have a `CW_dimensions` field + +[id="{version}-plugins-{type}s-{plugin}-field_dimensions"] +===== `field_dimensions` + + * Value type is <> + * Default value is `"CW_dimensions"` + +The name of the field used to set the dimensions on an event metric +The field named here, if present in an event, must have an array of +one or more key & value pairs, for example... + `add_field => [ "CW_dimensions", "Environment", "CW_dimensions", "prod" ]` +or, equivalently... + `add_field => [ "CW_dimensions", "Environment" ]` + `add_field => [ "CW_dimensions", "prod" ]` + +[id="{version}-plugins-{type}s-{plugin}-field_metricname"] +===== `field_metricname` + + * Value type is <> + * Default value is `"CW_metricname"` + +The name of the field used to set the metric name on an event +The author of this plugin recommends adding this field to events in inputs & +filters rather than using the per-output default setting so that one output +plugin on your logstash indexer can serve all events (which of course had +fields set on your logstash shippers.) + +[id="{version}-plugins-{type}s-{plugin}-field_namespace"] +===== `field_namespace` + + * Value type is <> + * Default value is `"CW_namespace"` + +The name of the field used to set a different namespace per event +Note: Only one namespace can be sent to CloudWatch per API call +so setting different namespaces will increase the number of API calls +and those cost money. + +[id="{version}-plugins-{type}s-{plugin}-field_unit"] +===== `field_unit` + + * Value type is <> + * Default value is `"CW_unit"` + +The name of the field used to set the unit on an event metric + +[id="{version}-plugins-{type}s-{plugin}-field_value"] +===== `field_value` + + * Value type is <> + * Default value is `"CW_value"` + +The name of the field used to set the value (float) on an event metric + +[id="{version}-plugins-{type}s-{plugin}-metricname"] +===== `metricname` + + * Value type is <> + * There is no default value for this setting. + +The default metric name to use for events which do not have a `CW_metricname` field. +Beware: If this is provided then all events which pass through this output will be aggregated and +sent to CloudWatch, so use this carefully. Furthermore, when providing this option, you +will probably want to also restrict events from passing through this output using event +type, tag, and field matching + +[id="{version}-plugins-{type}s-{plugin}-namespace"] +===== `namespace` + + * Value type is <> + * Default value is `"Logstash"` + +The default namespace to use for events which do not have a `CW_namespace` field + +[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] +===== `proxy_uri` + + * Value type is <> + * There is no default value for this setting. 
+
+URI of the proxy server, if required.
+
+[id="{version}-plugins-{type}s-{plugin}-queue_size"]
+===== `queue_size`
+
+ * Value type is <>
+ * Default value is `10000`
+
+How many events to queue before forcing a call to the CloudWatch API ahead of the `timeframe` schedule.
+Set this to the number of events-per-timeframe you will be sending to CloudWatch to avoid extra API calls.
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+ * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
+ * Default value is `"us-east-1"`
+
+The AWS Region
+
+[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
+===== `secret_access_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The AWS Secret Access Key
+
+[id="{version}-plugins-{type}s-{plugin}-session_token"]
+===== `session_token`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The AWS session token for temporary credentials
+
+[id="{version}-plugins-{type}s-{plugin}-timeframe"]
+===== `timeframe`
+
+ * Value type is <>
+ * Default value is `"1m"`
+
+How often to send data to CloudWatch.
+This does not affect the event timestamps; events will always have their
+actual timestamp (to-the-minute) sent to CloudWatch.
+
+We only call the API if there is data to send.
+
+See the Rufus Scheduler docs for an https://github.com/jmettraux/rufus-scheduler#the-time-strings-understood-by-rufus-scheduler[explanation of allowed values].
+
+[id="{version}-plugins-{type}s-{plugin}-unit"]
+===== `unit`
+
+ * Value can be any of: `Seconds`, `Microseconds`, `Milliseconds`, `Bytes`, `Kilobytes`, `Megabytes`, `Gigabytes`, `Terabytes`, `Bits`, `Kilobits`, `Megabits`, `Gigabits`, `Terabits`, `Percent`, `Count`, `Bytes/Second`, `Kilobytes/Second`, `Megabytes/Second`, `Gigabytes/Second`, `Terabytes/Second`, `Bits/Second`, `Kilobits/Second`, `Megabits/Second`, `Gigabits/Second`, `Terabits/Second`, `Count/Second`, `None`
+ * Default value is `"Count"`
+
+The default unit to use for events which do not have a `CW_unit` field.
+If you set this option you should probably set the `value` option along with it.
+
+[id="{version}-plugins-{type}s-{plugin}-value"]
+===== `value`
+
+ * Value type is <>
+ * Default value is `"1"`
+
+The default value to use for events which do not have a `CW_value` field.
+If provided, this must be a string which can be converted to a float, for example
+"1", "2.34", ".5", and "0.67".
+If you set this option you should probably set the `unit` option along with it.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/cloudwatch-v3.0.6.asciidoc b/docs/versioned-plugins/outputs/cloudwatch-v3.0.6.asciidoc
new file mode 100644
index 000000000..1dbf14d83
--- /dev/null
+++ b/docs/versioned-plugins/outputs/cloudwatch-v3.0.6.asciidoc
@@ -0,0 +1,317 @@
+:plugin: cloudwatch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+/////////////////////////////////////////// +:version: v3.0.6 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-cloudwatch/blob/v3.0.6/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Cloudwatch output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output lets you aggregate and send metric data to AWS CloudWatch + +==== Summary: +This plugin is intended to be used on a logstash indexer agent (but that +is not the only way, see below.) In the intended scenario, one cloudwatch +output plugin is configured, on the logstash indexer node, with just AWS API +credentials, and possibly a region and/or a namespace. The output looks +for fields present in events, and when it finds them, it uses them to +calculate aggregate statistics. If the `metricname` option is set in this +output, then any events which pass through it will be aggregated & sent to +CloudWatch, but that is not recommended. The intended use is to NOT set the +metricname option here, and instead to add a `CW_metricname` field (and other +fields) to only the events you want sent to CloudWatch. + +When events pass through this output they are queued for background +aggregation and sending, which happens every minute by default. The +queue has a maximum size, and when it is full aggregated statistics will be +sent to CloudWatch ahead of schedule. Whenever this happens a warning +message is written to logstash's log. If you see this you should increase +the `queue_size` configuration option to avoid the extra API calls. The queue +is emptied every time we send data to CloudWatch. + +Note: when logstash is stopped the queue is destroyed before it can be processed. +This is a known limitation of logstash and will hopefully be addressed in a +future version. + +==== Details: +There are two ways to configure this plugin, and they can be used in +combination: event fields & per-output defaults + +Event Field configuration... +You add fields to your events in inputs & filters and this output reads +those fields to aggregate events. The names of the fields read are +configurable via the `field_*` options. + +Per-output defaults... +You set universal defaults in this output plugin's configuration, and +if an event does not have a field for that option then the default is +used. + +Notice, the event fields take precedence over the per-output defaults. + +At a minimum events must have a "metric name" to be sent to CloudWatch. +This can be achieved either by providing a default here OR by adding a +`CW_metricname` field. By default, if no other configuration is provided +besides a metric name, then events will be counted (Unit: Count, Value: 1) +by their metric name (either a default or from their `CW_metricname` field) + +Other fields which can be added to events to modify the behavior of this +plugin are, `CW_namespace`, `CW_unit`, `CW_value`, and +`CW_dimensions`. All of these field names are configurable in +this output. You can also set per-output defaults for any of them. +See below for details. 
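+
+As a sketch of the per-output defaults style described above (all values shown
+are illustrative), note that a bare `metricname` aggregates every event that
+reaches this output, so you would typically guard it with a conditional:
+
+[source,ruby]
+----------------------------------
+output {
+  if "metrics" in [tags] {
+    cloudwatch {
+      metricname => "events"   # per-output default; see the metricname warning below
+      namespace  => "Logstash"
+      unit       => "Count"
+      value      => "1"
+    }
+  }
+}
+----------------------------------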
+ +Read more about http://aws.amazon.com/cloudwatch/[AWS CloudWatch], +and the specific of API endpoint this output uses, +http://docs.amazonwebservices.com/AmazonCloudWatch/latest/APIReference/API_PutMetricData.html[PutMetricData] + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Cloudwatch Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-dimensions>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-field_dimensions>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-field_metricname>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-field_namespace>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-field_unit>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-field_value>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metricname>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-namespace>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-queue_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeframe>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-unit>> |<>, one of `["Seconds", "Microseconds", "Milliseconds", "Bytes", "Kilobytes", "Megabytes", "Gigabytes", "Terabytes", "Bits", "Kilobits", "Megabits", "Gigabits", "Terabits", "Percent", "Count", "Bytes/Second", "Kilobytes/Second", "Megabytes/Second", "Gigabytes/Second", "Terabytes/Second", "Bits/Second", "Kilobits/Second", "Megabits/Second", "Gigabits/Second", "Terabits/Second", "Count/Second", "None"]`|No +| <<{version}-plugins-{type}s-{plugin}-value>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-access_key_id"] +===== `access_key_id` + + * Value type is <> + * There is no default value for this setting. + +This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: + +1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config +2. External credentials file specified by `aws_credentials_file` +3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` +4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` +5. IAM Instance Profile (available when running inside EC2) + +[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] +===== `aws_credentials_file` + + * Value type is <> + * There is no default value for this setting. + +Path to YAML file containing a hash of AWS credentials. 
+This file will only be loaded if `access_key_id` and +`secret_access_key` aren't set. The contents of the +file should look like this: + +[source,ruby] +---------------------------------- + :access_key_id: "12345" + :secret_access_key: "54321" +---------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-batch_size"] +===== `batch_size` + + * Value type is <> + * Default value is `20` + +How many data points can be given in one call to the CloudWatch API + +[id="{version}-plugins-{type}s-{plugin}-dimensions"] +===== `dimensions` + + * Value type is <> + * There is no default value for this setting. + +The default dimensions [ name, value, ... ] to use for events which do not have a `CW_dimensions` field + +[id="{version}-plugins-{type}s-{plugin}-field_dimensions"] +===== `field_dimensions` + + * Value type is <> + * Default value is `"CW_dimensions"` + +The name of the field used to set the dimensions on an event metric +The field named here, if present in an event, must have an array of +one or more key & value pairs, for example... + `add_field => [ "CW_dimensions", "Environment", "CW_dimensions", "prod" ]` +or, equivalently... + `add_field => [ "CW_dimensions", "Environment" ]` + `add_field => [ "CW_dimensions", "prod" ]` + +[id="{version}-plugins-{type}s-{plugin}-field_metricname"] +===== `field_metricname` + + * Value type is <> + * Default value is `"CW_metricname"` + +The name of the field used to set the metric name on an event +The author of this plugin recommends adding this field to events in inputs & +filters rather than using the per-output default setting so that one output +plugin on your logstash indexer can serve all events (which of course had +fields set on your logstash shippers.) + +[id="{version}-plugins-{type}s-{plugin}-field_namespace"] +===== `field_namespace` + + * Value type is <> + * Default value is `"CW_namespace"` + +The name of the field used to set a different namespace per event +Note: Only one namespace can be sent to CloudWatch per API call +so setting different namespaces will increase the number of API calls +and those cost money. + +[id="{version}-plugins-{type}s-{plugin}-field_unit"] +===== `field_unit` + + * Value type is <> + * Default value is `"CW_unit"` + +The name of the field used to set the unit on an event metric + +[id="{version}-plugins-{type}s-{plugin}-field_value"] +===== `field_value` + + * Value type is <> + * Default value is `"CW_value"` + +The name of the field used to set the value (float) on an event metric + +[id="{version}-plugins-{type}s-{plugin}-metricname"] +===== `metricname` + + * Value type is <> + * There is no default value for this setting. + +The default metric name to use for events which do not have a `CW_metricname` field. +Beware: If this is provided then all events which pass through this output will be aggregated and +sent to CloudWatch, so use this carefully. Furthermore, when providing this option, you +will probably want to also restrict events from passing through this output using event +type, tag, and field matching + +[id="{version}-plugins-{type}s-{plugin}-namespace"] +===== `namespace` + + * Value type is <> + * Default value is `"Logstash"` + +The default namespace to use for events which do not have a `CW_namespace` field + +[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] +===== `proxy_uri` + + * Value type is <> + * There is no default value for this setting. 
+
+URI of the proxy server, if required.
+
+[id="{version}-plugins-{type}s-{plugin}-queue_size"]
+===== `queue_size`
+
+ * Value type is <>
+ * Default value is `10000`
+
+How many events to queue before forcing a call to the CloudWatch API ahead of the `timeframe` schedule.
+Set this to the number of events-per-timeframe you will be sending to CloudWatch to avoid extra API calls.
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+ * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
+ * Default value is `"us-east-1"`
+
+The AWS Region
+
+[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
+===== `secret_access_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The AWS Secret Access Key
+
+[id="{version}-plugins-{type}s-{plugin}-session_token"]
+===== `session_token`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The AWS session token for temporary credentials
+
+[id="{version}-plugins-{type}s-{plugin}-timeframe"]
+===== `timeframe`
+
+ * Value type is <>
+ * Default value is `"1m"`
+
+How often to send data to CloudWatch.
+This does not affect the event timestamps; events will always have their
+actual timestamp (to-the-minute) sent to CloudWatch.
+
+We only call the API if there is data to send.
+
+See the Rufus Scheduler docs for an https://github.com/jmettraux/rufus-scheduler#the-time-strings-understood-by-rufus-scheduler[explanation of allowed values].
+
+[id="{version}-plugins-{type}s-{plugin}-unit"]
+===== `unit`
+
+ * Value can be any of: `Seconds`, `Microseconds`, `Milliseconds`, `Bytes`, `Kilobytes`, `Megabytes`, `Gigabytes`, `Terabytes`, `Bits`, `Kilobits`, `Megabits`, `Gigabits`, `Terabits`, `Percent`, `Count`, `Bytes/Second`, `Kilobytes/Second`, `Megabytes/Second`, `Gigabytes/Second`, `Terabytes/Second`, `Bits/Second`, `Kilobits/Second`, `Megabits/Second`, `Gigabits/Second`, `Terabits/Second`, `Count/Second`, `None`
+ * Default value is `"Count"`
+
+The default unit to use for events which do not have a `CW_unit` field.
+If you set this option you should probably set the `value` option along with it.
+
+[id="{version}-plugins-{type}s-{plugin}-value"]
+===== `value`
+
+ * Value type is <>
+ * Default value is `"1"`
+
+The default value to use for events which do not have a `CW_value` field.
+If provided, this must be a string which can be converted to a float, for example
+"1", "2.34", ".5", and "0.67".
+If you set this option you should probably set the `unit` option along with it.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/cloudwatch-v3.0.7.asciidoc b/docs/versioned-plugins/outputs/cloudwatch-v3.0.7.asciidoc
new file mode 100644
index 000000000..33ef59919
--- /dev/null
+++ b/docs/versioned-plugins/outputs/cloudwatch-v3.0.7.asciidoc
@@ -0,0 +1,317 @@
+:plugin: cloudwatch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+/////////////////////////////////////////// +:version: v3.0.7 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-output-cloudwatch/blob/v3.0.7/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Cloudwatch output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output lets you aggregate and send metric data to AWS CloudWatch + +==== Summary: +This plugin is intended to be used on a logstash indexer agent (but that +is not the only way, see below.) In the intended scenario, one cloudwatch +output plugin is configured, on the logstash indexer node, with just AWS API +credentials, and possibly a region and/or a namespace. The output looks +for fields present in events, and when it finds them, it uses them to +calculate aggregate statistics. If the `metricname` option is set in this +output, then any events which pass through it will be aggregated & sent to +CloudWatch, but that is not recommended. The intended use is to NOT set the +metricname option here, and instead to add a `CW_metricname` field (and other +fields) to only the events you want sent to CloudWatch. + +When events pass through this output they are queued for background +aggregation and sending, which happens every minute by default. The +queue has a maximum size, and when it is full aggregated statistics will be +sent to CloudWatch ahead of schedule. Whenever this happens a warning +message is written to logstash's log. If you see this you should increase +the `queue_size` configuration option to avoid the extra API calls. The queue +is emptied every time we send data to CloudWatch. + +Note: when logstash is stopped the queue is destroyed before it can be processed. +This is a known limitation of logstash and will hopefully be addressed in a +future version. + +==== Details: +There are two ways to configure this plugin, and they can be used in +combination: event fields & per-output defaults + +Event Field configuration... +You add fields to your events in inputs & filters and this output reads +those fields to aggregate events. The names of the fields read are +configurable via the `field_*` options. + +Per-output defaults... +You set universal defaults in this output plugin's configuration, and +if an event does not have a field for that option then the default is +used. + +Notice, the event fields take precedence over the per-output defaults. + +At a minimum events must have a "metric name" to be sent to CloudWatch. +This can be achieved either by providing a default here OR by adding a +`CW_metricname` field. By default, if no other configuration is provided +besides a metric name, then events will be counted (Unit: Count, Value: 1) +by their metric name (either a default or from their `CW_metricname` field) + +Other fields which can be added to events to modify the behavior of this +plugin are, `CW_namespace`, `CW_unit`, `CW_value`, and +`CW_dimensions`. All of these field names are configurable in +this output. You can also set per-output defaults for any of them. +See below for details. 
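+
+As a complementary event-field sketch, per-event dimensions can be attached
+using the `add_field` convention documented under `field_dimensions` below;
+with only a metric name given, events are counted (Unit: Count, Value: 1), so
+no `CW_value` is needed here:
+
+[source,ruby]
+----------------------------------
+filter {
+  mutate {
+    add_field => [ "CW_metricname", "audit_events" ]
+    add_field => [ "CW_dimensions", "Environment", "CW_dimensions", "prod" ]
+  }
+}
+----------------------------------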
+ +Read more about http://aws.amazon.com/cloudwatch/[AWS CloudWatch], +and the specific of API endpoint this output uses, +http://docs.amazonwebservices.com/AmazonCloudWatch/latest/APIReference/API_PutMetricData.html[PutMetricData] + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Cloudwatch Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-dimensions>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-field_dimensions>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-field_metricname>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-field_namespace>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-field_unit>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-field_value>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metricname>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-namespace>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-queue_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeframe>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-unit>> |<>, one of `["Seconds", "Microseconds", "Milliseconds", "Bytes", "Kilobytes", "Megabytes", "Gigabytes", "Terabytes", "Bits", "Kilobits", "Megabits", "Gigabits", "Terabits", "Percent", "Count", "Bytes/Second", "Kilobytes/Second", "Megabytes/Second", "Gigabytes/Second", "Terabytes/Second", "Bits/Second", "Kilobits/Second", "Megabits/Second", "Gigabits/Second", "Terabits/Second", "Count/Second", "None"]`|No +| <<{version}-plugins-{type}s-{plugin}-value>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-access_key_id"] +===== `access_key_id` + + * Value type is <> + * There is no default value for this setting. + +This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: + +1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config +2. External credentials file specified by `aws_credentials_file` +3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` +4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` +5. IAM Instance Profile (available when running inside EC2) + +[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] +===== `aws_credentials_file` + + * Value type is <> + * There is no default value for this setting. + +Path to YAML file containing a hash of AWS credentials. 
+This file will only be loaded if `access_key_id` and +`secret_access_key` aren't set. The contents of the +file should look like this: + +[source,ruby] +---------------------------------- + :access_key_id: "12345" + :secret_access_key: "54321" +---------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-batch_size"] +===== `batch_size` + + * Value type is <> + * Default value is `20` + +How many data points can be given in one call to the CloudWatch API + +[id="{version}-plugins-{type}s-{plugin}-dimensions"] +===== `dimensions` + + * Value type is <> + * There is no default value for this setting. + +The default dimensions [ name, value, ... ] to use for events which do not have a `CW_dimensions` field + +[id="{version}-plugins-{type}s-{plugin}-field_dimensions"] +===== `field_dimensions` + + * Value type is <> + * Default value is `"CW_dimensions"` + +The name of the field used to set the dimensions on an event metric +The field named here, if present in an event, must have an array of +one or more key & value pairs, for example... + `add_field => [ "CW_dimensions", "Environment", "CW_dimensions", "prod" ]` +or, equivalently... + `add_field => [ "CW_dimensions", "Environment" ]` + `add_field => [ "CW_dimensions", "prod" ]` + +[id="{version}-plugins-{type}s-{plugin}-field_metricname"] +===== `field_metricname` + + * Value type is <> + * Default value is `"CW_metricname"` + +The name of the field used to set the metric name on an event +The author of this plugin recommends adding this field to events in inputs & +filters rather than using the per-output default setting so that one output +plugin on your logstash indexer can serve all events (which of course had +fields set on your logstash shippers.) + +[id="{version}-plugins-{type}s-{plugin}-field_namespace"] +===== `field_namespace` + + * Value type is <> + * Default value is `"CW_namespace"` + +The name of the field used to set a different namespace per event +Note: Only one namespace can be sent to CloudWatch per API call +so setting different namespaces will increase the number of API calls +and those cost money. + +[id="{version}-plugins-{type}s-{plugin}-field_unit"] +===== `field_unit` + + * Value type is <> + * Default value is `"CW_unit"` + +The name of the field used to set the unit on an event metric + +[id="{version}-plugins-{type}s-{plugin}-field_value"] +===== `field_value` + + * Value type is <> + * Default value is `"CW_value"` + +The name of the field used to set the value (float) on an event metric + +[id="{version}-plugins-{type}s-{plugin}-metricname"] +===== `metricname` + + * Value type is <> + * There is no default value for this setting. + +The default metric name to use for events which do not have a `CW_metricname` field. +Beware: If this is provided then all events which pass through this output will be aggregated and +sent to CloudWatch, so use this carefully. Furthermore, when providing this option, you +will probably want to also restrict events from passing through this output using event +type, tag, and field matching + +[id="{version}-plugins-{type}s-{plugin}-namespace"] +===== `namespace` + + * Value type is <> + * Default value is `"Logstash"` + +The default namespace to use for events which do not have a `CW_namespace` field + +[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] +===== `proxy_uri` + + * Value type is <> + * There is no default value for this setting. 
+
+URI of the proxy server, if required.
+
+[id="{version}-plugins-{type}s-{plugin}-queue_size"]
+===== `queue_size`
+
+ * Value type is <>
+ * Default value is `10000`
+
+How many events to queue before forcing a call to the CloudWatch API ahead of the `timeframe` schedule.
+Set this to the number of events-per-timeframe you will be sending to CloudWatch to avoid extra API calls.
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+ * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
+ * Default value is `"us-east-1"`
+
+The AWS Region
+
+[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
+===== `secret_access_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The AWS Secret Access Key
+
+[id="{version}-plugins-{type}s-{plugin}-session_token"]
+===== `session_token`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The AWS session token for temporary credentials
+
+[id="{version}-plugins-{type}s-{plugin}-timeframe"]
+===== `timeframe`
+
+ * Value type is <>
+ * Default value is `"1m"`
+
+How often to send data to CloudWatch.
+This does not affect the event timestamps; events will always have their
+actual timestamp (to-the-minute) sent to CloudWatch.
+
+We only call the API if there is data to send.
+
+See the Rufus Scheduler docs for an https://github.com/jmettraux/rufus-scheduler#the-time-strings-understood-by-rufus-scheduler[explanation of allowed values].
+
+[id="{version}-plugins-{type}s-{plugin}-unit"]
+===== `unit`
+
+ * Value can be any of: `Seconds`, `Microseconds`, `Milliseconds`, `Bytes`, `Kilobytes`, `Megabytes`, `Gigabytes`, `Terabytes`, `Bits`, `Kilobits`, `Megabits`, `Gigabits`, `Terabits`, `Percent`, `Count`, `Bytes/Second`, `Kilobytes/Second`, `Megabytes/Second`, `Gigabytes/Second`, `Terabytes/Second`, `Bits/Second`, `Kilobits/Second`, `Megabits/Second`, `Gigabits/Second`, `Terabits/Second`, `Count/Second`, `None`
+ * Default value is `"Count"`
+
+The default unit to use for events which do not have a `CW_unit` field.
+If you set this option you should probably set the `value` option along with it.
+
+[id="{version}-plugins-{type}s-{plugin}-value"]
+===== `value`
+
+ * Value type is <>
+ * Default value is `"1"`
+
+The default value to use for events which do not have a `CW_value` field.
+If provided, this must be a string which can be converted to a float, for example
+"1", "2.34", ".5", and "0.67".
+If you set this option you should probably set the `unit` option along with it.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/csv-index.asciidoc b/docs/versioned-plugins/outputs/csv-index.asciidoc
new file mode 100644
index 000000000..3747f9fd3
--- /dev/null
+++ b/docs/versioned-plugins/outputs/csv-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: csv
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::csv-v3.0.6.asciidoc[]
+include::csv-v3.0.5.asciidoc[]
+include::csv-v3.0.4.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/csv-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/csv-v3.0.4.asciidoc
new file mode 100644
index 000000000..2130816c2
--- /dev/null
+++ b/docs/versioned-plugins/outputs/csv-v3.0.4.asciidoc
@@ -0,0 +1,175 @@
+:plugin: csv
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-csv/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Csv output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+CSV output.
+
+Write events to disk in CSV or another delimited format.
+Based on the file output; many config values are shared.
+Uses the Ruby csv library internally.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Csv Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-csv_options>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-file_mode>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-gzip>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-spreadsheet_safe>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-create_if_deleted"]
+===== `create_if_deleted`
+
+ * Value type is <>
+ * Default value is `true`
+
+If the configured file is deleted, but an event is handled by the plugin,
+the plugin will recreate the file.
Default => true + +[id="{version}-plugins-{type}s-{plugin}-csv_options"] +===== `csv_options` + + * Value type is <> + * Default value is `{}` + +Options for CSV output. This is passed directly to the Ruby stdlib to_csv function. +Full documentation is available on the http://ruby-doc.org/stdlib-2.0.0/libdoc/csv/rdoc/index.html[Ruby CSV documentation page]. +A typical use case would be to use alternative column or row seperators eg: `csv_options => {"col_sep" => "\t" "row_sep" => "\r\n"}` gives tab seperated data with windows line endings + +[id="{version}-plugins-{type}s-{plugin}-dir_mode"] +===== `dir_mode` + + * Value type is <> + * Default value is `-1` + +Dir access mode to use. Note that due to the bug in jruby system umask +is ignored on linux: https://github.com/jruby/jruby/issues/3426 +Setting it to -1 uses default OS value. +Example: `"dir_mode" => 0750` + +[id="{version}-plugins-{type}s-{plugin}-fields"] +===== `fields` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The field names from the event that should be written to the CSV file. +Fields are written to the CSV in the same order as the array. +If a field does not exist on the event, an empty string will be written. +Supports field reference syntax eg: `fields => ["field1", "[nested][field]"]`. + +[id="{version}-plugins-{type}s-{plugin}-file_mode"] +===== `file_mode` + + * Value type is <> + * Default value is `-1` + +File access mode to use. Note that due to the bug in jruby system umask +is ignored on linux: https://github.com/jruby/jruby/issues/3426 +Setting it to -1 uses default OS value. +Example: `"file_mode" => 0640` + +[id="{version}-plugins-{type}s-{plugin}-filename_failure"] +===== `filename_failure` + + * Value type is <> + * Default value is `"_filepath_failures"` + +If the generated path is invalid, the events will be saved +into this file and inside the defined path. + +[id="{version}-plugins-{type}s-{plugin}-flush_interval"] +===== `flush_interval` + + * Value type is <> + * Default value is `2` + +Flush interval (in seconds) for flushing writes to log files. +0 will flush on every message. + +[id="{version}-plugins-{type}s-{plugin}-gzip"] +===== `gzip` + + * Value type is <> + * Default value is `false` + +Gzip the output stream before writing to disk. + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +This output writes events to files on disk. You can use fields +from the event as parts of the filename and/or path. + +By default, this output writes one event per line in **json** format. +You can customise the line format using the `line` codec like +[source,ruby] +output { + file { + path => ... + codec => line { format => "custom format: %{message}"} + } +} +The path to the file to write. Event fields can be used here, +like `/var/log/logstash/%{host}/%{application}` +One may also utilize the path option for date-based log +rotation via the joda time format. This will use the event +timestamp. +E.g.: `path => "./test-%{+YYYY-MM-dd}.txt"` to create +`./test-2013-05-29.txt` + +If you use an absolute path you cannot start with a dynamic string. +E.g: `/%{myfield}/`, `/test-%{myfield}/` are not valid paths + +[id="{version}-plugins-{type}s-{plugin}-spreadsheet_safe"] +===== `spreadsheet_safe` + + * Value type is <> + * Default value is `true` + +Option to not escape/munge string values. 
Please note turning off this option +may not make the values safe in your spreadsheet application + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/csv-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/csv-v3.0.5.asciidoc new file mode 100644 index 000000000..795a42e69 --- /dev/null +++ b/docs/versioned-plugins/outputs/csv-v3.0.5.asciidoc @@ -0,0 +1,175 @@ +:plugin: csv +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-csv/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Csv output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +CSV output. + +Write events to disk in CSV or other delimited format +Based on the file output, many config values are shared +Uses the Ruby csv library internally + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Csv Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-csv_options>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-file_mode>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-gzip>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-spreadsheet_safe>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-create_if_deleted"] +===== `create_if_deleted` + + * Value type is <> + * Default value is `true` + +If the configured file is deleted, but an event is handled by the plugin, +the plugin will recreate the file. Default => true + +[id="{version}-plugins-{type}s-{plugin}-csv_options"] +===== `csv_options` + + * Value type is <> + * Default value is `{}` + +Options for CSV output. This is passed directly to the Ruby stdlib to_csv function. +Full documentation is available on the http://ruby-doc.org/stdlib-2.0.0/libdoc/csv/rdoc/index.html[Ruby CSV documentation page]. +A typical use case would be to use alternative column or row seperators eg: `csv_options => {"col_sep" => "\t" "row_sep" => "\r\n"}` gives tab seperated data with windows line endings + +[id="{version}-plugins-{type}s-{plugin}-dir_mode"] +===== `dir_mode` + + * Value type is <> + * Default value is `-1` + +Dir access mode to use. 
Note that due to the bug in jruby system umask +is ignored on linux: https://github.com/jruby/jruby/issues/3426 +Setting it to -1 uses default OS value. +Example: `"dir_mode" => 0750` + +[id="{version}-plugins-{type}s-{plugin}-fields"] +===== `fields` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The field names from the event that should be written to the CSV file. +Fields are written to the CSV in the same order as the array. +If a field does not exist on the event, an empty string will be written. +Supports field reference syntax eg: `fields => ["field1", "[nested][field]"]`. + +[id="{version}-plugins-{type}s-{plugin}-file_mode"] +===== `file_mode` + + * Value type is <> + * Default value is `-1` + +File access mode to use. Note that due to the bug in jruby system umask +is ignored on linux: https://github.com/jruby/jruby/issues/3426 +Setting it to -1 uses default OS value. +Example: `"file_mode" => 0640` + +[id="{version}-plugins-{type}s-{plugin}-filename_failure"] +===== `filename_failure` + + * Value type is <> + * Default value is `"_filepath_failures"` + +If the generated path is invalid, the events will be saved +into this file and inside the defined path. + +[id="{version}-plugins-{type}s-{plugin}-flush_interval"] +===== `flush_interval` + + * Value type is <> + * Default value is `2` + +Flush interval (in seconds) for flushing writes to log files. +0 will flush on every message. + +[id="{version}-plugins-{type}s-{plugin}-gzip"] +===== `gzip` + + * Value type is <> + * Default value is `false` + +Gzip the output stream before writing to disk. + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +This output writes events to files on disk. You can use fields +from the event as parts of the filename and/or path. + +By default, this output writes one event per line in **json** format. +You can customise the line format using the `line` codec like +[source,ruby] +output { + file { + path => ... + codec => line { format => "custom format: %{message}"} + } +} +The path to the file to write. Event fields can be used here, +like `/var/log/logstash/%{host}/%{application}` +One may also utilize the path option for date-based log +rotation via the joda time format. This will use the event +timestamp. +E.g.: `path => "./test-%{+YYYY-MM-dd}.txt"` to create +`./test-2013-05-29.txt` + +If you use an absolute path you cannot start with a dynamic string. +E.g: `/%{myfield}/`, `/test-%{myfield}/` are not valid paths + +[id="{version}-plugins-{type}s-{plugin}-spreadsheet_safe"] +===== `spreadsheet_safe` + + * Value type is <> + * Default value is `true` + +Option to not escape/munge string values. Please note turning off this option +may not make the values safe in your spreadsheet application + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/csv-v3.0.6.asciidoc b/docs/versioned-plugins/outputs/csv-v3.0.6.asciidoc new file mode 100644 index 000000000..f5526fded --- /dev/null +++ b/docs/versioned-plugins/outputs/csv-v3.0.6.asciidoc @@ -0,0 +1,175 @@ +:plugin: csv +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+:version: v3.0.6
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-output-csv/blob/v3.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Csv output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+CSV output.
+
+Writes events to disk in CSV or another delimited format.
+This output is based on the file output, and many of its configuration values are shared.
+It uses the Ruby csv library internally.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Csv Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-csv_options>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-file_mode>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-gzip>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-spreadsheet_safe>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-create_if_deleted"]
+===== `create_if_deleted`
+
+  * Value type is <>
+  * Default value is `true`
+
+If the configured file is deleted, but an event is handled by the plugin,
+the plugin will recreate the file.
+
+[id="{version}-plugins-{type}s-{plugin}-csv_options"]
+===== `csv_options`
+
+  * Value type is <>
+  * Default value is `{}`
+
+Options for CSV output. This is passed directly to the Ruby stdlib `to_csv` function.
+Full documentation is available on the http://ruby-doc.org/stdlib-2.0.0/libdoc/csv/rdoc/index.html[Ruby CSV documentation page].
+A typical use case would be to use alternative column or row separators, e.g. `csv_options => {"col_sep" => "\t" "row_sep" => "\r\n"}` gives tab-separated data with Windows line endings.
+
+[id="{version}-plugins-{type}s-{plugin}-dir_mode"]
+===== `dir_mode`
+
+  * Value type is <>
+  * Default value is `-1`
+
+Dir access mode to use. Note that due to a bug in JRuby, the system umask
+is ignored on Linux: https://github.com/jruby/jruby/issues/3426
+Setting it to -1 uses the default OS value.
+Example: `"dir_mode" => 0750`
+
+[id="{version}-plugins-{type}s-{plugin}-fields"]
+===== `fields`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The field names from the event that should be written to the CSV file.
+Fields are written to the CSV in the same order as the array.
+If a field does not exist on the event, an empty string will be written.
+Supports field reference syntax, e.g. `fields => ["field1", "[nested][field]"]`.
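+
+For illustration, here is a minimal sketch of a csv output that combines
+`path`, `fields`, and `csv_options`. The file path and field names are
+hypothetical:
+
+[source,ruby]
+----
+output {
+  csv {
+    # Hypothetical destination; event fields and date math may be used here.
+    path   => "/var/log/exports/events-%{+YYYY-MM-dd}.csv"
+    # Columns are written in this order; missing fields become empty strings.
+    fields => ["[@timestamp]", "[host]", "[nested][field]", "message"]
+    # Tab-separated output with Windows line endings.
+    csv_options => { "col_sep" => "\t" "row_sep" => "\r\n" }
+  }
+}
+----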
+ +[id="{version}-plugins-{type}s-{plugin}-file_mode"] +===== `file_mode` + + * Value type is <> + * Default value is `-1` + +File access mode to use. Note that due to the bug in jruby system umask +is ignored on linux: https://github.com/jruby/jruby/issues/3426 +Setting it to -1 uses default OS value. +Example: `"file_mode" => 0640` + +[id="{version}-plugins-{type}s-{plugin}-filename_failure"] +===== `filename_failure` + + * Value type is <> + * Default value is `"_filepath_failures"` + +If the generated path is invalid, the events will be saved +into this file and inside the defined path. + +[id="{version}-plugins-{type}s-{plugin}-flush_interval"] +===== `flush_interval` + + * Value type is <> + * Default value is `2` + +Flush interval (in seconds) for flushing writes to log files. +0 will flush on every message. + +[id="{version}-plugins-{type}s-{plugin}-gzip"] +===== `gzip` + + * Value type is <> + * Default value is `false` + +Gzip the output stream before writing to disk. + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +This output writes events to files on disk. You can use fields +from the event as parts of the filename and/or path. + +By default, this output writes one event per line in **json** format. +You can customise the line format using the `line` codec like +[source,ruby] +output { + file { + path => ... + codec => line { format => "custom format: %{message}"} + } +} +The path to the file to write. Event fields can be used here, +like `/var/log/logstash/%{host}/%{application}` +One may also utilize the path option for date-based log +rotation via the joda time format. This will use the event +timestamp. +E.g.: `path => "./test-%{+YYYY-MM-dd}.txt"` to create +`./test-2013-05-29.txt` + +If you use an absolute path you cannot start with a dynamic string. +E.g: `/%{myfield}/`, `/test-%{myfield}/` are not valid paths + +[id="{version}-plugins-{type}s-{plugin}-spreadsheet_safe"] +===== `spreadsheet_safe` + + * Value type is <> + * Default value is `true` + +Option to not escape/munge string values. Please note turning off this option +may not make the values safe in your spreadsheet application + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/datadog-index.asciidoc b/docs/versioned-plugins/outputs/datadog-index.asciidoc new file mode 100644 index 000000000..0b3f5a51e --- /dev/null +++ b/docs/versioned-plugins/outputs/datadog-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: datadog +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-16 +| <> | 2017-06-23 +|======================================================================= + +include::datadog-v3.0.4.asciidoc[] +include::datadog-v3.0.3.asciidoc[] +include::datadog-v3.0.1.asciidoc[] + diff --git a/docs/versioned-plugins/outputs/datadog-v3.0.1.asciidoc b/docs/versioned-plugins/outputs/datadog-v3.0.1.asciidoc new file mode 100644 index 000000000..60b6aca0d --- /dev/null +++ b/docs/versioned-plugins/outputs/datadog-v3.0.1.asciidoc @@ -0,0 +1,124 @@ +:plugin: datadog +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+:version: v3.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-datadog/blob/v3.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Datadog output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output sends events to DataDogHQ based on Logstash events.
+
+Note that since Logstash maintains no state,
+these will be one-shot events.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Datadog Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-alert_type>> |<>, one of `["info", "error", "warning", "success"]`|No
+| <<{version}-plugins-{type}s-{plugin}-api_key>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-date_happened>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-dd_tags>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-priority>> |<>, one of `["normal", "low"]`|No
+| <<{version}-plugins-{type}s-{plugin}-source_type_name>> |<>, one of `["nagios", "hudson", "jenkins", "user", "my apps", "feed", "chef", "puppet", "git", "bitbucket", "fabric", "capistrano"]`|No
+| <<{version}-plugins-{type}s-{plugin}-text>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-title>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-alert_type"]
+===== `alert_type`
+
+  * Value can be any of: `info`, `error`, `warning`, `success`
+  * There is no default value for this setting.
+
+Alert type.
+
+[id="{version}-plugins-{type}s-{plugin}-api_key"]
+===== `api_key`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+Your DatadogHQ API key.
+
+[id="{version}-plugins-{type}s-{plugin}-date_happened"]
+===== `date_happened`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Date happened.
+
+[id="{version}-plugins-{type}s-{plugin}-dd_tags"]
+===== `dd_tags`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Set any custom tags for this event.
+The default is the Logstash tags, if any.
+
+[id="{version}-plugins-{type}s-{plugin}-priority"]
+===== `priority`
+
+  * Value can be any of: `normal`, `low`
+  * There is no default value for this setting.
+ +Priority + +[id="{version}-plugins-{type}s-{plugin}-source_type_name"] +===== `source_type_name` + + * Value can be any of: `nagios`, `hudson`, `jenkins`, `user`, `my apps`, `feed`, `chef`, `puppet`, `git`, `bitbucket`, `fabric`, `capistrano` + * Default value is `"my apps"` + +Source type name + +[id="{version}-plugins-{type}s-{plugin}-text"] +===== `text` + + * Value type is <> + * Default value is `"%{message}"` + +Text + +[id="{version}-plugins-{type}s-{plugin}-title"] +===== `title` + + * Value type is <> + * Default value is `"Logstash event for %{host}"` + +Title + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/datadog-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/datadog-v3.0.3.asciidoc new file mode 100644 index 000000000..4f9991b5b --- /dev/null +++ b/docs/versioned-plugins/outputs/datadog-v3.0.3.asciidoc @@ -0,0 +1,122 @@ +:plugin: datadog +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-datadog/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Datadog output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output sends events to DataDogHQ based on Logstash events. + +Note that since Logstash maintains no state +these will be one-shot events + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Datadog Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-alert_type>> |<>, one of `["info", "error", "warning", "success"]`|No +| <<{version}-plugins-{type}s-{plugin}-api_key>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-date_happened>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-dd_tags>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-priority>> |<>, one of `["normal", "low"]`|No +| <<{version}-plugins-{type}s-{plugin}-source_type_name>> |<>, one of `["nagios", "hudson", "jenkins", "user", "my apps", "feed", "chef", "puppet", "git", "bitbucket", "fabric", "capistrano"]`|No +| <<{version}-plugins-{type}s-{plugin}-text>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-title>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-alert_type"] +===== `alert_type` + + * Value can be any of: `info`, `error`, `warning`, `success` + * There is no default value for this setting. + +Alert type + +[id="{version}-plugins-{type}s-{plugin}-api_key"] +===== `api_key` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. 
+ +Your DatadogHQ API key + +[id="{version}-plugins-{type}s-{plugin}-date_happened"] +===== `date_happened` + + * Value type is <> + * There is no default value for this setting. + +Date Happened + +[id="{version}-plugins-{type}s-{plugin}-dd_tags"] +===== `dd_tags` + + * Value type is <> + * There is no default value for this setting. + +Tags +Set any custom tags for this event +Default are the Logstash tags if any + +[id="{version}-plugins-{type}s-{plugin}-priority"] +===== `priority` + + * Value can be any of: `normal`, `low` + * There is no default value for this setting. + +Priority + +[id="{version}-plugins-{type}s-{plugin}-source_type_name"] +===== `source_type_name` + + * Value can be any of: `nagios`, `hudson`, `jenkins`, `user`, `my apps`, `feed`, `chef`, `puppet`, `git`, `bitbucket`, `fabric`, `capistrano` + * Default value is `"my apps"` + +Source type name + +[id="{version}-plugins-{type}s-{plugin}-text"] +===== `text` + + * Value type is <> + * Default value is `"%{message}"` + +Text + +[id="{version}-plugins-{type}s-{plugin}-title"] +===== `title` + + * Value type is <> + * Default value is `"Logstash event for %{host}"` + +Title + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/datadog-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/datadog-v3.0.4.asciidoc new file mode 100644 index 000000000..914cf87d5 --- /dev/null +++ b/docs/versioned-plugins/outputs/datadog-v3.0.4.asciidoc @@ -0,0 +1,122 @@ +:plugin: datadog +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-output-datadog/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Datadog output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output sends events to DataDogHQ based on Logstash events. + +Note that since Logstash maintains no state +these will be one-shot events + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Datadog Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-alert_type>> |<>, one of `["info", "error", "warning", "success"]`|No +| <<{version}-plugins-{type}s-{plugin}-api_key>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-date_happened>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-dd_tags>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-priority>> |<>, one of `["normal", "low"]`|No +| <<{version}-plugins-{type}s-{plugin}-source_type_name>> |<>, one of `["nagios", "hudson", "jenkins", "user", "my apps", "feed", "chef", "puppet", "git", "bitbucket", "fabric", "capistrano"]`|No +| <<{version}-plugins-{type}s-{plugin}-text>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-title>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-alert_type"] +===== `alert_type` + + * Value can be any of: `info`, `error`, `warning`, `success` + * There is no default value for this setting. + +Alert type + +[id="{version}-plugins-{type}s-{plugin}-api_key"] +===== `api_key` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Your DatadogHQ API key + +[id="{version}-plugins-{type}s-{plugin}-date_happened"] +===== `date_happened` + + * Value type is <> + * There is no default value for this setting. + +Date Happened + +[id="{version}-plugins-{type}s-{plugin}-dd_tags"] +===== `dd_tags` + + * Value type is <> + * There is no default value for this setting. + +Tags +Set any custom tags for this event +Default are the Logstash tags if any + +[id="{version}-plugins-{type}s-{plugin}-priority"] +===== `priority` + + * Value can be any of: `normal`, `low` + * There is no default value for this setting. 
+ +Priority + +[id="{version}-plugins-{type}s-{plugin}-source_type_name"] +===== `source_type_name` + + * Value can be any of: `nagios`, `hudson`, `jenkins`, `user`, `my apps`, `feed`, `chef`, `puppet`, `git`, `bitbucket`, `fabric`, `capistrano` + * Default value is `"my apps"` + +Source type name + +[id="{version}-plugins-{type}s-{plugin}-text"] +===== `text` + + * Value type is <> + * Default value is `"%{message}"` + +Text + +[id="{version}-plugins-{type}s-{plugin}-title"] +===== `title` + + * Value type is <> + * Default value is `"Logstash event for %{host}"` + +Title + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/datadog_metrics-index.asciidoc b/docs/versioned-plugins/outputs/datadog_metrics-index.asciidoc new file mode 100644 index 000000000..9d03fa92a --- /dev/null +++ b/docs/versioned-plugins/outputs/datadog_metrics-index.asciidoc @@ -0,0 +1,14 @@ +:plugin: datadog_metrics +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-08-21 +| <> | 2017-06-23 +|======================================================================= + +include::datadog_metrics-v3.0.2.asciidoc[] +include::datadog_metrics-v3.0.1.asciidoc[] + diff --git a/docs/versioned-plugins/outputs/datadog_metrics-v3.0.1.asciidoc b/docs/versioned-plugins/outputs/datadog_metrics-v3.0.1.asciidoc new file mode 100644 index 000000000..a91b31451 --- /dev/null +++ b/docs/versioned-plugins/outputs/datadog_metrics-v3.0.1.asciidoc @@ -0,0 +1,130 @@ +:plugin: datadog_metrics +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.1 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-output-datadog_metrics/blob/v3.0.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Datadog_metrics output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output lets you send metrics to +DataDogHQ based on Logstash events. +Default `queue_size` and `timeframe` are low in order to provide near realtime alerting. +If you do not use Datadog for alerting, consider raising these thresholds. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Datadog_metrics Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-api_key>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-dd_tags>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-device>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metric_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metric_type>> |<>, one of `["gauge", "counter", "%{metric_type}"]`|No +| <<{version}-plugins-{type}s-{plugin}-metric_value>> |<<,>>|No +| <<{version}-plugins-{type}s-{plugin}-queue_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeframe>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-api_key"] +===== `api_key` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Your DatadogHQ API key. https://app.datadoghq.com/account/settings#api + +[id="{version}-plugins-{type}s-{plugin}-dd_tags"] +===== `dd_tags` + + * Value type is <> + * There is no default value for this setting. + +Set any custom tags for this event, +default are the Logstash tags if any. + +[id="{version}-plugins-{type}s-{plugin}-device"] +===== `device` + + * Value type is <> + * Default value is `"%{metric_device}"` + +The name of the device that produced the metric. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"%{host}"` + +The name of the host that produced the metric. + +[id="{version}-plugins-{type}s-{plugin}-metric_name"] +===== `metric_name` + + * Value type is <> + * Default value is `"%{metric_name}"` + +The name of the time series. + +[id="{version}-plugins-{type}s-{plugin}-metric_type"] +===== `metric_type` + + * Value can be any of: `gauge`, `counter`, `%{metric_type}` + * Default value is `"%{metric_type}"` + +The type of the metric. + +[id="{version}-plugins-{type}s-{plugin}-metric_value"] +===== `metric_value` + + * Value type is <> + * Default value is `"%{metric_value}"` + +The value. + +[id="{version}-plugins-{type}s-{plugin}-queue_size"] +===== `queue_size` + + * Value type is <> + * Default value is `10` + +How many events to queue before flushing to Datadog +prior to schedule set in `@timeframe` + +[id="{version}-plugins-{type}s-{plugin}-timeframe"] +===== `timeframe` + + * Value type is <> + * Default value is `10` + +How often (in seconds) to flush queued events to Datadog + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/datadog_metrics-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/datadog_metrics-v3.0.2.asciidoc new file mode 100644 index 000000000..454b6feac --- /dev/null +++ b/docs/versioned-plugins/outputs/datadog_metrics-v3.0.2.asciidoc @@ -0,0 +1,130 @@ +:plugin: datadog_metrics +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// +:version: v3.0.2 +:release_date: 2017-08-21 +:changelog_url: https://github.com/logstash-plugins/logstash-output-datadog_metrics/blob/v3.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Datadog_metrics output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output lets you send metrics to +DataDogHQ based on Logstash events. +Default `queue_size` and `timeframe` are low in order to provide near realtime alerting. +If you do not use Datadog for alerting, consider raising these thresholds. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Datadog_metrics Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-api_key>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-dd_tags>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-device>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metric_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metric_type>> |<>, one of `["gauge", "counter", "%{metric_type}"]`|No +| <<{version}-plugins-{type}s-{plugin}-metric_value>> |<<,>>|No +| <<{version}-plugins-{type}s-{plugin}-queue_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeframe>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-api_key"] +===== `api_key` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Your DatadogHQ API key. https://app.datadoghq.com/account/settings#api + +[id="{version}-plugins-{type}s-{plugin}-dd_tags"] +===== `dd_tags` + + * Value type is <> + * There is no default value for this setting. + +Set any custom tags for this event, +default are the Logstash tags if any. + +[id="{version}-plugins-{type}s-{plugin}-device"] +===== `device` + + * Value type is <> + * Default value is `"%{metric_device}"` + +The name of the device that produced the metric. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"%{host}"` + +The name of the host that produced the metric. + +[id="{version}-plugins-{type}s-{plugin}-metric_name"] +===== `metric_name` + + * Value type is <> + * Default value is `"%{metric_name}"` + +The name of the time series. + +[id="{version}-plugins-{type}s-{plugin}-metric_type"] +===== `metric_type` + + * Value can be any of: `gauge`, `counter`, `%{metric_type}` + * Default value is `"%{metric_type}"` + +The type of the metric. + +[id="{version}-plugins-{type}s-{plugin}-metric_value"] +===== `metric_value` + + * Value type is <> + * Default value is `"%{metric_value}"` + +The value. 
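+
+As a sketch of how these sprintf-style defaults resolve, the following
+hypothetical pipeline sets the metric fields on each event and lets the
+output pick them up. The field values and the `${DD_API_KEY}` environment
+variable lookup are illustrative:
+
+[source,ruby]
+----
+filter {
+  mutate {
+    # Hypothetical fields; the output's defaults resolve these via
+    # sprintf: %{metric_name}, %{metric_value}, and %{metric_type}.
+    add_field => {
+      "metric_name"  => "app.response_time"
+      "metric_value" => "%{duration}"
+      "metric_type"  => "gauge"
+    }
+  }
+}
+output {
+  datadog_metrics {
+    api_key => "${DD_API_KEY}"
+  }
+}
+----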
+ +[id="{version}-plugins-{type}s-{plugin}-queue_size"] +===== `queue_size` + + * Value type is <> + * Default value is `10` + +How many events to queue before flushing to Datadog +prior to schedule set in `@timeframe` + +[id="{version}-plugins-{type}s-{plugin}-timeframe"] +===== `timeframe` + + * Value type is <> + * Default value is `10` + +How often (in seconds) to flush queued events to Datadog + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/elasticsearch-index.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-index.asciidoc new file mode 100644 index 000000000..ceb808ddf --- /dev/null +++ b/docs/versioned-plugins/outputs/elasticsearch-index.asciidoc @@ -0,0 +1,44 @@ +:plugin: elasticsearch +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-30 +| <> | 2017-09-29 +| <> | 2017-09-21 +| <> | 2017-09-21 +| <> | 2017-08-21 +| <> | 2017-08-16 +| <> | 2017-08-01 +| <> | 2017-09-22 +| <> | 2017-09-21 +| <> | 2017-08-21 +| <> | 2017-07-20 +| <> | 2017-07-18 +| <> | 2017-06-23 +| <> | 2017-06-09 +| <> | 2017-06-06 +| <> | 2017-06-05 +| <> | 2017-05-26 +|======================================================================= + +include::elasticsearch-v9.0.2.asciidoc[] +include::elasticsearch-v9.0.0.asciidoc[] +include::elasticsearch-v8.2.2.asciidoc[] +include::elasticsearch-v8.2.0.asciidoc[] +include::elasticsearch-v8.1.1.asciidoc[] +include::elasticsearch-v8.0.1.asciidoc[] +include::elasticsearch-v8.0.0.asciidoc[] +include::elasticsearch-v7.4.2.asciidoc[] +include::elasticsearch-v7.4.1.asciidoc[] +include::elasticsearch-v7.4.0.asciidoc[] +include::elasticsearch-v7.3.8.asciidoc[] +include::elasticsearch-v7.3.7.asciidoc[] +include::elasticsearch-v7.3.6.asciidoc[] +include::elasticsearch-v7.3.5.asciidoc[] +include::elasticsearch-v7.3.4.asciidoc[] +include::elasticsearch-v7.3.3.asciidoc[] +include::elasticsearch-v7.3.2.asciidoc[] + diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.3.2.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.3.2.asciidoc new file mode 100644 index 000000000..c4445c489 --- /dev/null +++ b/docs/versioned-plugins/outputs/elasticsearch-v7.3.2.asciidoc @@ -0,0 +1,679 @@ +:plugin: elasticsearch +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v7.3.2 +:release_date: 2017-05-26 +:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v7.3.2/CHANGELOG.md +:include_path: ../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Elasticsearch + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +.Compatibility Note +[NOTE] +================================================================================ +Starting with Elasticsearch 5.3, there's an {ref}modules-http.html[HTTP setting] +called `http.content_type.required`. If this option is set to `true`, and you +are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output +plugin to version 6.2.5 or higher. 
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at https://www.elastic.co/products/elasticsearch
+
+==== Template management for Elasticsearch 5.x
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+*Users installing ES 5.x and LS 5.x*
+This change will not affect you and you will continue to use the ES defaults.
+
+*Users upgrading from LS 2.x to LS 5.x with ES 5.x*
+LS will not force upgrade the template if a `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy changed significantly in the 2.2.0 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures.
+
+The following errors are retried infinitely:
+
+- Network errors (inability to connect)
+- 429 (Too Many Requests) errors
+- 503 (Service Unavailable) errors
+
+NOTE: 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than for this plugin to do so.
+
+==== Batch Sizes
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default; for
+Elasticsearch versions 5.0 and later, you don't have to set any configs in Elasticsearch for
+it to send back compressed responses. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, enable the `http_compression`
+setting in your Logstash config file.
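+
+For example, a minimal `elasticsearch` output block that enables request
+compression might look like the following sketch (the host address is
+illustrative):
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts => ["http://127.0.0.1:9200"]  # illustrative address
+    # Gzip-compress request bodies sent to Elasticsearch.
+    http_compression => true
+  }
+}
+----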
+ + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Elasticsearch Output Configuration Options + +This plugin supports the following configuration options plus the <> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No 
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+  * Value type is <>
+  * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation].
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+HTTP path to send the _bulk requests to.
+This defaults to a concatenation of the `path` parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The .cer or .pem file used to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+  * Value type is <>
+  * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Creates a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise, the document type will be assigned the value of 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+  * Value type is <>
+  * Default value is `[]`
+
+Set the Elasticsearch errors that you don't want to log by adding them to this whitelist.
+A useful example is when you want to skip all 409 errors,
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_size"]
+===== `flush_size` (DEPRECATED)
+
+  * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+  * Value type is <>
+  * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+  * Value type is <>
+  * There is no default value for this setting.
+ +HTTP Path where a HEAD request is sent when a backend is marked down +the request is sent in the background to see if it has come back again +before it is once again eligible to service requests. +If you have custom firewall rules you may need to change this + +[id="{version}-plugins-{type}s-{plugin}-hosts"] +===== `hosts` + + * Value type is <> + * Default value is `[//127.0.0.1]` + +Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. +Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (eg. 9200, not 9300). + `"127.0.0.1"` + `["127.0.0.1:9200","127.0.0.2:9200"]` + `["http://127.0.0.1"]` + `["https://127.0.0.1:9200"]` + `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath) +It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list +to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch. + +Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance. + +[id="{version}-plugins-{type}s-{plugin}-http_compression"] +===== `http_compression` + + * Value type is <> + * Default value is `false` + +Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond + +[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"] +===== `idle_flush_time` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `1` + + + +[id="{version}-plugins-{type}s-{plugin}-index"] +===== `index` + + * Value type is <> + * Default value is `"logstash-%{+YYYY.MM.dd}"` + +The index to write events to. This can be dynamic using the `%{foo}` syntax. +The default value will partition your indices by day so you can more easily +delete old data or only search specific date ranges. +Indexes may not contain uppercase characters. +For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}. +LS uses Joda to format the index pattern from event timestamp. +Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here]. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +The keystore used to present a certificate to the server. +It can be either .jks or .p12 + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is <> + * There is no default value for this setting. + +Set the keystore password + +[id="{version}-plugins-{type}s-{plugin}-manage_template"] +===== `manage_template` + + * Value type is <> + * Default value is `true` + +From Logstash 1.3 onwards, a template is applied to Elasticsearch during +Logstash's startup if one with the name `template_name` does not already exist. +By default, the contents of this template is the default template for +`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern +`logstash-*`. 
Should you require support for other index names, or would like +to change the mappings in the template in general, a custom template can be +specified by setting `template` to the path of a template file. + +Setting `manage_template` to false disables this feature. If you require more +control over template creation, (e.g. creating indices dynamically based on +field names) you should set `manage_template` to false and use the REST +API to apply your templates manually. + +[id="{version}-plugins-{type}s-{plugin}-parameters"] +===== `parameters` + + * Value type is <> + * There is no default value for this setting. + +Pass a set of key value pairs as the URL query string. This query string is added +to every host listed in the 'hosts' configuration. If the 'hosts' list contains +urls that already have query strings, the one specified here will be appended. + +[id="{version}-plugins-{type}s-{plugin}-parent"] +===== `parent` + + * Value type is <> + * Default value is `nil` + +For child documents, ID of the associated parent. +This can be dynamic using the `%{foo}` syntax. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to authenticate to a secure Elasticsearch cluster + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * Value type is <> + * There is no default value for this setting. + +HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps +the root path for the Elasticsearch HTTP API lives. +Note that if you use paths as components of URLs in the 'hosts' field you may +not also set this field. That will raise an error at startup + +[id="{version}-plugins-{type}s-{plugin}-pipeline"] +===== `pipeline` + + * Value type is <> + * Default value is `nil` + +Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration +here like `pipeline => "%{INGEST_PIPELINE}"` + +[id="{version}-plugins-{type}s-{plugin}-pool_max"] +===== `pool_max` + + * Value type is <> + * Default value is `1000` + +While the output tries to reuse connections efficiently we have a maximum. +This sets the maximum number of open connections the output will create. +Setting this too low may mean frequently closing / opening connections +which is bad. + +[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] +===== `pool_max_per_route` + + * Value type is <> + * Default value is `100` + +While the output tries to reuse connections efficiently we have a maximum per endpoint. +This sets the maximum number of open connections per endpoint the output will create. +Setting this too low may mean frequently closing / opening connections +which is bad. + +[id="{version}-plugins-{type}s-{plugin}-proxy"] +===== `proxy` + + * Value type is <> + * There is no default value for this setting. + +Set the address of a forward HTTP proxy. +This used to accept hashes as arguments but now only accepts +arguments of the URI type to prevent leaking credentials. + +[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] +===== `resurrect_delay` + + * Value type is <> + * Default value is `5` + +How frequently, in seconds, to wait between resurrection attempts. 
+Resurrection is the process by which backend endpoints marked 'down' are checked +to see if they have come back to life + +[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] +===== `retry_initial_interval` + + * Value type is <> + * Default value is `2` + +Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval` + +[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] +===== `retry_max_interval` + + * Value type is <> + * Default value is `64` + +Set max interval in seconds between bulk retries. + +[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] +===== `retry_on_conflict` + + * Value type is <> + * Default value is `1` + +The number of times Elasticsearch should internally retry an update/upserted document +See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates] +for more info + +[id="{version}-plugins-{type}s-{plugin}-routing"] +===== `routing` + + * Value type is <> + * There is no default value for this setting. + +A routing override to be applied to all processed events. +This can be dynamic using the `%{foo}` syntax. + +[id="{version}-plugins-{type}s-{plugin}-script"] +===== `script` + + * Value type is <> + * Default value is `""` + +Set script name for scripted update mode + +[id="{version}-plugins-{type}s-{plugin}-script_lang"] +===== `script_lang` + + * Value type is <> + * Default value is `"painless"` + +Set the language of the used script. If not set, this defaults to painless in ES 5.0 + +[id="{version}-plugins-{type}s-{plugin}-script_type"] +===== `script_type` + + * Value can be any of: `inline`, `indexed`, `file` + * Default value is `["inline"]` + +Define the type of script referenced by "script" variable + inline : "script" contains inline script + indexed : "script" contains the name of script directly indexed in elasticsearch + file : "script" contains the name of script stored in elasticseach's config directory + +[id="{version}-plugins-{type}s-{plugin}-script_var_name"] +===== `script_var_name` + + * Value type is <> + * Default value is `"event"` + +Set variable name passed to script (scripted update) + +[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] +===== `scripted_upsert` + + * Value type is <> + * Default value is `false` + +if enabled, script is in charge of creating non-existent document (scripted update) + +[id="{version}-plugins-{type}s-{plugin}-sniffing"] +===== `sniffing` + + * Value type is <> + * Default value is `false` + +This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. +Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use +this with master nodes, you probably want to disable HTTP on them by setting +`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or +manually enter multiple Elasticsearch hosts using the `hosts` parameter. + +[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] +===== `sniffing_delay` + + * Value type is <> + * Default value is `5` + +How long to wait, in seconds, between sniffing attempts + +[id="{version}-plugins-{type}s-{plugin}-sniffing_path"] +===== `sniffing_path` + + * Value type is <> + * There is no default value for this setting. 
+ +HTTP Path to be used for the sniffing requests +the default value is computed by concatenating the path value and "_nodes/http" +if sniffing_path is set it will be used as an absolute path +do not use full URL here, only paths, e.g. "/sniff/_nodes/http" + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * There is no default value for this setting. + +Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme +is specified in the URLs listed in 'hosts'. If no explicit protocol is specified plain HTTP will be used. +If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS URL is given in 'hosts' + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] +===== `ssl_certificate_verification` + + * Value type is <> + * Default value is `true` + +Option to validate the server's certificate. Disabling this severely compromises security. +For more information on disabling certificate verification please read +https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf + +[id="{version}-plugins-{type}s-{plugin}-template"] +===== `template` + + * Value type is <> + * There is no default value for this setting. + +You can set the path to your own template here, if you so desire. +If not set, the included template will be used. + +[id="{version}-plugins-{type}s-{plugin}-template_name"] +===== `template_name` + + * Value type is <> + * Default value is `"logstash"` + +This configuration option defines how the template is named inside Elasticsearch. +Note that if you have used the template management features and subsequently +change this, you will need to prune the old template manually, e.g. + +`curl -XDELETE ` + +where `OldTemplateName` is whatever the former setting was. + +[id="{version}-plugins-{type}s-{plugin}-template_overwrite"] +===== `template_overwrite` + + * Value type is <> + * Default value is `false` + +The template_overwrite option will always overwrite the indicated template +in Elasticsearch with either the one indicated by template or the included one. +This option is set to false by default. If you always want to stay up to date +with the template provided by Logstash, this option could be very useful to you. +Likewise, if you have your own template file managed by puppet, for example, and +you wanted to be able to update it regularly, this option could help there as well. + +Please note that if you are using your own customized version of the Logstash +template (logstash), setting this to true will make Logstash to overwrite +the "logstash" template (i.e. removing all customized settings) + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `60` + +Set the timeout, in seconds, for network operations and requests sent Elasticsearch. If +a timeout occurs, the request will be retried. + +[id="{version}-plugins-{type}s-{plugin}-truststore"] +===== `truststore` + + * Value type is <> + * There is no default value for this setting. + +The JKS truststore to validate the server's certificate. +Use either `:truststore` or `:cacert` + +[id="{version}-plugins-{type}s-{plugin}-truststore_password"] +===== `truststore_password` + + * Value type is <> + * There is no default value for this setting. 
+ +Set the truststore password + +[id="{version}-plugins-{type}s-{plugin}-upsert"] +===== `upsert` + + * Value type is <> + * Default value is `""` + +Set upsert content for update mode.s +Create a new document with this parameter as json string if `document_id` doesn't exists + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * There is no default value for this setting. + +Username to authenticate to a secure Elasticsearch cluster + +[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] +===== `validate_after_inactivity` + + * Value type is <> + * Default value is `10000` + +How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. +You may want to set this lower, if you get connection errors regularly +Quoting the Apache commons docs (this client is based Apache Commmons): +'Defines period of inactivity in milliseconds after which persistent connections must +be re-validated prior to being leased to the consumer. Non-positive value passed to +this method disables connection validation. This check helps detect connections that +have become stale (half-closed) while kept inactive in the pool.' +See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info] + +[id="{version}-plugins-{type}s-{plugin}-version"] +===== `version` + + * Value type is <> + * There is no default value for this setting. + +The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here. +See https://www.elastic.co/blog/elasticsearch-versioning-support. + +[id="{version}-plugins-{type}s-{plugin}-version_type"] +===== `version_type` + + * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force` + * There is no default value for this setting. + +The version_type to use for indexing. +See https://www.elastic.co/blog/elasticsearch-versioning-support. +See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types + + + +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.3.3.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.3.3.asciidoc new file mode 100644 index 000000000..381cdacb1 --- /dev/null +++ b/docs/versioned-plugins/outputs/elasticsearch-v7.3.3.asciidoc @@ -0,0 +1,679 @@ +:plugin: elasticsearch +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v7.3.3 +:release_date: 2017-06-05 +:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v7.3.3/CHANGELOG.md +:include_path: ../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Elasticsearch + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +.Compatibility Note +[NOTE] +================================================================================ +Starting with Elasticsearch 5.3, there's an {ref}modules-http.html[HTTP setting] +called `http.content_type.required`. If this option is set to `true`, and you +are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output +plugin to version 6.2.5 or higher. 
+ +================================================================================ + +This plugin is the recommended method of storing logs in Elasticsearch. +If you plan on using the Kibana web interface, you'll want to use this output. + +This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0. +We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower, +yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having +to upgrade Logstash in lock-step. + +You can learn more about Elasticsearch at + +==== Template management for Elasticsearch 5.x +Index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0. +Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default +behavior. + +** Users installing ES 5.x and LS 5.x ** +This change will not affect you and you will continue to use the ES defaults. + +** Users upgrading from LS 2.x to LS 5.x with ES 5.x ** +LS will not force upgrade the template, if `logstash` template already exists. This means you will still use +`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after +the new template is installed. + +==== Retry Policy + +The retry policy has changed significantly in the 2.2.0 release. +This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience +either partial or total failures. + +The following errors are retried infinitely: + +- Network errors (inability to connect) +- 429 (Too many requests) and +- 503 (Service unavailable) errors + +NOTE: 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions. +It is more performant for Elasticsearch to retry these exceptions than this plugin. + +==== Batch Sizes ==== +This plugin attempts to send batches of events as a single request. However, if +a request exceeds 20MB we will break it up until multiple batch requests. If a single document exceeds 20MB it will be sent as a single request. + +==== DNS Caching + +This plugin uses the JVM to lookup DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl], +a global setting for the JVM. + +As an example, to set your DNS TTL to 1 second you would set +the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`. + +Keep in mind that a connection with keepalive enabled will +not reevaluate its DNS value while the keepalive is in effect. + +==== HTTP Compression + +This plugin supports request and response compression. Response compression is enabled by default and +for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for +it to send back compressed response. For versions before 5.0, `http.compression` must be set to `true` in +Elasticsearch[https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http] to take advantage of response compression when using this plugin + +For requests compression, regardless of the Elasticsearch version, users have to enable `http_compression` +setting in their Logstash config file. 
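+For example, a minimal sketch of an output block that enables request
+compression might look like this (the `hosts` value below is a placeholder for
+your own cluster address, not a default taken from this page):
+
+[source,ruby]
+----------------------------------
+output {
+  elasticsearch {
+    hosts => ["http://localhost:9200"]
+    http_compression => true   # compress request bodies sent to Elasticsearch
+  }
+}
+----------------------------------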
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|=======================================================================
+
+Also see <> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is <>
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation].
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP path at which to send the `_bulk` requests.
+This defaults to a concatenation of the `path` parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The .cer or .pem file used to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is <>
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Creates a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise, the document type will be assigned the value of 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is <>
+ * Default value is `[]`
+
+A whitelist of Elasticsearch errors that you don't want to log.
+A useful example is when you want to skip all 409 errors,
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_size"]
+===== `flush_size` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP path at which a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if the backend has come back again
+before it is once again eligible to service requests.
+If you have custom firewall rules, you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <>
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance. If given an array, it will load balance requests across the hosts specified in the `hosts` parameter.
+Remember that the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (e.g. 9200, not 9300).
+ `"127.0.0.1"`
+ `["127.0.0.1:9200","127.0.0.2:9200"]`
+ `["http://127.0.0.1"]`
+ `["https://127.0.0.1:9200"]`
+ `["https://127.0.0.1:9200/mypath"]` (if using a proxy on a subpath)
+It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23`, for instance.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is <>
+ * Default value is `false`
+
+Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond.
+
+[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"]
+===== `idle_flush_time` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * Default value is `1`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is <>
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or only search specific date ranges.
+Indexes may not contain uppercase characters.
+For weekly indexes, ISO 8601 format is recommended, e.g. `logstash-%{+xxxx.ww}`.
+LS uses Joda to format the index pattern from the event timestamp.
+Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The keystore used to present a certificate to the server.
+It can be either .jks or .p12.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-manage_template"]
+===== `manage_template`
+
+ * Value type is <>
+ * Default value is `true`
+
+From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+Logstash's startup if one with the name `template_name` does not already exist.
+By default, the contents of this template are the default template for
+`logstash-%{+YYYY.MM.dd}`, which always matches indices based on the pattern
+`logstash-*`. Should you require support for other index names, or would like
+to change the mappings in the template in general, a custom template can be
+specified by setting `template` to the path of a template file.
+
+Setting `manage_template` to false disables this feature. If you require more
+control over template creation (e.g. creating indices dynamically based on
+field names), you should set `manage_template` to false and use the REST
+API to apply your templates manually.
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Pass a set of key value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+URLs that already have query strings, the one specified here will be appended.
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is <>
+ * Default value is `nil`
+
+For child documents, the ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+the root path of the Elasticsearch HTTP API.
+Note that if you use paths as components of URLs in the 'hosts' field, you may
+not also set this field; that will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is <>
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+here, like `pipeline => "%{INGEST_PIPELINE}"`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is <>
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently, we have a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is <>
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently, we have a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments, but now only accepts
+arguments of the URI type to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is <>
+ * Default value is `5`
+
+How long, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is <>
+ * Default value is `2`
+
+Set the initial interval, in seconds, between bulk retries. Doubled on each retry up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is <>
+ * Default value is `64`
+
+Set the maximum interval, in seconds, between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is <>
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates documentation]
+for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is <>
+ * Default value is `""`
+
+Set the script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is <>
+ * Default value is `"painless"`
+
+Set the language of the script to use. If not set, this defaults to painless in ES 5.0.
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `["inline"]`
+
+Define the type of script referenced by the "script" variable:
+
+- inline: "script" contains an inline script
+- indexed: "script" contains the name of a script directly indexed in Elasticsearch
+- file: "script" contains the name of a script stored in Elasticsearch's config directory
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is <>
+ * Default value is `"event"`
+
+Set the variable name passed to the script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is <>
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is <>
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is <>
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP path to be used for the sniffing requests.
+The default value is computed by concatenating the `path` value and "_nodes/http".
+If `sniffing_path` is set, it will be used as an absolute path.
+Do not use a full URL here, only paths, e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified, plain HTTP will be used.
+If SSL is explicitly disabled here, the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is <>
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification, please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE http://localhost:9200/_template/OldTemplateName`
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is <>
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. removing all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is <>
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The JKS truststore used to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is <>
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <>
+ * Default value is `10000`
+
+How long to wait before checking whether a keepalive connection is stale before executing a request on it.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info].
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.3.4.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.3.4.asciidoc
new file mode 100644
index 000000000..a0fd41326
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v7.3.4.asciidoc
@@ -0,0 +1,679 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v7.3.4
+:release_date: 2017-06-06
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v7.3.4/CHANGELOG.md
+:include_path: ../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at https://www.elastic.co/products/elasticsearch
+
+==== Template management for Elasticsearch 5.x
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+LS will not force upgrade the template if a `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy has changed significantly in the 2.2.0 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures.
+
+The following errors are retried infinitely:
+
+- Network errors (inability to connect)
+- 429 (Too many requests) and
+- 503 (Service unavailable) errors
+
+NOTE: 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than this plugin.
+
+==== Batch Sizes
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to lookup DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default, and
+for Elasticsearch versions 5.0 and later the user doesn't have to set any configs in Elasticsearch for
+it to send back a compressed response. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
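+As an illustration of the update/upsert flow and the 409 retry behavior
+described above, the following sketch updates documents by ID and creates them
+when missing (the `hosts` value and the metadata field used for `document_id`
+are hypothetical placeholders):
+
+[source,ruby]
+----------------------------------
+output {
+  elasticsearch {
+    hosts => ["http://localhost:9200"]
+    action => "update"                            # update an existing document by id
+    document_id => "%{[@metadata][fingerprint]}"  # id taken from an event field (placeholder)
+    doc_as_upsert => true                         # create the document from the event if it is missing
+    retry_on_conflict => 3                        # let Elasticsearch retry version conflicts internally
+  }
+}
+----------------------------------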
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|======================================================================= + +Also see <> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-action"] +===== `action` + + * Value type is <> + * Default value is `"index"` + +Protocol agnostic (i.e. non-http, non-java specific) configs go here +Protocol agnostic methods +The Elasticsearch action to perform. Valid actions are: + +- index: indexes a document (an event from Logstash). +- delete: deletes a document by id (An id is required for this action) +- create: indexes a document, fails if a document by that id already exists in the index. +- update: updates a document by id. Update has a special case where you can upsert -- update a + document if not already present. See the `upsert` option. NOTE: This does not work and is not supported + in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash! +- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}` + would use the foo field for the action + +For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation] + +[id="{version}-plugins-{type}s-{plugin}-bulk_path"] +===== `bulk_path` + + * Value type is <> + * There is no default value for this setting. + +HTTP Path to perform the _bulk requests to +this defaults to a concatenation of the path parameter and "_bulk" + +[id="{version}-plugins-{type}s-{plugin}-cacert"] +===== `cacert` + + * Value type is <> + * There is no default value for this setting. + +The .cer or .pem file to validate the server's certificate + +[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"] +===== `doc_as_upsert` + + * Value type is <> + * Default value is `false` + +Enable `doc_as_upsert` for update mode. +Create a new document with source if `document_id` doesn't exist in Elasticsearch + +[id="{version}-plugins-{type}s-{plugin}-document_id"] +===== `document_id` + + * Value type is <> + * There is no default value for this setting. + +The document ID for the index. Useful for overwriting existing entries in +Elasticsearch with the same ID. + +[id="{version}-plugins-{type}s-{plugin}-document_type"] +===== `document_type` + + * Value type is <> + * There is no default value for this setting. + +The document type to write events to. Generally you should try to write only +similar events to the same 'type'. String expansion `%{foo}` works here. +Unless you set 'document_type', the event 'type' will be used if it exists +otherwise the document type will be assigned the value of 'logs' + +[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"] +===== `failure_type_logging_whitelist` + + * Value type is <> + * Default value is `[]` + +Set the Elasticsearch errors in the whitelist that you don't want to log. +A useful example is when you want to skip all 409 errors +which are `document_already_exists_exception`. + +[id="{version}-plugins-{type}s-{plugin}-flush_size"] +===== `flush_size` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + + + +[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"] +===== `healthcheck_path` + + * Value type is <> + * There is no default value for this setting. 
+ +HTTP Path where a HEAD request is sent when a backend is marked down +the request is sent in the background to see if it has come back again +before it is once again eligible to service requests. +If you have custom firewall rules you may need to change this + +[id="{version}-plugins-{type}s-{plugin}-hosts"] +===== `hosts` + + * Value type is <> + * Default value is `[//127.0.0.1]` + +Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. +Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (eg. 9200, not 9300). + `"127.0.0.1"` + `["127.0.0.1:9200","127.0.0.2:9200"]` + `["http://127.0.0.1"]` + `["https://127.0.0.1:9200"]` + `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath) +It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list +to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch. + +Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance. + +[id="{version}-plugins-{type}s-{plugin}-http_compression"] +===== `http_compression` + + * Value type is <> + * Default value is `false` + +Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond + +[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"] +===== `idle_flush_time` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `1` + + + +[id="{version}-plugins-{type}s-{plugin}-index"] +===== `index` + + * Value type is <> + * Default value is `"logstash-%{+YYYY.MM.dd}"` + +The index to write events to. This can be dynamic using the `%{foo}` syntax. +The default value will partition your indices by day so you can more easily +delete old data or only search specific date ranges. +Indexes may not contain uppercase characters. +For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}. +LS uses Joda to format the index pattern from event timestamp. +Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here]. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +The keystore used to present a certificate to the server. +It can be either .jks or .p12 + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is <> + * There is no default value for this setting. + +Set the keystore password + +[id="{version}-plugins-{type}s-{plugin}-manage_template"] +===== `manage_template` + + * Value type is <> + * Default value is `true` + +From Logstash 1.3 onwards, a template is applied to Elasticsearch during +Logstash's startup if one with the name `template_name` does not already exist. +By default, the contents of this template is the default template for +`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern +`logstash-*`. 
Should you require support for other index names, or would like +to change the mappings in the template in general, a custom template can be +specified by setting `template` to the path of a template file. + +Setting `manage_template` to false disables this feature. If you require more +control over template creation, (e.g. creating indices dynamically based on +field names) you should set `manage_template` to false and use the REST +API to apply your templates manually. + +[id="{version}-plugins-{type}s-{plugin}-parameters"] +===== `parameters` + + * Value type is <> + * There is no default value for this setting. + +Pass a set of key value pairs as the URL query string. This query string is added +to every host listed in the 'hosts' configuration. If the 'hosts' list contains +urls that already have query strings, the one specified here will be appended. + +[id="{version}-plugins-{type}s-{plugin}-parent"] +===== `parent` + + * Value type is <> + * Default value is `nil` + +For child documents, ID of the associated parent. +This can be dynamic using the `%{foo}` syntax. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to authenticate to a secure Elasticsearch cluster + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * Value type is <> + * There is no default value for this setting. + +HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps +the root path for the Elasticsearch HTTP API lives. +Note that if you use paths as components of URLs in the 'hosts' field you may +not also set this field. That will raise an error at startup + +[id="{version}-plugins-{type}s-{plugin}-pipeline"] +===== `pipeline` + + * Value type is <> + * Default value is `nil` + +Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration +here like `pipeline => "%{INGEST_PIPELINE}"` + +[id="{version}-plugins-{type}s-{plugin}-pool_max"] +===== `pool_max` + + * Value type is <> + * Default value is `1000` + +While the output tries to reuse connections efficiently we have a maximum. +This sets the maximum number of open connections the output will create. +Setting this too low may mean frequently closing / opening connections +which is bad. + +[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] +===== `pool_max_per_route` + + * Value type is <> + * Default value is `100` + +While the output tries to reuse connections efficiently we have a maximum per endpoint. +This sets the maximum number of open connections per endpoint the output will create. +Setting this too low may mean frequently closing / opening connections +which is bad. + +[id="{version}-plugins-{type}s-{plugin}-proxy"] +===== `proxy` + + * Value type is <> + * There is no default value for this setting. + +Set the address of a forward HTTP proxy. +This used to accept hashes as arguments but now only accepts +arguments of the URI type to prevent leaking credentials. + +[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] +===== `resurrect_delay` + + * Value type is <> + * Default value is `5` + +How frequently, in seconds, to wait between resurrection attempts. 
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is <<number,number>>
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently, there is a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing and opening connections,
+which hurts performance.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is <<number,number>>
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently, there is a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing and opening connections,
+which hurts performance.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <<uri,uri>>
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments, but now only accepts
+arguments of the URI type to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is <<number,number>>
+ * Default value is `5`
+
+How long, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is <<number,number>>
+ * Default value is `2`
+
+Set the initial interval, in seconds, between bulk retries. This interval is doubled on each retry, up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is <<number,number>>
+ * Default value is `64`
+
+Set the maximum interval, in seconds, between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is <<number,number>>
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates documentation]
+for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic, using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is <<string,string>>
+ * Default value is `""`
+
+Set the script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is <<string,string>>
+ * Default value is `"painless"`
+
+Set the language of the script to use. For Elasticsearch 5.0 and later, this defaults to `painless`.
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `["inline"]`
+
+Define the type of script referenced by the "script" variable:
+
+ inline : "script" contains an inline script
+ indexed : "script" contains the name of a script directly indexed in Elasticsearch
+ file : "script" contains the name of a script stored in Elasticsearch's config directory
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is <<string,string>>
+ * Default value is `"event"`
+
+Set the variable name passed to the script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update).
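+
+As a hedged sketch of how the scripted update options fit together, assuming an indexed script named `increment-counter` and an `id` field on each event (both are illustrative assumptions, not plugin defaults):
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts           => ["http://127.0.0.1:9200"]
+    action          => "update"
+    document_id     => "%{id}"
+    script          => "increment-counter"   # name of an indexed script (assumed)
+    script_type     => "indexed"
+    scripted_upsert => true                  # let the script create missing documents
+  }
+}
+----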
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is <<number,number>>
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+HTTP path to be used for the sniffing requests.
+The default value is computed by concatenating the `path` value and "_nodes/http".
+If `sniffing_path` is set, it will be used as an absolute path.
+Do not use a full URL here; only paths are allowed, e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <<boolean,boolean>>
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified, plain HTTP will be used.
+If SSL is explicitly disabled here, the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification, please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf.
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is <<string,string>>
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE <http://localhost:9200/_template/OldTemplateName?pretty>`
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. removing all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is <<number,number>>
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+Set the truststore password.
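+
+For instance, a sketch of a TLS-secured, authenticated output; the truststore path, password, and credentials below are placeholders you would replace with your own:
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts               => ["https://127.0.0.1:9200"]
+    ssl                 => true
+    truststore          => "/etc/logstash/truststore.jks"   # placeholder path
+    truststore_password => "changeme"
+    user                => "logstash_writer"
+    password            => "changeme"
+  }
+}
+----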
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is <<string,string>>
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <<number,number>>
+ * Default value is `10000`
+
+How long to wait before checking whether a keepalive connection is stale before executing a request on it.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.3.5.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.3.5.asciidoc
new file mode 100644
index 000000000..7a76e1b95
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v7.3.5.asciidoc
@@ -0,0 +1,679 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v7.3.5
+:release_date: 2017-06-09
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v7.3.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol, one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at https://www.elastic.co/products/elasticsearch.
+
+==== Template management for Elasticsearch 5.x
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+Logstash will not force-upgrade the template if a `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy has changed significantly in the 2.2.0 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures.
+
+The following errors are retried infinitely:
+
+- Network errors (inability to connect)
+- 429 (Too many requests) and
+- 503 (Service unavailable) errors
+
+NOTE: 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than this plugin.
+
+==== Batch Sizes
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB, we will break it up into multiple batch requests. If a single document exceeds 20MB, it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default; for
+Elasticsearch versions 5.0 and later, you don't have to set any configs in Elasticsearch for
+it to send back a compressed response. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, enable the `http_compression`
+setting in your Logstash config file.
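+
+For example, a minimal sketch that turns on request compression (the host is a placeholder):
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts            => ["http://127.0.0.1:9200"]
+    http_compression => true
+  }
+}
+----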
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-action>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-document_id>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-document_type>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |<<uri,uri>>|No
+| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-parameters>> |<<hash,hash>>|No
+| <<{version}-plugins-{type}s-{plugin}-parent>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<uri,uri>>|No
+| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-routing>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-script>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-script_type>> |<<string,string>>, one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-upsert>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |<<string,string>>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is <<string,string>>
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation].
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+HTTP path to send the _bulk requests to.
+This defaults to a concatenation of the `path` parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with the source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise, the document type will be assigned the value of 'logs'.
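+
+For example, a sketch that updates documents in place by a per-event ID, creating them when absent; the `fingerprint` field is an assumed event field, not something the plugin provides:
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts         => ["http://127.0.0.1:9200"]
+    action        => "update"
+    document_id   => "%{fingerprint}"   # assumed event field
+    doc_as_upsert => true
+  }
+}
+----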
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is <<array,array>>
+ * Default value is `[]`
+
+Set the Elasticsearch errors in the whitelist that you don't want to log.
+A useful example is when you want to skip all 409 errors,
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_size"]
+===== `flush_size` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <<number,number>>
+ * There is no default value for this setting.
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+HTTP path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if the backend has come back
+before it is once again eligible to service requests.
+If you have custom firewall rules, you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <<uri,uri>>
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance. If given an array, it will load balance requests across the hosts specified in the `hosts` parameter.
+Remember that the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (e.g. 9200, not 9300).
+ `"127.0.0.1"`
+ `["127.0.0.1:9200","127.0.0.2:9200"]`
+ `["http://127.0.0.1"]`
+ `["https://127.0.0.1:9200"]`
+ `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
+It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+to prevent Logstash from sending bulk requests to the master nodes. This parameter should reference only data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23`, for instance.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and later.
+
+[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"]
+===== `idle_flush_time` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <<number,number>>
+ * Default value is `1`
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is <<string,string>>
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic, using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or search only specific date ranges.
+Indexes may not contain uppercase characters.
+For weekly indexes, the ISO 8601 format is recommended, e.g. `logstash-%{+xxxx.ww}`.
+Logstash uses Joda to format the index pattern from the event timestamp.
+Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
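+
+For example, a sketch of a weekly index using the ISO 8601 week format recommended above:
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts => ["http://127.0.0.1:9200"]
+    index => "logstash-%{+xxxx.ww}"
+  }
+}
+----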
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The keystore used to present a certificate to the server.
+It can be either a .jks or a .p12 file.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+Set the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-manage_template"]
+===== `manage_template`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+Logstash's startup if one with the name `template_name` does not already exist.
+By default, the contents of this template are the default template for
+`logstash-%{+YYYY.MM.dd}`, which always matches indices based on the pattern
+`logstash-*`. Should you require support for other index names, or would like
+to change the mappings in the template in general, a custom template can be
+specified by setting `template` to the path of a template file.
+
+Setting `manage_template` to false disables this feature. If you require more
+control over template creation (e.g. creating indices dynamically based on
+field names), you should set `manage_template` to false and use the REST
+API to apply your templates manually.
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is <<hash,hash>>
+ * There is no default value for this setting.
+
+Pass a set of key-value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+URLs that already have query strings, the one specified here will be appended.
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is <<string,string>>
+ * Default value is `nil`
+
+For child documents, the ID of the associated parent.
+This can be dynamic, using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+HTTP path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+the root path of the Elasticsearch HTTP API.
+Note that if you use paths as components of URLs in the 'hosts' field, you may
+not also set this field. Doing so will raise an error at startup.
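+
+As an illustration, a sketch for an Elasticsearch cluster reached through a proxy that remaps the API under `/es`; the hostname and subpath are placeholders:
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts => ["http://proxy.example.local:8080"]
+    path  => "/es"
+  }
+}
+----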
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is <<string,string>>
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event-dependent configuration
+here, such as `pipeline => "%{INGEST_PIPELINE}"`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is <<number,number>>
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently, there is a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing and opening connections,
+which hurts performance.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is <<number,number>>
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently, there is a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing and opening connections,
+which hurts performance.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <<uri,uri>>
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments, but now only accepts
+arguments of the URI type to prevent leaking credentials.
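+
+For example, a sketch routing traffic through a hypothetical forward proxy, given as a URI:
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts => ["http://127.0.0.1:9200"]
+    proxy => "http://proxy.example.local:3128"
+  }
+}
+----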
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is <<number,number>>
+ * Default value is `5`
+
+How long, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is <<number,number>>
+ * Default value is `2`
+
+Set the initial interval, in seconds, between bulk retries. This interval is doubled on each retry, up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is <<number,number>>
+ * Default value is `64`
+
+Set the maximum interval, in seconds, between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is <<number,number>>
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates documentation]
+for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic, using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is <<string,string>>
+ * Default value is `""`
+
+Set the script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is <<string,string>>
+ * Default value is `"painless"`
+
+Set the language of the script to use. For Elasticsearch 5.0 and later, this defaults to `painless`.
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `["inline"]`
+
+Define the type of script referenced by the "script" variable:
+
+ inline : "script" contains an inline script
+ indexed : "script" contains the name of a script directly indexed in Elasticsearch
+ file : "script" contains the name of a script stored in Elasticsearch's config directory
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is <<string,string>>
+ * Default value is `"event"`
+
+Set the variable name passed to the script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is <<number,number>>
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+HTTP path to be used for the sniffing requests.
+The default value is computed by concatenating the `path` value and "_nodes/http".
+If `sniffing_path` is set, it will be used as an absolute path.
+Do not use a full URL here; only paths are allowed, e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <<boolean,boolean>>
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified, plain HTTP will be used.
+If SSL is explicitly disabled here, the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification, please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf.
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is <<string,string>>
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE <http://localhost:9200/_template/OldTemplateName?pretty>`
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. removing all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is <<number,number>>
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+Set the truststore password.
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is <<string,string>>
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <<number,number>>
+ * Default value is `10000`
+
+How long to wait before checking whether a keepalive connection is stale before executing a request on it.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.3.6.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.3.6.asciidoc
new file mode 100644
index 000000000..d42111b16
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v7.3.6.asciidoc
@@ -0,0 +1,680 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v7.3.6
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v7.3.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol, one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at https://www.elastic.co/products/elasticsearch.
+
+==== Template management for Elasticsearch 5.x
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+Logstash will not force-upgrade the template if a `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy has changed significantly in the 2.2.0 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures.
+
+The following errors are retried infinitely:
+
+- Network errors (inability to connect)
+- 429 (Too many requests) and
+- 503 (Service unavailable) errors
+
+NOTE: 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than this plugin.
+
+==== Batch Sizes
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB, we will break it up into multiple batch requests. If a single document exceeds 20MB, it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default; for
+Elasticsearch versions 5.0 and later, you don't have to set any configs in Elasticsearch for
+it to send back a compressed response. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, enable the `http_compression`
+setting in your Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-action>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-document_id>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-document_type>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |<<uri,uri>>|No
+| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-parameters>> |<<hash,hash>>|No
+| <<{version}-plugins-{type}s-{plugin}-parent>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<uri,uri>>|No
+| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-routing>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-script>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-script_type>> |<<string,string>>, one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-upsert>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |<<string,string>>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is <<string,string>>
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation].
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+HTTP path to send the _bulk requests to.
+This defaults to a concatenation of the `path` parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with the source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise, the document type will be assigned the value of 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is <<array,array>>
+ * Default value is `[]`
+
+Set the Elasticsearch errors in the whitelist that you don't want to log.
+A useful example is when you want to skip all 409 errors,
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_size"]
+===== `flush_size` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <<number,number>>
+ * There is no default value for this setting.
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+HTTP path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if the backend has come back
+before it is once again eligible to service requests.
+If you have custom firewall rules, you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <<uri,uri>>
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance. If given an array, it will load balance requests across the hosts specified in the `hosts` parameter.
+Remember that the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (e.g. 9200, not 9300).
+ `"127.0.0.1"`
+ `["127.0.0.1:9200","127.0.0.2:9200"]`
+ `["http://127.0.0.1"]`
+ `["https://127.0.0.1:9200"]`
+ `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
+It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+to prevent Logstash from sending bulk requests to the master nodes. This parameter should reference only data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23`, for instance.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and later.
+
+[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"]
+===== `idle_flush_time` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <<number,number>>
+ * Default value is `1`
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is <<string,string>>
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic, using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or search only specific date ranges.
+Indexes may not contain uppercase characters.
+For weekly indexes, the ISO 8601 format is recommended, e.g. `logstash-%{+xxxx.ww}`.
+Logstash uses Joda to format the index pattern from the event timestamp.
+Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The keystore used to present a certificate to the server.
+It can be either a .jks or a .p12 file.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+Set the keystore password.
+[id="{version}-plugins-{type}s-{plugin}-manage_template"]
+===== `manage_template`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+Logstash's startup if one with the name `template_name` does not already exist.
+By default, the contents of this template are the default template for
+`logstash-%{+YYYY.MM.dd}`, which always matches indices based on the pattern
+`logstash-*`. Should you require support for other index names, or would like
+to change the mappings in the template in general, a custom template can be
+specified by setting `template` to the path of a template file, as sketched below.
+
+Setting `manage_template` to false disables this feature. If you require more
+control over template creation (e.g. creating indices dynamically based on
+field names), you should set `manage_template` to false and use the REST
+API to apply your templates manually.
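+
+For example, a sketch that installs a custom template under its own name, overwriting any previous copy; the file path and template name are placeholders:
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts              => ["http://127.0.0.1:9200"]
+    template           => "/etc/logstash/templates/my-template.json"   # placeholder path
+    template_name      => "my-template"
+    template_overwrite => true
+  }
+}
+----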
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is <<hash,hash>>
+ * There is no default value for this setting.
+
+Pass a set of key-value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+URLs that already have query strings, the one specified here will be appended.
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is <<string,string>>
+ * Default value is `nil`
+
+For child documents, the ID of the associated parent.
+This can be dynamic, using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+HTTP path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+the root path of the Elasticsearch HTTP API.
+Note that if you use paths as components of URLs in the 'hosts' field, you may
+not also set this field. Doing so will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is <<string,string>>
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event-dependent configuration
+here, such as `pipeline => "%{INGEST_PIPELINE}"`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is <<number,number>>
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently, there is a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing and opening connections,
+which hurts performance.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is <<number,number>>
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently, there is a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing and opening connections,
+which hurts performance.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <<uri,uri>>
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments, but now only accepts
+arguments of the URI type to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is <<number,number>>
+ * Default value is `5`
+
+How long, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is <<number,number>>
+ * Default value is `2`
+
+Set the initial interval, in seconds, between bulk retries. This interval is doubled on each retry, up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is <<number,number>>
+ * Default value is `64`
+
+Set the maximum interval, in seconds, between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is <<number,number>>
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates documentation]
+for more info.
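+
+To make the backoff behavior concrete, a sketch that widens the retry window; the values are illustrative only, not recommendations:
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts                  => ["http://127.0.0.1:9200"]
+    retry_initial_interval => 5      # first retry after 5s, doubling each time
+    retry_max_interval     => 120    # cap the backoff at two minutes
+    retry_on_conflict      => 3      # extra attempts for 409 version conflicts
+  }
+}
+----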
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is <<number,number>>
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+HTTP Path to be used for the sniffing requests.
+The default value is computed by concatenating the `path` value and "_nodes/http".
+If `sniffing_path` is set, it will be used as an absolute path.
+Do not use a full URL here, only paths, e.g. "/sniff/_nodes/http".
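+
+A brief sniffing sketch, assuming the discovered nodes are reachable from Logstash (values are illustrative):
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts => ["127.0.0.1:9200"]
+    # Ask the cluster for its node list and add those nodes to the pool
+    sniffing => true
+    # Re-sniff every 30 seconds instead of the default 5
+    sniffing_delay => 30
+  }
+}
+----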
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <<boolean,boolean>>
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified, plain HTTP will be used.
+If SSL is explicitly disabled here, the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification, please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is <<string,string>>
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE <http://localhost:9200/_template/OldTemplateName>`
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. remove all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is <<number,number>>
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+Set the truststore password.
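+
+For illustration, a TLS sketch that validates the server against a JKS truststore (paths and the password are placeholders):
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts => ["https://es.example.com:9200"]
+    ssl => true
+    # Validate the server's certificate against this truststore
+    truststore => "/etc/logstash/certs/truststore.jks"
+    truststore_password => "changeme"
+  }
+}
+----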
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is <<string,string>>
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <<number,number>>
+ * Default value is `10000`
+
+How long to wait before checking whether a keepalive connection is stale before executing a request on it.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.3.7.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.3.7.asciidoc
new file mode 100644
index 000000000..b04856a5f
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v7.3.7.asciidoc
@@ -0,0 +1,680 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v7.3.7
+:release_date: 2017-07-18
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v7.3.7/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol, one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at https://www.elastic.co/products/elasticsearch
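+
+As a minimal, hypothetical starting point (adjust the host and index naming to your environment):
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts => ["127.0.0.1:9200"]
+    # Daily indices, matching the plugin's default pattern
+    index => "logstash-%{+YYYY.MM.dd}"
+  }
+}
+----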
+
+==== Template management for Elasticsearch 5.x
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+LS will not force-upgrade the template if the `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy changed significantly in the 2.2.0 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures.
+
+The following errors are retried infinitely:
+
+- Network errors (inability to connect)
+- 429 (Too many requests) errors
+- 503 (Service unavailable) errors
+
+NOTE: 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than this plugin.
+
+==== Batch Sizes
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB, it is broken up into multiple batch requests. If a single document exceeds 20MB, it is sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default; for Elasticsearch
+versions 5.0 and later, the user doesn't have to set any configuration in Elasticsearch for
+it to send back a compressed response. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, enable the `http_compression`
+setting in your Logstash config file.
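+
+For example, a sketch enabling request compression (response compression needs no Logstash-side setting on ES 5.0+):
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts => ["127.0.0.1:9200"]
+    # Gzip-compress request bodies sent to Elasticsearch
+    http_compression => true
+  }
+}
+----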
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-action>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-document_id>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-document_type>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |<<uri,uri>>|No
+| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-parameters>> |<<hash,hash>>|No
+| <<{version}-plugins-{type}s-{plugin}-parent>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<uri,uri>>|No
+| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-routing>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-script>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-script_type>> |<<string,string>>, one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-upsert>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |<<string,string>>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is <<string,string>>
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation].
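+
+For illustration, choosing the action per event with sprintf syntax (the `[es_action]` and `[doc_id]` fields are hypothetical and must carry valid values):
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts => ["127.0.0.1:9200"]
+    # Pick index/create/update/delete from a per-event field
+    action => "%{[es_action]}"
+    document_id => "%{[doc_id]}"
+  }
+}
+----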
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+HTTP Path to perform the _bulk requests to.
+This defaults to a concatenation of the `path` parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise, the document type will be assigned the value of 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is <<array,array>>
+ * Default value is `[]`
+
+Set the Elasticsearch errors in the whitelist that you don't want to log.
+A useful example is when you want to skip all 409 errors,
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_size"]
+===== `flush_size` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <<number,number>>
+ * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+HTTP Path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if the backend has come back again,
+before it is once again eligible to service requests.
+If you have custom firewall rules, you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <<uri,uri>>
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance. If given an array, it will load balance requests across the hosts specified in the `hosts` parameter.
+Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (e.g. 9200, not 9300).
+ `"127.0.0.1"`
+ `["127.0.0.1:9200","127.0.0.2:9200"]`
+ `["http://127.0.0.1"]`
+ `["https://127.0.0.1:9200"]`
+ `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
+It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23`, for instance.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond.
+
+[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"]
+===== `idle_flush_time` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <<number,number>>
+ * Default value is `1`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is <<string,string>>
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or only search specific date ranges.
+Indexes may not contain uppercase characters.
+For weekly indexes, ISO 8601 format is recommended, e.g. `logstash-%{+xxxx.ww}`.
+LS uses Joda to format the index pattern from the event timestamp.
+Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The keystore used to present a certificate to the server.
+It can be either .jks or .p12.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+ +Set the keystore password + +[id="{version}-plugins-{type}s-{plugin}-manage_template"] +===== `manage_template` + + * Value type is <> + * Default value is `true` + +From Logstash 1.3 onwards, a template is applied to Elasticsearch during +Logstash's startup if one with the name `template_name` does not already exist. +By default, the contents of this template is the default template for +`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern +`logstash-*`. Should you require support for other index names, or would like +to change the mappings in the template in general, a custom template can be +specified by setting `template` to the path of a template file. + +Setting `manage_template` to false disables this feature. If you require more +control over template creation, (e.g. creating indices dynamically based on +field names) you should set `manage_template` to false and use the REST +API to apply your templates manually. + +[id="{version}-plugins-{type}s-{plugin}-parameters"] +===== `parameters` + + * Value type is <> + * There is no default value for this setting. + +Pass a set of key value pairs as the URL query string. This query string is added +to every host listed in the 'hosts' configuration. If the 'hosts' list contains +urls that already have query strings, the one specified here will be appended. + +[id="{version}-plugins-{type}s-{plugin}-parent"] +===== `parent` + + * Value type is <> + * Default value is `nil` + +For child documents, ID of the associated parent. +This can be dynamic using the `%{foo}` syntax. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to authenticate to a secure Elasticsearch cluster + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * Value type is <> + * There is no default value for this setting. + +HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps +the root path for the Elasticsearch HTTP API lives. +Note that if you use paths as components of URLs in the 'hosts' field you may +not also set this field. That will raise an error at startup + +[id="{version}-plugins-{type}s-{plugin}-pipeline"] +===== `pipeline` + + * Value type is <> + * Default value is `nil` + +Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration +here like `pipeline => "%{INGEST_PIPELINE}"` + +[id="{version}-plugins-{type}s-{plugin}-pool_max"] +===== `pool_max` + + * Value type is <> + * Default value is `1000` + +While the output tries to reuse connections efficiently we have a maximum. +This sets the maximum number of open connections the output will create. +Setting this too low may mean frequently closing / opening connections +which is bad. + +[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] +===== `pool_max_per_route` + + * Value type is <> + * Default value is `100` + +While the output tries to reuse connections efficiently we have a maximum per endpoint. +This sets the maximum number of open connections per endpoint the output will create. +Setting this too low may mean frequently closing / opening connections +which is bad. + +[id="{version}-plugins-{type}s-{plugin}-proxy"] +===== `proxy` + + * Value type is <> + * There is no default value for this setting. + +Set the address of a forward HTTP proxy. 
+This used to accept hashes as arguments but now only accepts +arguments of the URI type to prevent leaking credentials. + +[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] +===== `resurrect_delay` + + * Value type is <> + * Default value is `5` + +How frequently, in seconds, to wait between resurrection attempts. +Resurrection is the process by which backend endpoints marked 'down' are checked +to see if they have come back to life + +[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] +===== `retry_initial_interval` + + * Value type is <> + * Default value is `2` + +Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval` + +[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] +===== `retry_max_interval` + + * Value type is <> + * Default value is `64` + +Set max interval in seconds between bulk retries. + +[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] +===== `retry_on_conflict` + + * Value type is <> + * Default value is `1` + +The number of times Elasticsearch should internally retry an update/upserted document +See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates] +for more info + +[id="{version}-plugins-{type}s-{plugin}-routing"] +===== `routing` + + * Value type is <> + * There is no default value for this setting. + +A routing override to be applied to all processed events. +This can be dynamic using the `%{foo}` syntax. + +[id="{version}-plugins-{type}s-{plugin}-script"] +===== `script` + + * Value type is <> + * Default value is `""` + +Set script name for scripted update mode + +[id="{version}-plugins-{type}s-{plugin}-script_lang"] +===== `script_lang` + + * Value type is <> + * Default value is `"painless"` + +Set the language of the used script. If not set, this defaults to painless in ES 5.0 + +[id="{version}-plugins-{type}s-{plugin}-script_type"] +===== `script_type` + + * Value can be any of: `inline`, `indexed`, `file` + * Default value is `["inline"]` + +Define the type of script referenced by "script" variable + inline : "script" contains inline script + indexed : "script" contains the name of script directly indexed in elasticsearch + file : "script" contains the name of script stored in elasticseach's config directory + +[id="{version}-plugins-{type}s-{plugin}-script_var_name"] +===== `script_var_name` + + * Value type is <> + * Default value is `"event"` + +Set variable name passed to script (scripted update) + +[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] +===== `scripted_upsert` + + * Value type is <> + * Default value is `false` + +if enabled, script is in charge of creating non-existent document (scripted update) + +[id="{version}-plugins-{type}s-{plugin}-sniffing"] +===== `sniffing` + + * Value type is <> + * Default value is `false` + +This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. +Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use +this with master nodes, you probably want to disable HTTP on them by setting +`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or +manually enter multiple Elasticsearch hosts using the `hosts` parameter. 
+ +[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] +===== `sniffing_delay` + + * Value type is <> + * Default value is `5` + +How long to wait, in seconds, between sniffing attempts + +[id="{version}-plugins-{type}s-{plugin}-sniffing_path"] +===== `sniffing_path` + + * Value type is <> + * There is no default value for this setting. + +HTTP Path to be used for the sniffing requests +the default value is computed by concatenating the path value and "_nodes/http" +if sniffing_path is set it will be used as an absolute path +do not use full URL here, only paths, e.g. "/sniff/_nodes/http" + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * There is no default value for this setting. + +Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme +is specified in the URLs listed in 'hosts'. If no explicit protocol is specified plain HTTP will be used. +If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS URL is given in 'hosts' + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] +===== `ssl_certificate_verification` + + * Value type is <> + * Default value is `true` + +Option to validate the server's certificate. Disabling this severely compromises security. +For more information on disabling certificate verification please read +https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf + +[id="{version}-plugins-{type}s-{plugin}-template"] +===== `template` + + * Value type is <> + * There is no default value for this setting. + +You can set the path to your own template here, if you so desire. +If not set, the included template will be used. + +[id="{version}-plugins-{type}s-{plugin}-template_name"] +===== `template_name` + + * Value type is <> + * Default value is `"logstash"` + +This configuration option defines how the template is named inside Elasticsearch. +Note that if you have used the template management features and subsequently +change this, you will need to prune the old template manually, e.g. + +`curl -XDELETE ` + +where `OldTemplateName` is whatever the former setting was. + +[id="{version}-plugins-{type}s-{plugin}-template_overwrite"] +===== `template_overwrite` + + * Value type is <> + * Default value is `false` + +The template_overwrite option will always overwrite the indicated template +in Elasticsearch with either the one indicated by template or the included one. +This option is set to false by default. If you always want to stay up to date +with the template provided by Logstash, this option could be very useful to you. +Likewise, if you have your own template file managed by puppet, for example, and +you wanted to be able to update it regularly, this option could help there as well. + +Please note that if you are using your own customized version of the Logstash +template (logstash), setting this to true will make Logstash to overwrite +the "logstash" template (i.e. removing all customized settings) + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `60` + +Set the timeout, in seconds, for network operations and requests sent Elasticsearch. If +a timeout occurs, the request will be retried. + +[id="{version}-plugins-{type}s-{plugin}-truststore"] +===== `truststore` + + * Value type is <> + * There is no default value for this setting. + +The JKS truststore to validate the server's certificate. 
+Use either `:truststore` or `:cacert` + +[id="{version}-plugins-{type}s-{plugin}-truststore_password"] +===== `truststore_password` + + * Value type is <> + * There is no default value for this setting. + +Set the truststore password + +[id="{version}-plugins-{type}s-{plugin}-upsert"] +===== `upsert` + + * Value type is <> + * Default value is `""` + +Set upsert content for update mode.s +Create a new document with this parameter as json string if `document_id` doesn't exists + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * There is no default value for this setting. + +Username to authenticate to a secure Elasticsearch cluster + +[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] +===== `validate_after_inactivity` + + * Value type is <> + * Default value is `10000` + +How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. +You may want to set this lower, if you get connection errors regularly +Quoting the Apache commons docs (this client is based Apache Commmons): +'Defines period of inactivity in milliseconds after which persistent connections must +be re-validated prior to being leased to the consumer. Non-positive value passed to +this method disables connection validation. This check helps detect connections that +have become stale (half-closed) while kept inactive in the pool.' +See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info] + +[id="{version}-plugins-{type}s-{plugin}-version"] +===== `version` + + * Value type is <> + * There is no default value for this setting. + +The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here. +See https://www.elastic.co/blog/elasticsearch-versioning-support. + +[id="{version}-plugins-{type}s-{plugin}-version_type"] +===== `version_type` + + * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force` + * There is no default value for this setting. + +The version_type to use for indexing. +See https://www.elastic.co/blog/elasticsearch-versioning-support. +See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.3.8.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.3.8.asciidoc new file mode 100644 index 000000000..57dbcc3fd --- /dev/null +++ b/docs/versioned-plugins/outputs/elasticsearch-v7.3.8.asciidoc @@ -0,0 +1,680 @@ +:plugin: elasticsearch +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v7.3.8 +:release_date: 2017-07-20 +:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v7.3.8/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Elasticsearch output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +.Compatibility Note +[NOTE] +================================================================================ +Starting with Elasticsearch 5.3, there's an {ref}modules-http.html[HTTP setting] +called `http.content_type.required`. If this option is set to `true`, and you +are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output +plugin to version 6.2.5 or higher. + +================================================================================ + +This plugin is the recommended method of storing logs in Elasticsearch. +If you plan on using the Kibana web interface, you'll want to use this output. + +This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0. +We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower, +yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having +to upgrade Logstash in lock-step. + +You can learn more about Elasticsearch at + +==== Template management for Elasticsearch 5.x +Index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0. +Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default +behavior. + +** Users installing ES 5.x and LS 5.x ** +This change will not affect you and you will continue to use the ES defaults. + +** Users upgrading from LS 2.x to LS 5.x with ES 5.x ** +LS will not force upgrade the template, if `logstash` template already exists. This means you will still use +`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after +the new template is installed. + +==== Retry Policy + +The retry policy has changed significantly in the 2.2.0 release. +This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience +either partial or total failures. + +The following errors are retried infinitely: + +- Network errors (inability to connect) +- 429 (Too many requests) and +- 503 (Service unavailable) errors + +NOTE: 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions. +It is more performant for Elasticsearch to retry these exceptions than this plugin. + +==== Batch Sizes ==== +This plugin attempts to send batches of events as a single request. However, if +a request exceeds 20MB we will break it up until multiple batch requests. If a single document exceeds 20MB it will be sent as a single request. + +==== DNS Caching + +This plugin uses the JVM to lookup DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl], +a global setting for the JVM. + +As an example, to set your DNS TTL to 1 second you would set +the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`. + +Keep in mind that a connection with keepalive enabled will +not reevaluate its DNS value while the keepalive is in effect. + +==== HTTP Compression + +This plugin supports request and response compression. 
Response compression is enabled by default and +for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for +it to send back compressed response. For versions before 5.0, `http.compression` must be set to `true` in +Elasticsearch[https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http] to take advantage of response compression when using this plugin + +For requests compression, regardless of the Elasticsearch version, users have to enable `http_compression` +setting in their Logstash config file. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Elasticsearch Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No +| 
<<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-action"] +===== `action` + + * Value type is <> + * Default value is `"index"` + +Protocol agnostic (i.e. non-http, non-java specific) configs go here +Protocol agnostic methods +The Elasticsearch action to perform. Valid actions are: + +- index: indexes a document (an event from Logstash). +- delete: deletes a document by id (An id is required for this action) +- create: indexes a document, fails if a document by that id already exists in the index. +- update: updates a document by id. Update has a special case where you can upsert -- update a + document if not already present. See the `upsert` option. NOTE: This does not work and is not supported + in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash! +- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}` + would use the foo field for the action + +For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation] + +[id="{version}-plugins-{type}s-{plugin}-bulk_path"] +===== `bulk_path` + + * Value type is <> + * There is no default value for this setting. + +HTTP Path to perform the _bulk requests to +this defaults to a concatenation of the path parameter and "_bulk" + +[id="{version}-plugins-{type}s-{plugin}-cacert"] +===== `cacert` + + * Value type is <> + * There is no default value for this setting. + +The .cer or .pem file to validate the server's certificate + +[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"] +===== `doc_as_upsert` + + * Value type is <> + * Default value is `false` + +Enable `doc_as_upsert` for update mode. +Create a new document with source if `document_id` doesn't exist in Elasticsearch + +[id="{version}-plugins-{type}s-{plugin}-document_id"] +===== `document_id` + + * Value type is <> + * There is no default value for this setting. + +The document ID for the index. Useful for overwriting existing entries in +Elasticsearch with the same ID. + +[id="{version}-plugins-{type}s-{plugin}-document_type"] +===== `document_type` + + * Value type is <> + * There is no default value for this setting. + +The document type to write events to. Generally you should try to write only +similar events to the same 'type'. String expansion `%{foo}` works here. +Unless you set 'document_type', the event 'type' will be used if it exists +otherwise the document type will be assigned the value of 'logs' + +[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"] +===== `failure_type_logging_whitelist` + + * Value type is <> + * Default value is `[]` + +Set the Elasticsearch errors in the whitelist that you don't want to log. 
+A useful example is when you want to skip all 409 errors +which are `document_already_exists_exception`. + +[id="{version}-plugins-{type}s-{plugin}-flush_size"] +===== `flush_size` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + + + +[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"] +===== `healthcheck_path` + + * Value type is <> + * There is no default value for this setting. + +HTTP Path where a HEAD request is sent when a backend is marked down +the request is sent in the background to see if it has come back again +before it is once again eligible to service requests. +If you have custom firewall rules you may need to change this + +[id="{version}-plugins-{type}s-{plugin}-hosts"] +===== `hosts` + + * Value type is <> + * Default value is `[//127.0.0.1]` + +Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. +Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (eg. 9200, not 9300). + `"127.0.0.1"` + `["127.0.0.1:9200","127.0.0.2:9200"]` + `["http://127.0.0.1"]` + `["https://127.0.0.1:9200"]` + `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath) +It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list +to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch. + +Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance. + +[id="{version}-plugins-{type}s-{plugin}-http_compression"] +===== `http_compression` + + * Value type is <> + * Default value is `false` + +Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond + +[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"] +===== `idle_flush_time` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `1` + + + +[id="{version}-plugins-{type}s-{plugin}-index"] +===== `index` + + * Value type is <> + * Default value is `"logstash-%{+YYYY.MM.dd}"` + +The index to write events to. This can be dynamic using the `%{foo}` syntax. +The default value will partition your indices by day so you can more easily +delete old data or only search specific date ranges. +Indexes may not contain uppercase characters. +For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}. +LS uses Joda to format the index pattern from event timestamp. +Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here]. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +The keystore used to present a certificate to the server. +It can be either .jks or .p12 + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is <> + * There is no default value for this setting. 
+ +Set the keystore password + +[id="{version}-plugins-{type}s-{plugin}-manage_template"] +===== `manage_template` + + * Value type is <> + * Default value is `true` + +From Logstash 1.3 onwards, a template is applied to Elasticsearch during +Logstash's startup if one with the name `template_name` does not already exist. +By default, the contents of this template is the default template for +`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern +`logstash-*`. Should you require support for other index names, or would like +to change the mappings in the template in general, a custom template can be +specified by setting `template` to the path of a template file. + +Setting `manage_template` to false disables this feature. If you require more +control over template creation, (e.g. creating indices dynamically based on +field names) you should set `manage_template` to false and use the REST +API to apply your templates manually. + +[id="{version}-plugins-{type}s-{plugin}-parameters"] +===== `parameters` + + * Value type is <> + * There is no default value for this setting. + +Pass a set of key value pairs as the URL query string. This query string is added +to every host listed in the 'hosts' configuration. If the 'hosts' list contains +urls that already have query strings, the one specified here will be appended. + +[id="{version}-plugins-{type}s-{plugin}-parent"] +===== `parent` + + * Value type is <> + * Default value is `nil` + +For child documents, ID of the associated parent. +This can be dynamic using the `%{foo}` syntax. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to authenticate to a secure Elasticsearch cluster + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * Value type is <> + * There is no default value for this setting. + +HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps +the root path for the Elasticsearch HTTP API lives. +Note that if you use paths as components of URLs in the 'hosts' field you may +not also set this field. That will raise an error at startup + +[id="{version}-plugins-{type}s-{plugin}-pipeline"] +===== `pipeline` + + * Value type is <> + * Default value is `nil` + +Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration +here like `pipeline => "%{INGEST_PIPELINE}"` + +[id="{version}-plugins-{type}s-{plugin}-pool_max"] +===== `pool_max` + + * Value type is <> + * Default value is `1000` + +While the output tries to reuse connections efficiently we have a maximum. +This sets the maximum number of open connections the output will create. +Setting this too low may mean frequently closing / opening connections +which is bad. + +[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] +===== `pool_max_per_route` + + * Value type is <> + * Default value is `100` + +While the output tries to reuse connections efficiently we have a maximum per endpoint. +This sets the maximum number of open connections per endpoint the output will create. +Setting this too low may mean frequently closing / opening connections +which is bad. + +[id="{version}-plugins-{type}s-{plugin}-proxy"] +===== `proxy` + + * Value type is <> + * There is no default value for this setting. + +Set the address of a forward HTTP proxy. 
+This used to accept hashes as arguments but now only accepts +arguments of the URI type to prevent leaking credentials. + +[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] +===== `resurrect_delay` + + * Value type is <> + * Default value is `5` + +How frequently, in seconds, to wait between resurrection attempts. +Resurrection is the process by which backend endpoints marked 'down' are checked +to see if they have come back to life + +[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] +===== `retry_initial_interval` + + * Value type is <> + * Default value is `2` + +Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval` + +[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] +===== `retry_max_interval` + + * Value type is <> + * Default value is `64` + +Set max interval in seconds between bulk retries. + +[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] +===== `retry_on_conflict` + + * Value type is <> + * Default value is `1` + +The number of times Elasticsearch should internally retry an update/upserted document +See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates] +for more info + +[id="{version}-plugins-{type}s-{plugin}-routing"] +===== `routing` + + * Value type is <> + * There is no default value for this setting. + +A routing override to be applied to all processed events. +This can be dynamic using the `%{foo}` syntax. + +[id="{version}-plugins-{type}s-{plugin}-script"] +===== `script` + + * Value type is <> + * Default value is `""` + +Set script name for scripted update mode + +[id="{version}-plugins-{type}s-{plugin}-script_lang"] +===== `script_lang` + + * Value type is <> + * Default value is `"painless"` + +Set the language of the used script. If not set, this defaults to painless in ES 5.0 + +[id="{version}-plugins-{type}s-{plugin}-script_type"] +===== `script_type` + + * Value can be any of: `inline`, `indexed`, `file` + * Default value is `["inline"]` + +Define the type of script referenced by "script" variable + inline : "script" contains inline script + indexed : "script" contains the name of script directly indexed in elasticsearch + file : "script" contains the name of script stored in elasticseach's config directory + +[id="{version}-plugins-{type}s-{plugin}-script_var_name"] +===== `script_var_name` + + * Value type is <> + * Default value is `"event"` + +Set variable name passed to script (scripted update) + +[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] +===== `scripted_upsert` + + * Value type is <> + * Default value is `false` + +if enabled, script is in charge of creating non-existent document (scripted update) + +[id="{version}-plugins-{type}s-{plugin}-sniffing"] +===== `sniffing` + + * Value type is <> + * Default value is `false` + +This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. +Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use +this with master nodes, you probably want to disable HTTP on them by setting +`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or +manually enter multiple Elasticsearch hosts using the `hosts` parameter. 
+ +[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] +===== `sniffing_delay` + + * Value type is <> + * Default value is `5` + +How long to wait, in seconds, between sniffing attempts + +[id="{version}-plugins-{type}s-{plugin}-sniffing_path"] +===== `sniffing_path` + + * Value type is <> + * There is no default value for this setting. + +HTTP Path to be used for the sniffing requests +the default value is computed by concatenating the path value and "_nodes/http" +if sniffing_path is set it will be used as an absolute path +do not use full URL here, only paths, e.g. "/sniff/_nodes/http" + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * There is no default value for this setting. + +Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme +is specified in the URLs listed in 'hosts'. If no explicit protocol is specified plain HTTP will be used. +If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS URL is given in 'hosts' + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] +===== `ssl_certificate_verification` + + * Value type is <> + * Default value is `true` + +Option to validate the server's certificate. Disabling this severely compromises security. +For more information on disabling certificate verification please read +https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf + +[id="{version}-plugins-{type}s-{plugin}-template"] +===== `template` + + * Value type is <> + * There is no default value for this setting. + +You can set the path to your own template here, if you so desire. +If not set, the included template will be used. + +[id="{version}-plugins-{type}s-{plugin}-template_name"] +===== `template_name` + + * Value type is <> + * Default value is `"logstash"` + +This configuration option defines how the template is named inside Elasticsearch. +Note that if you have used the template management features and subsequently +change this, you will need to prune the old template manually, e.g. + +`curl -XDELETE ` + +where `OldTemplateName` is whatever the former setting was. + +[id="{version}-plugins-{type}s-{plugin}-template_overwrite"] +===== `template_overwrite` + + * Value type is <> + * Default value is `false` + +The template_overwrite option will always overwrite the indicated template +in Elasticsearch with either the one indicated by template or the included one. +This option is set to false by default. If you always want to stay up to date +with the template provided by Logstash, this option could be very useful to you. +Likewise, if you have your own template file managed by puppet, for example, and +you wanted to be able to update it regularly, this option could help there as well. + +Please note that if you are using your own customized version of the Logstash +template (logstash), setting this to true will make Logstash to overwrite +the "logstash" template (i.e. removing all customized settings) + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `60` + +Set the timeout, in seconds, for network operations and requests sent Elasticsearch. If +a timeout occurs, the request will be retried. + +[id="{version}-plugins-{type}s-{plugin}-truststore"] +===== `truststore` + + * Value type is <> + * There is no default value for this setting. + +The JKS truststore to validate the server's certificate. 
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is <>
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <>
+ * Default value is `10000`
+
+How long to wait before checking whether the connection is stale before executing a request on a connection using keepalive.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.4.0.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.4.0.asciidoc
new file mode 100644
index 000000000..6e574e482
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v7.4.0.asciidoc
@@ -0,0 +1,681 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v7.4.0
+:release_date: 2017-08-21
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v7.4.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at https://www.elastic.co/products/elasticsearch
+
+==== Template management for Elasticsearch 5.x
+
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+
+LS will not force upgrade the template if the `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy changed significantly in the 7.4.0 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures. The bulk API sends batches of requests to an HTTP endpoint. Error codes for the HTTP
+request are handled differently than error codes for individual documents.
+
+HTTP requests to the bulk API are expected to return a 200 response code. All other response codes are retried indefinitely.
+
+Document errors are handled as follows:
+
+* 400 and 404 errors are sent to the DLQ, if enabled. If a DLQ is not enabled, a log message will be emitted, and the event will be dropped.
+* 409 errors (conflict) are logged as a warning and dropped.
+
+Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than this plugin.
+
+==== Batch Sizes
+
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default, and
+for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for
+it to send back compressed responses. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is <>
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation].
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP Path to perform the _bulk requests to.
+This defaults to a concatenation of the `path` parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is <>
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise, the document type will be assigned the value of 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Set the Elasticsearch error types that you don't want to log.
+A useful example is when you want to skip all 409 errors,
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_size"]
+===== `flush_size` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP Path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if the backend has come back again
+before it is once again eligible to service requests.
+If you have custom firewall rules you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <>
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance. If given an array, it will load balance requests across the hosts specified in the `hosts` parameter.
+Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (eg. 9200, not 9300).
+     `"127.0.0.1"`
+     `["127.0.0.1:9200","127.0.0.2:9200"]`
+     `["http://127.0.0.1"]`
+     `["https://127.0.0.1:9200"]`
+     `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
+It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is <>
+ * Default value is `false`
+
+Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond.
+
+[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"]
+===== `idle_flush_time` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * Default value is `1`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is <>
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or only search specific date ranges.
+Indexes may not contain uppercase characters.
+For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}.
+LS uses Joda to format the index pattern from the event timestamp.
+Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The keystore used to present a certificate to the server.
+It can be either .jks or .p12.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-manage_template"]
+===== `manage_template`
+
+ * Value type is <>
+ * Default value is `true`
+
+From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+Logstash's startup if one with the name `template_name` does not already exist.
+By default, the contents of this template are the default template for
+`logstash-%{+YYYY.MM.dd}`, which always matches indices based on the pattern
+`logstash-*`. Should you require support for other index names, or would like
+to change the mappings in the template in general, a custom template can be
+specified by setting `template` to the path of a template file.
+
+Setting `manage_template` to false disables this feature. If you require more
+control over template creation (e.g. creating indices dynamically based on
+field names), you should set `manage_template` to false and use the REST
+API to apply your templates manually.
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Pass a set of key-value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+URLs that already have query strings, the one specified here will be appended.
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is <>
+ * Default value is `nil`
+
+For child documents, ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+the root path of the Elasticsearch HTTP API.
+Note that if you use paths as components of URLs in the 'hosts' field you may
+not also set this field. That will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is <>
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+here, like `pipeline => "%{INGEST_PIPELINE}"`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is <>
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently, we have a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is <>
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently, we have a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
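+
+As a hedged example of the connection pool settings above (the endpoint names
+and limits here are hypothetical and should be tuned to your deployment):
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts              => ["http://es1:9200", "http://es2:9200"]
+    pool_max           => 2000  # total open connections across all endpoints
+    pool_max_per_route => 200   # open connections per endpoint
+  }
+}
+----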
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments but now only accepts
+arguments of the URI type to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is <>
+ * Default value is `5`
+
+How frequently, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is <>
+ * Default value is `2`
+
+Set the initial interval, in seconds, between bulk retries. This interval is doubled on each retry, up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is <>
+ * Default value is `64`
+
+Set the maximum interval, in seconds, between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is <>
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update or upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates]
+documentation for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is <>
+ * Default value is `""`
+
+Set the script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is <>
+ * Default value is `"painless"`
+
+Set the language of the script to use. If not set, this defaults to `painless` in ES 5.0.
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `["inline"]`
+
+Define the type of script referenced by the `script` variable:
+
+* `inline`: `script` contains an inline script.
+* `indexed`: `script` contains the name of a script directly indexed in Elasticsearch.
+* `file`: `script` contains the name of a script stored in Elasticsearch's config directory.
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is <>
+ * Default value is `"event"`
+
+Set the variable name passed to the script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is <>
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is <>
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
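+
+A minimal sketch tying together the retry settings described earlier in this
+section (the intervals are the defaults, shown explicitly for clarity):
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    retry_initial_interval => 2   # first bulk retry after 2 seconds...
+    retry_max_interval     => 64  # ...doubling up to 64 seconds between retries
+    retry_on_conflict      => 1   # internal ES retries for update conflicts
+  }
+}
+----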
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is <>
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP Path to be used for the sniffing requests.
+The default value is computed by concatenating the `path` value and "_nodes/http".
+If `sniffing_path` is set, it will be used as an absolute path.
+Do not use a full URL here, only paths, e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified, plain HTTP will be used.
+If SSL is explicitly disabled here, the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is <>
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE `
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is <>
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. removing all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is <>
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is <>
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <>
+ * Default value is `10000`
+
+How long to wait before checking whether the connection is stale before executing a request on a connection using keepalive.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.4.1.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.4.1.asciidoc
new file mode 100644
index 000000000..46279a757
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v7.4.1.asciidoc
@@ -0,0 +1,681 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v7.4.1
+:release_date: 2017-09-21
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v7.4.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at https://www.elastic.co/products/elasticsearch
+
+==== Template management for Elasticsearch 5.x
+
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+
+LS will not force upgrade the template if the `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy changed significantly in the 7.4.0 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures. The bulk API sends batches of requests to an HTTP endpoint. Error codes for the HTTP
+request are handled differently than error codes for individual documents.
+
+HTTP requests to the bulk API are expected to return a 200 response code. All other response codes are retried indefinitely.
+
+Document errors are handled as follows:
+
+* 400 and 404 errors are sent to the DLQ, if enabled. If a DLQ is not enabled, a log message will be emitted, and the event will be dropped.
+* 409 errors (conflict) are logged as a warning and dropped.
+
+Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than this plugin.
+
+==== Batch Sizes
+
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default, and
+for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for
+it to send back compressed responses. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is <>
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation].
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP Path to perform the _bulk requests to.
+This defaults to a concatenation of the `path` parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is <>
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise, the document type will be assigned the value of 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Set the Elasticsearch error types that you don't want to log.
+A useful example is when you want to skip all 409 errors,
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_size"]
+===== `flush_size` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP Path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if the backend has come back again
+before it is once again eligible to service requests.
+If you have custom firewall rules you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <>
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance. If given an array, it will load balance requests across the hosts specified in the `hosts` parameter.
+Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (eg. 9200, not 9300).
+     `"127.0.0.1"`
+     `["127.0.0.1:9200","127.0.0.2:9200"]`
+     `["http://127.0.0.1"]`
+     `["https://127.0.0.1:9200"]`
+     `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
+It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is <>
+ * Default value is `false`
+
+Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond.
+
+[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"]
+===== `idle_flush_time` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * Default value is `1`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is <>
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or only search specific date ranges.
+Indexes may not contain uppercase characters.
+For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}.
+LS uses Joda to format the index pattern from the event timestamp.
+Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The keystore used to present a certificate to the server.
+It can be either .jks or .p12.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-manage_template"]
+===== `manage_template`
+
+ * Value type is <>
+ * Default value is `true`
+
+From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+Logstash's startup if one with the name `template_name` does not already exist.
+By default, the contents of this template are the default template for
+`logstash-%{+YYYY.MM.dd}`, which always matches indices based on the pattern
+`logstash-*`. Should you require support for other index names, or would like
+to change the mappings in the template in general, a custom template can be
+specified by setting `template` to the path of a template file.
+
+Setting `manage_template` to false disables this feature. If you require more
+control over template creation (e.g. creating indices dynamically based on
+field names), you should set `manage_template` to false and use the REST
+API to apply your templates manually.
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Pass a set of key-value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+URLs that already have query strings, the one specified here will be appended.
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is <>
+ * Default value is `nil`
+
+For child documents, ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+the root path of the Elasticsearch HTTP API.
+Note that if you use paths as components of URLs in the 'hosts' field you may
+not also set this field. That will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is <>
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+here, like `pipeline => "%{INGEST_PIPELINE}"`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is <>
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently, we have a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is <>
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently, we have a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
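+
+For illustration only, a sketch combining the authentication and ingest
+pipeline options above (the credentials and field name are placeholders):
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    user     => "logstash_writer"     # placeholder username
+    password => "changeme"            # placeholder password
+    pipeline => "%{INGEST_PIPELINE}"  # event-dependent pipeline selection
+  }
+}
+----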
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments but now only accepts
+arguments of the URI type to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is <>
+ * Default value is `5`
+
+How frequently, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is <>
+ * Default value is `2`
+
+Set the initial interval, in seconds, between bulk retries. This interval is doubled on each retry, up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is <>
+ * Default value is `64`
+
+Set the maximum interval, in seconds, between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is <>
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update or upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates]
+documentation for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is <>
+ * Default value is `""`
+
+Set the script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is <>
+ * Default value is `"painless"`
+
+Set the language of the script to use. If not set, this defaults to `painless` in ES 5.0.
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `["inline"]`
+
+Define the type of script referenced by the `script` variable:
+
+* `inline`: `script` contains an inline script.
+* `indexed`: `script` contains the name of a script directly indexed in Elasticsearch.
+* `file`: `script` contains the name of a script stored in Elasticsearch's config directory.
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is <>
+ * Default value is `"event"`
+
+Set the variable name passed to the script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is <>
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is <>
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
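+
+A hedged sketch of the scripted update settings described above (the script
+name `increment-counter` is hypothetical and must already be indexed in your
+cluster for this to work):
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    action          => "update"
+    document_id     => "%{id}"             # assumes events carry an `id` field
+    script          => "increment-counter" # hypothetical indexed script
+    script_type     => "indexed"
+    script_lang     => "painless"
+    scripted_upsert => true                # script creates missing documents
+  }
+}
+----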
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is <>
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP Path to be used for the sniffing requests.
+The default value is computed by concatenating the `path` value and "_nodes/http".
+If `sniffing_path` is set, it will be used as an absolute path.
+Do not use a full URL here, only paths, e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified, plain HTTP will be used.
+If SSL is explicitly disabled here, the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is <>
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE `
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is <>
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. removing all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is <>
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is <>
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <>
+ * Default value is `10000`
+
+How long to wait before checking whether the connection is stale before executing a request on a connection using keepalive.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.4.2.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.4.2.asciidoc
new file mode 100644
index 000000000..2d38f1601
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v7.4.2.asciidoc
@@ -0,0 +1,698 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v7.4.2
+:release_date: 2017-09-22
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v7.4.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at https://www.elastic.co.
+
+==== Template management for Elasticsearch 5.x
+
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+
+LS will not force-upgrade the template if the `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy has changed significantly in the 7.4.0 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures. The bulk API sends batches of requests to an HTTP endpoint. Error codes for the HTTP
+request are handled differently than error codes for individual documents.
+
+HTTP requests to the bulk API are expected to return a 200 response code. All other response codes are retried indefinitely.
+
+Document-level errors are handled as follows:
+
+* 400 and 404 errors are sent to the dead letter queue (DLQ), if enabled. If a DLQ is not enabled, a log message will be emitted, and the event will be dropped. See <<dlq-policy>> for more info.
+* 409 errors (conflict) are logged as a warning and dropped.
+
+Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than this plugin.
+
+[[dlq-policy]]
+==== DLQ Policy
+
+Mapping (404) errors from Elasticsearch can lead to data loss. Unfortunately
+mapping errors cannot be handled without human intervention and without looking
+at the field that caused the mapping mismatch. If the DLQ is enabled, the
+original events causing the mapping errors are stored in a file that can be
+processed at a later time. Often, the offending field can be removed and the
+event re-indexed to Elasticsearch.
If the DLQ is not enabled, and a mapping error
+happens, the problem is logged as a warning, and the event is dropped. See
+<> for more information about processing events in the DLQ.
+
+==== Batch Sizes
+
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB it is broken up into multiple batch requests. If a single document exceeds 20MB it is sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default; for
+Elasticsearch versions 5.0 and later, you don't have to set any configuration in Elasticsearch for
+it to send back a compressed response. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, enable the `http_compression`
+setting in your Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. 
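+
+As an illustrative sketch only (the host URL and credentials below are
+placeholders, not defaults shipped with the plugin), a minimal pipeline using
+a few of these options might look like:
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts    => ["https://es1.example.com:9200"]  # placeholder host URL
+    index    => "logstash-%{+YYYY.MM.dd}"         # the default daily index pattern
+    user     => "logstash_writer"                 # placeholder credentials
+    password => "changeme"
+  }
+}
+----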
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is <>
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation].
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP Path to perform the _bulk requests to.
+This defaults to a concatenation of the path parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is <>
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise the document type will be assigned the value of 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Set the Elasticsearch errors that you don't want to log.
+A useful example is when you want to skip all 409 errors,
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_size"]
+===== `flush_size` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP Path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if the backend has come back again
+before it is once again eligible to service requests.
+If you have custom firewall rules, you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <>
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter.
+Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (e.g. 9200, not 9300).
+ `"127.0.0.1"`
+ `["127.0.0.1:9200","127.0.0.2:9200"]`
+ `["http://127.0.0.1"]`
+ `["https://127.0.0.1:9200"]`
+ `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
+It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is <>
+ * Default value is `false`
+
+Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond.
+
+[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"]
+===== `idle_flush_time` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * Default value is `1`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is <>
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or only search specific date ranges.
+Indexes may not contain uppercase characters.
+For weekly indexes ISO 8601 format is recommended, e.g. logstash-%{+xxxx.ww}.
+LS uses Joda to format the index pattern from the event timestamp.
+Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The keystore used to present a certificate to the server.
+It can be either .jks or .p12.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-manage_template"]
+===== `manage_template`
+
+ * Value type is <>
+ * Default value is `true`
+
+From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+Logstash's startup if one with the name `template_name` does not already exist.
+By default, the contents of this template is the default template for
+`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern
+`logstash-*`. Should you require support for other index names, or would like
+to change the mappings in the template in general, a custom template can be
+specified by setting `template` to the path of a template file.
+
+Setting `manage_template` to false disables this feature. If you require more
+control over template creation (e.g.
creating indices dynamically based on
+field names) you should set `manage_template` to false and use the REST
+API to apply your templates manually.
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Pass a set of key value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+urls that already have query strings, the one specified here will be appended.
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is <>
+ * Default value is `nil`
+
+For child documents, ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+the root path of the Elasticsearch HTTP API.
+Note that if you use paths as components of URLs in the 'hosts' field you may
+not also set this field. That will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is <>
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+here like `pipeline => "%{INGEST_PIPELINE}"`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is <>
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently, we have a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is <>
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently, we have a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments, but now only accepts
+arguments of the URI type to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is <>
+ * Default value is `5`
+
+How long, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is <>
+ * Default value is `2`
+
+Set the initial interval, in seconds, between bulk retries. Doubled on each retry up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is <>
+ * Default value is `64`
+
+Set the maximum interval, in seconds, between bulk retries.
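+
+As a sketch of how the two retry interval settings interact (the values here
+are illustrative, not recommendations), the configuration below would retry a
+failed bulk request after 2, 4, 8, and 16 seconds, and then every 32 seconds:
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts                  => ["127.0.0.1:9200"]
+    retry_initial_interval => 2   # first retry after 2 seconds (the default)
+    retry_max_interval     => 32  # interval doubles per retry, capped at 32 seconds
+  }
+}
+----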
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is <>
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates documentation]
+for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is <>
+ * Default value is `""`
+
+Set the script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is <>
+ * Default value is `"painless"`
+
+Set the language of the script used. If not set, this defaults to painless in ES 5.0.
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `["inline"]`
+
+Define the type of script referenced by the "script" variable:
+ inline : "script" contains an inline script
+ indexed : "script" contains the name of a script directly indexed in Elasticsearch
+ file : "script" contains the name of a script stored in Elasticsearch's config directory
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is <>
+ * Default value is `"event"`
+
+Set the variable name passed to the script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is <>
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is <>
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is <>
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP Path to be used for the sniffing requests.
+The default value is computed by concatenating the path value and "_nodes/http".
+If sniffing_path is set it will be used as an absolute path.
+Do not use a full URL here, only paths, e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified, plain HTTP will be used.
+If SSL is explicitly disabled here, the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is <>
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE <http://localhost:9200/_template/OldTemplateName>`
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is <>
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. removing all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is <>
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is <>
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <>
+ * Default value is `10000`
+
+How long a keepalive connection may be inactive before it is validated (checked for staleness) prior to executing a request on it.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v8.0.0.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v8.0.0.asciidoc
new file mode 100644
index 000000000..4330bbf8b
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v8.0.0.asciidoc
@@ -0,0 +1,662 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v8.0.0
+:release_date: 2017-08-01
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v8.0.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at https://www.elastic.co.
+
+==== Template management for Elasticsearch 5.x
+
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+
+LS will not force-upgrade the template if the `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy changed significantly in the 2.2.0 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures.
+
+The following errors are retried infinitely:
+
+- Network errors (inability to connect)
+- 429 (Too many requests) and
+- 503 (Service unavailable) errors
+
+NOTE: 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than this plugin.
+
+==== Batch Sizes
+
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB it is broken up into multiple batch requests. If a single document exceeds 20MB it is sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default; for
+Elasticsearch versions 5.0 and later, you don't have to set any configuration in Elasticsearch for
+it to send back a compressed response. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, enable the `http_compression`
+setting in your Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. 
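+
+As a hedged example of the update-related options described below (the
+`order_id` field is hypothetical, not a plugin default), the following sketch
+updates a document by ID and creates it from the event when that ID does not
+exist yet:
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts         => ["127.0.0.1:9200"]
+    action        => "update"
+    document_id   => "%{[order_id]}"  # hypothetical event field used as the ID
+    doc_as_upsert => true             # index the event itself if the ID is missing
+  }
+}
+----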
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is <>
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation].
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP Path to perform the _bulk requests to.
+This defaults to a concatenation of the path parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is <>
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise the document type will be assigned the value of 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Set the Elasticsearch errors that you don't want to log.
+A useful example is when you want to skip all 409 errors,
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP Path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if the backend has come back again
+before it is once again eligible to service requests.
+If you have custom firewall rules, you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <>
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance.
If given an array it will load balance requests across the hosts specified in the `hosts` parameter.
+Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (e.g. 9200, not 9300).
+ `"127.0.0.1"`
+ `["127.0.0.1:9200","127.0.0.2:9200"]`
+ `["http://127.0.0.1"]`
+ `["https://127.0.0.1:9200"]`
+ `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
+It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is <>
+ * Default value is `false`
+
+Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond.
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is <>
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or only search specific date ranges.
+Indexes may not contain uppercase characters.
+For weekly indexes ISO 8601 format is recommended, e.g. logstash-%{+xxxx.ww}.
+LS uses Joda to format the index pattern from the event timestamp.
+Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The keystore used to present a certificate to the server.
+It can be either .jks or .p12.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-manage_template"]
+===== `manage_template`
+
+ * Value type is <>
+ * Default value is `true`
+
+From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+Logstash's startup if one with the name `template_name` does not already exist.
+By default, the contents of this template is the default template for
+`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern
+`logstash-*`. Should you require support for other index names, or would like
+to change the mappings in the template in general, a custom template can be
+specified by setting `template` to the path of a template file.
+
+Setting `manage_template` to false disables this feature. If you require more
+control over template creation (e.g. creating indices dynamically based on
+field names) you should set `manage_template` to false and use the REST
+API to apply your templates manually.
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Pass a set of key value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+urls that already have query strings, the one specified here will be appended.
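+
+For instance, assuming a hypothetical `routing_hint` query parameter that your
+proxy or cluster understands, the following sketch appends
+`?routing_hint=logs` to every request URL:
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts      => ["http://127.0.0.1:9200"]
+    parameters => { "routing_hint" => "logs" }  # appended to the URL query string
+  }
+}
+----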
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is <>
+ * Default value is `nil`
+
+For child documents, ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+the root path of the Elasticsearch HTTP API.
+Note that if you use paths as components of URLs in the 'hosts' field you may
+not also set this field. That will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is <>
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+here like `pipeline => "%{INGEST_PIPELINE}"`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is <>
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently, we have a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is <>
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently, we have a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing / opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments, but now only accepts
+arguments of the URI type to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is <>
+ * Default value is `5`
+
+How long, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is <>
+ * Default value is `2`
+
+Set the initial interval, in seconds, between bulk retries. Doubled on each retry up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is <>
+ * Default value is `64`
+
+Set the maximum interval, in seconds, between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is <>
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates documentation]
+for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is <>
+ * Default value is `""`
+
+Set the script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is <>
+ * Default value is `"painless"`
+
+Set the language of the script used. If not set, this defaults to painless in ES 5.0.
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `["inline"]`
+
+Define the type of script referenced by the "script" variable:
+ inline : "script" contains an inline script
+ indexed : "script" contains the name of a script directly indexed in Elasticsearch
+ file : "script" contains the name of a script stored in Elasticsearch's config directory
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is <>
+ * Default value is `"event"`
+
+Set the variable name passed to the script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is <>
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is <>
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is <>
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP Path to be used for the sniffing requests.
+The default value is computed by concatenating the path value and "_nodes/http".
+If sniffing_path is set it will be used as an absolute path.
+Do not use a full URL here, only paths, e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified, plain HTTP will be used.
+If SSL is explicitly disabled here, the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is <>
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE <http://localhost:9200/_template/OldTemplateName>`
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is <>
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. removing all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is <>
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is <>
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <>
+ * Default value is `10000`
+
+How long a keepalive connection may be inactive before it is validated (checked for staleness) prior to executing a request on it.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v8.0.1.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v8.0.1.asciidoc
new file mode 100644
index 000000000..908aa3e4c
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v8.0.1.asciidoc
@@ -0,0 +1,662 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v8.0.1
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v8.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at https://www.elastic.co.
+
+==== Template management for Elasticsearch 5.x
+
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+LS will not force-upgrade the template if a `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy has changed significantly in the 2.2.0 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures.
+
+The following errors are retried infinitely:
+
+- Network errors (inability to connect)
+- 429 (Too Many Requests) errors
+- 503 (Service Unavailable) errors
+
+NOTE: 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than this plugin.
+
+==== Batch Sizes
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default and,
+for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for
+it to send back a compressed response. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-action>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-document_id>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-document_type>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |<<uri,uri>>|No
+| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-parameters>> |<<hash,hash>>|No
+| <<{version}-plugins-{type}s-{plugin}-parent>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<uri,uri>>|No
+| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-routing>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-script>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-script_type>> |<<string,string>>, one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-upsert>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |<<string,string>>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
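+
+For orientation before the per-option details, here is a minimal, illustrative
+output block; the host and index values below are placeholders, not recommendations:
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    # Placeholder node; list data or client nodes here, never dedicated masters.
+    hosts => ["127.0.0.1:9200"]
+    # Daily indices, matching this plugin's default pattern.
+    index => "logstash-%{+YYYY.MM.dd}"
+  }
+}
+----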
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is <<string,string>>
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document; fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation].
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+HTTP path at which to perform the `_bulk` requests.
+This defaults to a concatenation of the `path` parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise, the document type will be assigned the value of 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is <<array,array>>
+ * Default value is `[]`
+
+A whitelist of Elasticsearch error types that you don't want to log.
+A useful example is when you want to skip all 409 errors,
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+HTTP path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if the backend has come back again
+before it is once again eligible to service requests.
+If you have custom firewall rules, you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <<uri,uri>>
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance.
If given an array it will load balance requests across the hosts specified in the `hosts` parameter.
+Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (e.g. 9200, not 9300). For example:
+ `"127.0.0.1"`
+ `["127.0.0.1:9200","127.0.0.2:9200"]`
+ `["http://127.0.0.1"]`
+ `["https://127.0.0.1:9200"]`
+ `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
+It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond.
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is <<string,string>>
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or only search specific date ranges.
+Indexes may not contain uppercase characters.
+For weekly indexes ISO 8601 format is recommended, e.g. `logstash-%{+xxxx.ww}`.
+LS uses Joda to format the index pattern from the event timestamp.
+Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The keystore used to present a certificate to the server.
+It can be either .jks or .p12.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+Set the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-manage_template"]
+===== `manage_template`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+Logstash's startup if one with the name `template_name` does not already exist.
+By default, the contents of this template are the default template for
+`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern
+`logstash-*`. Should you require support for other index names, or would like
+to change the mappings in the template in general, a custom template can be
+specified by setting `template` to the path of a template file.
+
+Setting `manage_template` to false disables this feature. If you require more
+control over template creation (e.g. creating indices dynamically based on
+field names) you should set `manage_template` to false and use the REST
+API to apply your templates manually.
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is <<hash,hash>>
+ * There is no default value for this setting.
+
+Pass a set of key value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+URLs that already have query strings, the one specified here will be appended.
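+
+As a sketch of the `parameters` option described above, the following adds a
+query string to every request URL; the key and value are invented for illustration:
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts => ["127.0.0.1:9200"]
+    # Hypothetical key/value pair, appended as ?routing_hint=logs to each request.
+    parameters => { "routing_hint" => "logs" }
+  }
+}
+----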
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is <<string,string>>
+ * Default value is `nil`
+
+For child documents, ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+HTTP path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+the root path of the Elasticsearch HTTP API.
+Note that if you use paths as components of URLs in the 'hosts' field you may
+not also set this field; that will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is <<string,string>>
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+here like `pipeline => "%{INGEST_PIPELINE}"`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is <<number,number>>
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently we have a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing/opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is <<number,number>>
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently we have a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing/opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <<uri,uri>>
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments but now only accepts
+arguments of the URI type to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is <<number,number>>
+ * Default value is `5`
+
+How long, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is <<number,number>>
+ * Default value is `2`
+
+Set the initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is <<number,number>>
+ * Default value is `64`
+
+Set the maximum interval in seconds between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is <<number,number>>
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upsert of a document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates documentation]
+for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is <<string,string>>
+ * Default value is `""`
+
+Set the script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is <<string,string>>
+ * Default value is `"painless"`
+
+Set the language of the used script. If not set, this defaults to `painless` in ES 5.0.
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `"inline"`
+
+Define the type of script referenced by the "script" variable:
+
+- inline: "script" contains an inline script.
+- indexed: "script" contains the name of a script directly indexed in Elasticsearch.
+- file: "script" contains the name of a script stored in Elasticsearch's config directory.
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is <<string,string>>
+ * Default value is `"event"`
+
+Set the variable name passed to the script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is <<number,number>>
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+HTTP path to be used for the sniffing requests.
+The default value is computed by concatenating the `path` value and "_nodes/http".
+If `sniffing_path` is set it will be used as an absolute path;
+do not use a full URL here, only paths, e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <<boolean,boolean>>
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified, plain HTTP will be used.
+If SSL is explicitly disabled here, the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is <<string,string>>
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE <http://localhost:9200/_template/OldTemplateName>`
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by `template` or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by Puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. remove all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is <<number,number>>
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is <<string,string>>
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <<number,number>>
+ * Default value is `10000`
+
+How long to wait before checking whether a kept-alive connection is stale
+before executing a request on it.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v8.1.1.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v8.1.1.asciidoc
new file mode 100644
index 000000000..71e909104
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v8.1.1.asciidoc
@@ -0,0 +1,663 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v8.1.1
+:release_date: 2017-08-21
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v8.1.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at <https://www.elastic.co/products/elasticsearch>
+
+==== Template management for Elasticsearch 5.x
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+LS will not force-upgrade the template if a `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy has changed significantly in the 8.1.1 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures. The bulk API sends batches of requests to an HTTP endpoint. Error codes for the HTTP
+request are handled differently than error codes for individual documents.
+
+HTTP requests to the bulk API are expected to return a 200 response code. All other response codes are retried indefinitely.
+
+Document-level errors are handled as follows:
+
+- 400 and 404 errors are sent to the DLQ, if enabled. If a DLQ is not enabled, a log message will be emitted and the event will be dropped.
+- 409 errors (conflict) are logged as a warning and dropped.
+
+Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than this plugin.
+
+==== Batch Sizes
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default and,
+for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for
+it to send back a compressed response. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-action>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-document_id>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-document_type>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |<<uri,uri>>|No
+| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-parameters>> |<<hash,hash>>|No
+| <<{version}-plugins-{type}s-{plugin}-parent>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<uri,uri>>|No
+| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-routing>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-script>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-script_type>> |<<string,string>>, one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-upsert>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |<<string,string>>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
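+
+Before the per-option details, here is a sketch of how the update-related options
+can be combined; it assumes each event carries a unique `fingerprint` field, which
+is a placeholder for whatever unique key your pipeline produces:
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts => ["127.0.0.1:9200"]
+    action => "update"
+    # Placeholder field assumed to uniquely identify each event.
+    document_id => "%{fingerprint}"
+    # Use the event source as the upsert document when the id is absent.
+    doc_as_upsert => true
+    # Let Elasticsearch retry version conflicts internally.
+    retry_on_conflict => 3
+  }
+}
+----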
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is <<string,string>>
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document; fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation].
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+HTTP path at which to perform the `_bulk` requests.
+This defaults to a concatenation of the `path` parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise, the document type will be assigned the value of 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is <<array,array>>
+ * Default value is `[]`
+
+A whitelist of Elasticsearch error types that you don't want to log.
+A useful example is when you want to skip all 409 errors,
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+HTTP path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if the backend has come back again
+before it is once again eligible to service requests.
+If you have custom firewall rules, you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <<uri,uri>>
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance.
If given an array it will load balance requests across the hosts specified in the `hosts` parameter.
+Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (e.g. 9200, not 9300). For example:
+ `"127.0.0.1"`
+ `["127.0.0.1:9200","127.0.0.2:9200"]`
+ `["http://127.0.0.1"]`
+ `["https://127.0.0.1:9200"]`
+ `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
+It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond.
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is <<string,string>>
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or only search specific date ranges.
+Indexes may not contain uppercase characters.
+For weekly indexes ISO 8601 format is recommended, e.g. `logstash-%{+xxxx.ww}`.
+LS uses Joda to format the index pattern from the event timestamp.
+Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The keystore used to present a certificate to the server.
+It can be either .jks or .p12.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+Set the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-manage_template"]
+===== `manage_template`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+Logstash's startup if one with the name `template_name` does not already exist.
+By default, the contents of this template are the default template for
+`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern
+`logstash-*`. Should you require support for other index names, or would like
+to change the mappings in the template in general, a custom template can be
+specified by setting `template` to the path of a template file.
+
+Setting `manage_template` to false disables this feature. If you require more
+control over template creation (e.g. creating indices dynamically based on
+field names) you should set `manage_template` to false and use the REST
+API to apply your templates manually. A sketch of the template-management
+settings working together is shown below.
+
+[id="{version}-plugins-{type}s-{plugin}-parameters"]
+===== `parameters`
+
+ * Value type is <<hash,hash>>
+ * There is no default value for this setting.
+
+Pass a set of key value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+URLs that already have query strings, the one specified here will be appended.
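+
+As a sketch of the template-management workflow described under `manage_template`
+above, the following pushes a custom template on startup; the file path and
+template name are placeholders:
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts => ["127.0.0.1:9200"]
+    # Placeholder path to a custom index template on the Logstash host.
+    template => "/etc/logstash/templates/custom.json"
+    template_name => "custom"
+    # Re-push the template on startup so edits to the file take effect.
+    template_overwrite => true
+  }
+}
+----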
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is <<string,string>>
+ * Default value is `nil`
+
+For child documents, ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+HTTP path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+the root path of the Elasticsearch HTTP API.
+Note that if you use paths as components of URLs in the 'hosts' field you may
+not also set this field; that will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is <<string,string>>
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+here like `pipeline => "%{INGEST_PIPELINE}"`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is <<number,number>>
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently we have a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing/opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is <<number,number>>
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently we have a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing/opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <<uri,uri>>
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments but now only accepts
+arguments of the URI type to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is <<number,number>>
+ * Default value is `5`
+
+How long, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is <<number,number>>
+ * Default value is `2`
+
+Set the initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is <<number,number>>
+ * Default value is `64`
+
+Set the maximum interval in seconds between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is <<number,number>>
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upsert of a document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates documentation]
+for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is <<string,string>>
+ * Default value is `""`
+
+Set the script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is <<string,string>>
+ * Default value is `"painless"`
+
+Set the language of the used script. If not set, this defaults to `painless` in ES 5.0.
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `"inline"`
+
+Define the type of script referenced by the "script" variable:
+
+- inline: "script" contains an inline script.
+- indexed: "script" contains the name of a script directly indexed in Elasticsearch.
+- file: "script" contains the name of a script stored in Elasticsearch's config directory.
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is <<string,string>>
+ * Default value is `"event"`
+
+Set the variable name passed to the script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is <<number,number>>
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+HTTP path to be used for the sniffing requests.
+The default value is computed by concatenating the `path` value and "_nodes/http".
+If `sniffing_path` is set it will be used as an absolute path;
+do not use a full URL here, only paths, e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <<boolean,boolean>>
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified, plain HTTP will be used.
+If SSL is explicitly disabled here, the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is <<string,string>>
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE <http://localhost:9200/_template/OldTemplateName>`
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by `template` or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by Puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. remove all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is <<number,number>>
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is <<string,string>>
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <<number,number>>
+ * Default value is `10000`
+
+How long to wait before checking whether a kept-alive connection is stale
+before executing a request on it.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v8.2.0.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v8.2.0.asciidoc
new file mode 100644
index 000000000..ec66eda65
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v8.2.0.asciidoc
@@ -0,0 +1,663 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v8.2.0
+:release_date: 2017-09-21
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v8.2.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at <https://www.elastic.co/products/elasticsearch>
+
+==== Template management for Elasticsearch 5.x
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+LS will not force-upgrade the template if a `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy has changed significantly in the 8.1.1 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures. The bulk API sends batches of requests to an HTTP endpoint. Error codes for the HTTP
+request are handled differently than error codes for individual documents.
+
+HTTP requests to the bulk API are expected to return a 200 response code. All other response codes are retried indefinitely.
+
+Document-level errors are handled as follows:
+
+- 400 and 404 errors are sent to the DLQ, if enabled. If a DLQ is not enabled, a log message will be emitted and the event will be dropped.
+- 409 errors (conflict) are logged as a warning and dropped.
+
+Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than this plugin.
+
+==== Batch Sizes
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default and,
+for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for
+it to send back a compressed response. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. 
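+
+Before looking at the individual settings, it may help to see a few of them in
+context. The following is a minimal, illustrative sketch of this output; the
+host, user, and password values are placeholders, and only `index` shows its
+documented default:
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts => ["http://127.0.0.1:9200"]   # see the `hosts` option below
+    index => "logstash-%{+YYYY.MM.dd}"   # the documented default, shown explicitly
+    user => "logstash_writer"            # placeholder credentials for a secured cluster
+    password => "changeme"
+  }
+}
+----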
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is <>
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation].
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP Path to perform the _bulk requests to.
+This defaults to a concatenation of the `path` parameter and `_bulk`.
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is <>
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise the document type will be assigned the value of 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Set the Elasticsearch errors in the whitelist that you don't want to log.
+A useful example is when you want to skip all 409 errors,
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP Path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if the backend has come back again
+before it is once again eligible to service requests.
+If you have custom firewall rules you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <>
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance.
If given an array it will load balance requests across the hosts specified in the `hosts` parameter. +Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (eg. 9200, not 9300). + `"127.0.0.1"` + `["127.0.0.1:9200","127.0.0.2:9200"]` + `["http://127.0.0.1"]` + `["https://127.0.0.1:9200"]` + `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath) +It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list +to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch. + +Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance. + +[id="{version}-plugins-{type}s-{plugin}-http_compression"] +===== `http_compression` + + * Value type is <> + * Default value is `false` + +Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond + +[id="{version}-plugins-{type}s-{plugin}-index"] +===== `index` + + * Value type is <> + * Default value is `"logstash-%{+YYYY.MM.dd}"` + +The index to write events to. This can be dynamic using the `%{foo}` syntax. +The default value will partition your indices by day so you can more easily +delete old data or only search specific date ranges. +Indexes may not contain uppercase characters. +For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}. +LS uses Joda to format the index pattern from event timestamp. +Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here]. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +The keystore used to present a certificate to the server. +It can be either .jks or .p12 + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is <> + * There is no default value for this setting. + +Set the keystore password + +[id="{version}-plugins-{type}s-{plugin}-manage_template"] +===== `manage_template` + + * Value type is <> + * Default value is `true` + +From Logstash 1.3 onwards, a template is applied to Elasticsearch during +Logstash's startup if one with the name `template_name` does not already exist. +By default, the contents of this template is the default template for +`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern +`logstash-*`. Should you require support for other index names, or would like +to change the mappings in the template in general, a custom template can be +specified by setting `template` to the path of a template file. + +Setting `manage_template` to false disables this feature. If you require more +control over template creation, (e.g. creating indices dynamically based on +field names) you should set `manage_template` to false and use the REST +API to apply your templates manually. + +[id="{version}-plugins-{type}s-{plugin}-parameters"] +===== `parameters` + + * Value type is <> + * There is no default value for this setting. + +Pass a set of key value pairs as the URL query string. This query string is added +to every host listed in the 'hosts' configuration. If the 'hosts' list contains +urls that already have query strings, the one specified here will be appended. 
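+
+For example, to send extra key/value pairs on every request (the parameter
+names below are purely illustrative):
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts => ["http://127.0.0.1:9200"]
+    # Appended to each request as ?team=ops&env=staging
+    parameters => { "team" => "ops" "env" => "staging" }
+  }
+}
+----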
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is <>
+ * Default value is `nil`
+
+For child documents, ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+the root path of the Elasticsearch HTTP API.
+Note that if you use paths as components of URLs in the 'hosts' field you may
+not also set this field. That will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is <>
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+here, like `pipeline => "%{INGEST_PIPELINE}"`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is <>
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently, we have a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing and opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is <>
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently, we have a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing and opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments but now only accepts
+arguments of the URI type to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is <>
+ * Default value is `5`
+
+How long, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is <>
+ * Default value is `2`
+
+Set the initial interval, in seconds, between bulk retries. Doubled on each retry up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is <>
+ * Default value is `64`
+
+Set the max interval, in seconds, between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is <>
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates]
+documentation for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is <>
+ * Default value is `""`
+
+Set the script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is <>
+ * Default value is `"painless"`
+
+Set the language of the used script. If not set, this defaults to `painless` in ES 5.0.
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `"inline"`
+
+Define the type of script referenced by the "script" variable:
+
+* `inline`: "script" contains an inline script.
+* `indexed`: "script" contains the name of a script directly indexed in Elasticsearch.
+* `file`: "script" contains the name of a script stored in Elasticsearch's config directory.
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is <>
+ * Default value is `"event"`
+
+Set the variable name passed to the script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is <>
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is <>
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is <>
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP Path to be used for the sniffing requests.
+The default value is computed by concatenating the path value and "_nodes/http".
+If sniffing_path is set it will be used as an absolute path.
+Do not use a full URL here, only paths, e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified plain HTTP will be used.
+If SSL is explicitly disabled here, the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is <>
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE http://localhost:9200/_template/OldTemplateName`
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is <>
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. removing all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is <>
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is <>
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a json string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <>
+ * Default value is `10000`
+
+How long to wait before checking whether a keepalive connection is stale before executing a request on it.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v8.2.2.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v8.2.2.asciidoc
new file mode 100644
index 000000000..b533ea863
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v8.2.2.asciidoc
@@ -0,0 +1,680 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v8.2.2
+:release_date: 2017-09-21
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v8.2.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at https://www.elastic.co/products/elasticsearch.
+
+==== Template management for Elasticsearch 5.x
+
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+
+LS will not force-upgrade the template if the `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy has changed significantly in the 8.1.1 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures. The bulk API sends batches of requests to an HTTP endpoint. Error codes for the HTTP
+request are handled differently than error codes for individual documents.
+
+HTTP requests to the bulk API are expected to return a 200 response code. All other response codes are retried indefinitely.
+
+Document-level errors are handled as follows:
+
+* 400 and 404 errors are sent to the dead letter queue (DLQ), if enabled. If a DLQ is not enabled, a log message will be emitted, and the event will be dropped. See <<dlq-policy>> for more info.
+* 409 errors (conflict) are logged as a warning and dropped.
+
+Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions internally than for this plugin to retry them.
+
+[[dlq-policy]]
+==== DLQ Policy
+
+Mapping (404) errors from Elasticsearch can lead to data loss. Unfortunately
+mapping errors cannot be handled without human intervention and without looking
+at the field that caused the mapping mismatch. If the DLQ is enabled, the
+original events causing the mapping errors are stored in a file that can be
+processed at a later time. Oftentimes, the offending field can be removed and
+the event re-indexed to Elasticsearch. If the DLQ is not enabled, and a mapping error
+happens, the problem is logged as a warning, and the event is dropped. See
+<> for more information about processing events in the DLQ.
+
+==== Batch Sizes
+
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default, and
+for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for
+it to send back a compressed response. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
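+
+For example, enabling request compression is a single flag on this output (the
+host value is a placeholder):
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts => ["http://127.0.0.1:9200"]
+    http_compression => true   # gzip-compress request bodies sent to Elasticsearch
+  }
+}
+----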
+ + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Elasticsearch Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No 
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is <>
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation].
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP Path to perform the _bulk requests to.
+This defaults to a concatenation of the `path` parameter and `_bulk`.
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is <>
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+Unless you set 'document_type', the event 'type' will be used if it exists;
+otherwise the document type will be assigned the value of 'logs'.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is <>
+ * Default value is `[]`
+
+Set the Elasticsearch errors in the whitelist that you don't want to log.
+A useful example is when you want to skip all 409 errors,
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP Path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if the backend has come back again
+before it is once again eligible to service requests.
+If you have custom firewall rules you may need to change this + +[id="{version}-plugins-{type}s-{plugin}-hosts"] +===== `hosts` + + * Value type is <> + * Default value is `[//127.0.0.1]` + +Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. +Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (eg. 9200, not 9300). + `"127.0.0.1"` + `["127.0.0.1:9200","127.0.0.2:9200"]` + `["http://127.0.0.1"]` + `["https://127.0.0.1:9200"]` + `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath) +It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list +to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch. + +Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance. + +[id="{version}-plugins-{type}s-{plugin}-http_compression"] +===== `http_compression` + + * Value type is <> + * Default value is `false` + +Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond + +[id="{version}-plugins-{type}s-{plugin}-index"] +===== `index` + + * Value type is <> + * Default value is `"logstash-%{+YYYY.MM.dd}"` + +The index to write events to. This can be dynamic using the `%{foo}` syntax. +The default value will partition your indices by day so you can more easily +delete old data or only search specific date ranges. +Indexes may not contain uppercase characters. +For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}. +LS uses Joda to format the index pattern from event timestamp. +Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here]. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +The keystore used to present a certificate to the server. +It can be either .jks or .p12 + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is <> + * There is no default value for this setting. + +Set the keystore password + +[id="{version}-plugins-{type}s-{plugin}-manage_template"] +===== `manage_template` + + * Value type is <> + * Default value is `true` + +From Logstash 1.3 onwards, a template is applied to Elasticsearch during +Logstash's startup if one with the name `template_name` does not already exist. +By default, the contents of this template is the default template for +`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern +`logstash-*`. Should you require support for other index names, or would like +to change the mappings in the template in general, a custom template can be +specified by setting `template` to the path of a template file. + +Setting `manage_template` to false disables this feature. If you require more +control over template creation, (e.g. creating indices dynamically based on +field names) you should set `manage_template` to false and use the REST +API to apply your templates manually. + +[id="{version}-plugins-{type}s-{plugin}-parameters"] +===== `parameters` + + * Value type is <> + * There is no default value for this setting. 
+
+Pass a set of key value pairs as the URL query string. This query string is added
+to every host listed in the 'hosts' configuration. If the 'hosts' list contains
+URLs that already have query strings, the one specified here will be appended.
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is <>
+ * Default value is `nil`
+
+For child documents, ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Password to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps
+the root path of the Elasticsearch HTTP API.
+Note that if you use paths as components of URLs in the 'hosts' field you may
+not also set this field. That will raise an error at startup.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is <>
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration
+here, like `pipeline => "%{INGEST_PIPELINE}"`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is <>
+ * Default value is `1000`
+
+While the output tries to reuse connections efficiently, we have a maximum.
+This sets the maximum number of open connections the output will create.
+Setting this too low may mean frequently closing and opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is <>
+ * Default value is `100`
+
+While the output tries to reuse connections efficiently, we have a maximum per endpoint.
+This sets the maximum number of open connections per endpoint the output will create.
+Setting this too low may mean frequently closing and opening connections,
+which is bad.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the address of a forward HTTP proxy.
+This used to accept hashes as arguments but now only accepts
+arguments of the URI type to prevent leaking credentials.
+
+[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"]
+===== `resurrect_delay`
+
+ * Value type is <>
+ * Default value is `5`
+
+How long, in seconds, to wait between resurrection attempts.
+Resurrection is the process by which backend endpoints marked 'down' are checked
+to see if they have come back to life.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is <>
+ * Default value is `2`
+
+Set the initial interval, in seconds, between bulk retries. Doubled on each retry up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is <>
+ * Default value is `64`
+
+Set the max interval, in seconds, between bulk retries.
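+
+To make the backoff behavior concrete, the sketch below (values are
+illustrative, not recommendations) would retry failed bulk requests after
+roughly 2, 4, 8, 16, 32, and then 60 seconds for each subsequent attempt:
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts => ["http://127.0.0.1:9200"]
+    retry_initial_interval => 2   # first retry after 2s, doubled on each attempt
+    retry_max_interval => 60      # cap the interval between retries at 60s
+  }
+}
+----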
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is <>
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates]
+documentation for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is <>
+ * Default value is `""`
+
+Set the script name for scripted update mode.
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is <>
+ * Default value is `"painless"`
+
+Set the language of the used script. If not set, this defaults to `painless` in ES 5.0.
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `"inline"`
+
+Define the type of script referenced by the "script" variable:
+
+* `inline`: "script" contains an inline script.
+* `indexed`: "script" contains the name of a script directly indexed in Elasticsearch.
+* `file`: "script" contains the name of a script stored in Elasticsearch's config directory.
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is <>
+ * Default value is `"event"`
+
+Set the variable name passed to the script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is <>
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is <>
+ * Default value is `false`
+
+This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list.
+Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use
+this with master nodes, you probably want to disable HTTP on them by setting
+`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or
+manually enter multiple Elasticsearch hosts using the `hosts` parameter.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"]
+===== `sniffing_delay`
+
+ * Value type is <>
+ * Default value is `5`
+
+How long to wait, in seconds, between sniffing attempts.
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing_path"]
+===== `sniffing_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HTTP Path to be used for the sniffing requests.
+The default value is computed by concatenating the path value and "_nodes/http".
+If sniffing_path is set it will be used as an absolute path.
+Do not use a full URL here, only paths, e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified plain HTTP will be used.
+If SSL is explicitly disabled here, the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is <>
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE http://localhost:9200/_template/OldTemplateName`
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is <>
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. removing all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is <>
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is <>
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a json string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <>
+ * Default value is `10000`
+
+How long to wait before checking whether a keepalive connection is stale before executing a request on it.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v9.0.0.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v9.0.0.asciidoc
new file mode 100644
index 000000000..431622e76
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v9.0.0.asciidoc
@@ -0,0 +1,684 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v9.0.0
+:release_date: 2017-09-29
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v9.0.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`. If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at https://www.elastic.co/products/elasticsearch.
+
+==== Template management for Elasticsearch 5.x
+
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+
+LS will not force-upgrade the template if the `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy has changed significantly in the 8.1.1 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures. The bulk API sends batches of requests to an HTTP endpoint. Error codes for the HTTP
+request are handled differently than error codes for individual documents.
+
+HTTP requests to the bulk API are expected to return a 200 response code. All other response codes are retried indefinitely.
+
+Document-level errors are handled as follows:
+
+* 400 and 404 errors are sent to the dead letter queue (DLQ), if enabled. If a DLQ is not enabled, a log message will be emitted, and the event will be dropped. See <<dlq-policy>> for more info.
+* 409 errors (conflict) are logged as a warning and dropped.
+
+Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions internally than for this plugin to retry them.
+
+[[dlq-policy]]
+==== DLQ Policy
+
+Mapping (404) errors from Elasticsearch can lead to data loss. Unfortunately
+mapping errors cannot be handled without human intervention and without looking
+at the field that caused the mapping mismatch. If the DLQ is enabled, the
+original events causing the mapping errors are stored in a file that can be
+processed at a later time. Oftentimes, the offending field can be removed and
+the event re-indexed to Elasticsearch. If the DLQ is not enabled, and a mapping error
+happens, the problem is logged as a warning, and the event is dropped. See
+<> for more information about processing events in the DLQ.
+
+==== Batch Sizes
+
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB we will break it up into multiple batch requests. If a single document exceeds 20MB it will be sent as a single request.
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default, and
+for Elasticsearch versions 5.0 and later, the user doesn't have to set any configs in Elasticsearch for
+it to send back a compressed response.
For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+|
<<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-action"] +===== `action` + + * Value type is <> + * Default value is `"index"` + +Protocol agnostic (i.e. non-http, non-java specific) configs go here +Protocol agnostic methods +The Elasticsearch action to perform. Valid actions are: + +- index: indexes a document (an event from Logstash). +- delete: deletes a document by id (An id is required for this action) +- create: indexes a document, fails if a document by that id already exists in the index. +- update: updates a document by id. Update has a special case where you can upsert -- update a + document if not already present. See the `upsert` option. NOTE: This does not work and is not supported + in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash! +- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}` + would use the foo field for the action + +For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation] + +[id="{version}-plugins-{type}s-{plugin}-bulk_path"] +===== `bulk_path` + + * Value type is <> + * There is no default value for this setting. + +HTTP Path to perform the _bulk requests to +this defaults to a concatenation of the path parameter and "_bulk" + +[id="{version}-plugins-{type}s-{plugin}-cacert"] +===== `cacert` + + * Value type is <> + * There is no default value for this setting. + +The .cer or .pem file to validate the server's certificate + +[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"] +===== `doc_as_upsert` + + * Value type is <> + * Default value is `false` + +Enable `doc_as_upsert` for update mode. +Create a new document with source if `document_id` doesn't exist in Elasticsearch + +[id="{version}-plugins-{type}s-{plugin}-document_id"] +===== `document_id` + + * Value type is <> + * There is no default value for this setting. + +The document ID for the index. Useful for overwriting existing entries in +Elasticsearch with the same ID. + +[id="{version}-plugins-{type}s-{plugin}-document_type"] +===== `document_type` + + * Value type is <> + * There is no default value for this setting. + * This option is deprecated + +Note: This option is deprecated due to the https://www.elastic.co/guide/en/elasticsearch/reference/6.0/removal-of-types.html[removal of types in Logstash 6.0]. +It will be removed in the next major version of Logstash. +This sets the document type to write events to. Generally you should try to write only +similar events to the same 'type'. String expansion `%{foo}` works here. +Unless you set 'document_type', the event 'type' will be used if it exists +otherwise the document type will be assigned the value of 'doc'. 
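+
+For illustration only (this example is not part of the original plugin docs), a minimal sketch of
+pinning the deprecated `document_type` on a pre-6.x cluster; the host and type value are hypothetical:
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts         => ["127.0.0.1:9200"]  # hypothetical host
+    document_type => "%{[type]}"         # sprintf expansion of the event's 'type' field
+  }
+}
+----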
+ +[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"] +===== `failure_type_logging_whitelist` + + * Value type is <> + * Default value is `[]` + +Set the Elasticsearch errors in the whitelist that you don't want to log. +A useful example is when you want to skip all 409 errors +which are `document_already_exists_exception`. + +[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"] +===== `healthcheck_path` + + * Value type is <> + * There is no default value for this setting. + +HTTP Path where a HEAD request is sent when a backend is marked down +the request is sent in the background to see if it has come back again +before it is once again eligible to service requests. +If you have custom firewall rules you may need to change this + +[id="{version}-plugins-{type}s-{plugin}-hosts"] +===== `hosts` + + * Value type is <> + * Default value is `[//127.0.0.1]` + +Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. +Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (eg. 9200, not 9300). + `"127.0.0.1"` + `["127.0.0.1:9200","127.0.0.2:9200"]` + `["http://127.0.0.1"]` + `["https://127.0.0.1:9200"]` + `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath) +It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list +to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch. + +Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance. + +[id="{version}-plugins-{type}s-{plugin}-http_compression"] +===== `http_compression` + + * Value type is <> + * Default value is `false` + +Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond + +[id="{version}-plugins-{type}s-{plugin}-index"] +===== `index` + + * Value type is <> + * Default value is `"logstash-%{+YYYY.MM.dd}"` + +The index to write events to. This can be dynamic using the `%{foo}` syntax. +The default value will partition your indices by day so you can more easily +delete old data or only search specific date ranges. +Indexes may not contain uppercase characters. +For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}. +LS uses Joda to format the index pattern from event timestamp. +Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here]. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +The keystore used to present a certificate to the server. +It can be either .jks or .p12 + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is <> + * There is no default value for this setting. + +Set the keystore password + +[id="{version}-plugins-{type}s-{plugin}-manage_template"] +===== `manage_template` + + * Value type is <> + * Default value is `true` + +From Logstash 1.3 onwards, a template is applied to Elasticsearch during +Logstash's startup if one with the name `template_name` does not already exist. 
+By default, the contents of this template is the default template for +`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern +`logstash-*`. Should you require support for other index names, or would like +to change the mappings in the template in general, a custom template can be +specified by setting `template` to the path of a template file. + +Setting `manage_template` to false disables this feature. If you require more +control over template creation, (e.g. creating indices dynamically based on +field names) you should set `manage_template` to false and use the REST +API to apply your templates manually. + +[id="{version}-plugins-{type}s-{plugin}-parameters"] +===== `parameters` + + * Value type is <> + * There is no default value for this setting. + +Pass a set of key value pairs as the URL query string. This query string is added +to every host listed in the 'hosts' configuration. If the 'hosts' list contains +urls that already have query strings, the one specified here will be appended. + +[id="{version}-plugins-{type}s-{plugin}-parent"] +===== `parent` + + * Value type is <> + * Default value is `nil` + +For child documents, ID of the associated parent. +This can be dynamic using the `%{foo}` syntax. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to authenticate to a secure Elasticsearch cluster + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * Value type is <> + * There is no default value for this setting. + +HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps +the root path for the Elasticsearch HTTP API lives. +Note that if you use paths as components of URLs in the 'hosts' field you may +not also set this field. That will raise an error at startup + +[id="{version}-plugins-{type}s-{plugin}-pipeline"] +===== `pipeline` + + * Value type is <> + * Default value is `nil` + +Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration +here like `pipeline => "%{INGEST_PIPELINE}"` + +[id="{version}-plugins-{type}s-{plugin}-pool_max"] +===== `pool_max` + + * Value type is <> + * Default value is `1000` + +While the output tries to reuse connections efficiently we have a maximum. +This sets the maximum number of open connections the output will create. +Setting this too low may mean frequently closing / opening connections +which is bad. + +[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] +===== `pool_max_per_route` + + * Value type is <> + * Default value is `100` + +While the output tries to reuse connections efficiently we have a maximum per endpoint. +This sets the maximum number of open connections per endpoint the output will create. +Setting this too low may mean frequently closing / opening connections +which is bad. + +[id="{version}-plugins-{type}s-{plugin}-proxy"] +===== `proxy` + + * Value type is <> + * There is no default value for this setting. + +Set the address of a forward HTTP proxy. +This used to accept hashes as arguments but now only accepts +arguments of the URI type to prevent leaking credentials. + +[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] +===== `resurrect_delay` + + * Value type is <> + * Default value is `5` + +How frequently, in seconds, to wait between resurrection attempts. 
+Resurrection is the process by which backend endpoints marked 'down' are checked +to see if they have come back to life + +[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] +===== `retry_initial_interval` + + * Value type is <> + * Default value is `2` + +Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval` + +[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] +===== `retry_max_interval` + + * Value type is <> + * Default value is `64` + +Set max interval in seconds between bulk retries. + +[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] +===== `retry_on_conflict` + + * Value type is <> + * Default value is `1` + +The number of times Elasticsearch should internally retry an update/upserted document +See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates] +for more info + +[id="{version}-plugins-{type}s-{plugin}-routing"] +===== `routing` + + * Value type is <> + * There is no default value for this setting. + +A routing override to be applied to all processed events. +This can be dynamic using the `%{foo}` syntax. + +[id="{version}-plugins-{type}s-{plugin}-script"] +===== `script` + + * Value type is <> + * Default value is `""` + +Set script name for scripted update mode + +[id="{version}-plugins-{type}s-{plugin}-script_lang"] +===== `script_lang` + + * Value type is <> + * Default value is `"painless"` + +Set the language of the used script. If not set, this defaults to painless in ES 5.0. +When using indexed (stored) scripts on Elasticsearch 6 and higher, you must set this parameter to `""` (empty string). + +[id="{version}-plugins-{type}s-{plugin}-script_type"] +===== `script_type` + + * Value can be any of: `inline`, `indexed`, `file` + * Default value is `["inline"]` + +Define the type of script referenced by "script" variable + inline : "script" contains inline script + indexed : "script" contains the name of script directly indexed in elasticsearch + file : "script" contains the name of script stored in elasticseach's config directory + +[id="{version}-plugins-{type}s-{plugin}-script_var_name"] +===== `script_var_name` + + * Value type is <> + * Default value is `"event"` + +Set variable name passed to script (scripted update) + +[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] +===== `scripted_upsert` + + * Value type is <> + * Default value is `false` + +if enabled, script is in charge of creating non-existent document (scripted update) + +[id="{version}-plugins-{type}s-{plugin}-sniffing"] +===== `sniffing` + + * Value type is <> + * Default value is `false` + +This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. +Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use +this with master nodes, you probably want to disable HTTP on them by setting +`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or +manually enter multiple Elasticsearch hosts using the `hosts` parameter. + +[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] +===== `sniffing_delay` + + * Value type is <> + * Default value is `5` + +How long to wait, in seconds, between sniffing attempts + +[id="{version}-plugins-{type}s-{plugin}-sniffing_path"] +===== `sniffing_path` + + * Value type is <> + * There is no default value for this setting. 
+
+HTTP Path to be used for the sniffing requests.
+The default value is computed by concatenating the path value and "_nodes/http".
+If sniffing_path is set it will be used as an absolute path.
+Do not use a full URL here, only paths, e.g. "/sniff/_nodes/http".
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <<boolean,boolean>>
+ * There is no default value for this setting.
+
+Enable SSL/TLS secured communication to the Elasticsearch cluster. Leaving this unspecified will use whatever scheme
+is specified in the URLs listed in 'hosts'. If no explicit protocol is specified, plain HTTP will be used.
+If SSL is explicitly disabled here, the plugin will refuse to start if an HTTPS URL is given in 'hosts'.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"]
+===== `ssl_certificate_verification`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Option to validate the server's certificate. Disabling this severely compromises security.
+For more information on disabling certificate verification please read
+https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is <<string,string>>
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE `
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. removing all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is <<number,number>>
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is <<string,string>>
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a json string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <<number,number>>
+ * Default value is `10000`
+
+How long to wait before checking for a stale connection when executing a request on a connection using keepalive.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v9.0.2.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v9.0.2.asciidoc
new file mode 100644
index 000000000..f7c9238d1
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch-v9.0.2.asciidoc
@@ -0,0 +1,686 @@
+:plugin: elasticsearch
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v9.0.2
+:release_date: 2017-11-30
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v9.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Elasticsearch output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+.Compatibility Note
+[NOTE]
+================================================================================
+Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
+called `http.content_type.required`.
+If this option is set to `true`, and you
+are using Logstash 2.4 through 5.2, you need to update the Elasticsearch output
+plugin to version 6.2.5 or higher.
+
+================================================================================
+
+This plugin is the recommended method of storing logs in Elasticsearch.
+If you plan on using the Kibana web interface, you'll want to use this output.
+
+This output only speaks the HTTP protocol. HTTP is the preferred protocol for interacting with Elasticsearch as of Logstash 2.0.
+We strongly encourage the use of HTTP over the node protocol for a number of reasons. HTTP is only marginally slower,
+yet far easier to administer and work with. When using the HTTP protocol one may upgrade Elasticsearch versions without having
+to upgrade Logstash in lock-step.
+
+You can learn more about Elasticsearch at https://www.elastic.co
+
+==== Template management for Elasticsearch 5.x
+
+The index template for this version (Logstash 5.0) has been changed to reflect Elasticsearch's mapping changes in version 5.0.
+Most importantly, the subfield for string multi-fields has changed from `.raw` to `.keyword` to match ES default
+behavior.
+
+**Users installing ES 5.x and LS 5.x**
+
+This change will not affect you and you will continue to use the ES defaults.
+
+**Users upgrading from LS 2.x to LS 5.x with ES 5.x**
+
+LS will not force-upgrade the template if a `logstash` template already exists. This means you will still use
+`.raw` for sub-fields coming from 2.x. If you choose to use the new template, you will have to reindex your data after
+the new template is installed.
+
+==== Retry Policy
+
+The retry policy changed significantly in the 8.1.1 release.
+This plugin uses the Elasticsearch bulk API to optimize its imports into Elasticsearch. These requests may experience
+either partial or total failures. The bulk API sends batches of requests to an HTTP endpoint. Error codes for the HTTP
+request are handled differently than error codes for individual documents.
+
+HTTP requests to the bulk API are expected to return a 200 response code. All other response codes are retried indefinitely.
+
+Document-level errors are handled as follows:
+
+* 400 and 404 errors are sent to the dead letter queue (DLQ), if enabled. If a DLQ is not enabled, a log message will be emitted, and the event will be dropped. See <<dlq-policy>> for more info.
+* 409 errors (conflict) are logged as a warning and dropped.
+
+Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions.
+It is more performant for Elasticsearch to retry these exceptions than for this plugin to do so.
+
+[[dlq-policy]]
+==== DLQ Policy
+
+Mapping (404) errors from Elasticsearch can lead to data loss. Unfortunately,
+mapping errors cannot be handled without human intervention and without looking
+at the field that caused the mapping mismatch. If the DLQ is enabled, the
+original events causing the mapping errors are stored in a file that can be
+processed at a later time. Often, the offending field can be removed and the
+event re-indexed to Elasticsearch. If the DLQ is not enabled, and a mapping error
+happens, the problem is logged as a warning, and the event is dropped. See
+https://www.elastic.co/guide/en/logstash/current/dead-letter-queues.html[dead letter queues] for more information about processing events in the DLQ.
+
+==== Batch Sizes
+
+This plugin attempts to send batches of events as a single request. However, if
+a request exceeds 20MB, it is broken up into multiple batch requests. If a single document exceeds 20MB, it is sent as a single request.
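+
+As a hedged sketch (not from the original docs) of the 409 advice in the retry policy above: for
+partial updates against a fixed `document_id`, raising `retry_on_conflict` lets Elasticsearch retry
+conflicting updates internally instead of surfacing 409s. The host and id field are hypothetical:
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts             => ["127.0.0.1:9200"]  # hypothetical host
+    action            => "update"
+    document_id       => "%{[user_id]}"      # hypothetical id field
+    doc_as_upsert     => true                # create the document if it doesn't exist yet
+    retry_on_conflict => 5                   # let ES retry version conflicts internally
+  }
+}
+----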
+
+==== DNS Caching
+
+This plugin uses the JVM to look up DNS entries and is subject to the value of https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html[networkaddress.cache.ttl],
+a global setting for the JVM.
+
+As an example, to set your DNS TTL to 1 second you would set
+the `LS_JAVA_OPTS` environment variable to `-Dnetworkaddress.cache.ttl=1`.
+
+Keep in mind that a connection with keepalive enabled will
+not reevaluate its DNS value while the keepalive is in effect.
+
+==== HTTP Compression
+
+This plugin supports request and response compression. Response compression is enabled by default, and
+for Elasticsearch versions 5.0 and later, the user doesn't have to set any configuration in Elasticsearch for
+it to send back compressed responses. For versions before 5.0, `http.compression` must be set to `true` in
+https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[Elasticsearch] to take advantage of response compression when using this plugin.
+
+For request compression, regardless of the Elasticsearch version, users have to enable the `http_compression`
+setting in their Logstash config file.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Elasticsearch Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-action>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-document_id>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-document_type>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<<array,array>>|No
+| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-hosts>> |<<uri,uri>>|No
+| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-index>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-parameters>> |<<hash,hash>>|No
+| <<{version}-plugins-{type}s-{plugin}-parent>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<uri,uri>>|No
+| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-routing>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-script>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-script_type>> |<<string,string>>, one of `["inline", "indexed", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-template_name>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-upsert>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-version>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-version_type>> |<<string,string>>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-action"]
+===== `action`
+
+ * Value type is <<string,string>>
+ * Default value is `"index"`
+
+The Elasticsearch action to perform. Valid actions are:
+
+- index: indexes a document (an event from Logstash).
+- delete: deletes a document by id (an id is required for this action).
+- create: indexes a document, fails if a document by that id already exists in the index.
+- update: updates a document by id. Update has a special case where you can upsert -- update a
+  document if not already present. See the `upsert` option. NOTE: This does not work and is not supported
+  in Elasticsearch 1.x. Please upgrade to ES 2.x or greater to use this feature with Logstash!
+- A sprintf style string to change the action based on the content of the event. The value `%{[foo]}`
+  would use the foo field for the action.
+
+For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation].
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_path"]
+===== `bulk_path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+HTTP Path to perform the _bulk requests to.
+This defaults to a concatenation of the path parameter and "_bulk".
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The .cer or .pem file to validate the server's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"]
+===== `doc_as_upsert`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Enable `doc_as_upsert` for update mode.
+Create a new document with source if `document_id` doesn't exist in Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The document ID for the index. Useful for overwriting existing entries in
+Elasticsearch with the same ID.
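+
+A hedged example (not in the original text): pairing `document_id` with a fingerprint computed
+earlier in the pipeline makes redelivered events overwrite one document instead of duplicating it.
+The metadata field name is hypothetical, e.g. one set by a fingerprint filter:
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts       => ["127.0.0.1:9200"]            # hypothetical host
+    document_id => "%{[@metadata][fingerprint]}" # hypothetical field holding a precomputed hash
+  }
+}
+----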
+
+[id="{version}-plugins-{type}s-{plugin}-document_type"]
+===== `document_type`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+ * This option is deprecated
+
+Note: This option is deprecated due to the https://www.elastic.co/guide/en/elasticsearch/reference/6.0/removal-of-types.html[removal of types in Elasticsearch 6.0].
+It will be removed in the next major version of Logstash.
+This sets the document type to write events to. Generally you should try to write only
+similar events to the same 'type'. String expansion `%{foo}` works here.
+If you don't set a value for this option:
+
+- for elasticsearch clusters 6.x and above: the value of 'doc' will be used;
+- for elasticsearch clusters 5.x and below: the event's 'type' field will be used; if the field is not present, the value of 'doc' will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"]
+===== `failure_type_logging_whitelist`
+
+ * Value type is <<array,array>>
+ * Default value is `[]`
+
+Set the Elasticsearch error types that you don't want to log.
+A useful example is skipping all 409 errors,
+which are `document_already_exists_exception`.
+
+[id="{version}-plugins-{type}s-{plugin}-healthcheck_path"]
+===== `healthcheck_path`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+HTTP Path where a HEAD request is sent when a backend is marked down.
+The request is sent in the background to see if the backend has come back again
+before it is once again eligible to service requests.
+If you have custom firewall rules you may need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-hosts"]
+===== `hosts`
+
+ * Value type is <<uri,uri>>
+ * Default value is `[//127.0.0.1]`
+
+Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter.
+Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (eg. 9200, not 9300).
+ `"127.0.0.1"`
+ `["127.0.0.1:9200","127.0.0.2:9200"]`
+ `["http://127.0.0.1"]`
+ `["https://127.0.0.1:9200"]`
+ `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath)
+It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list
+to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch.
+
+Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond.
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is <<string,string>>
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or only search specific date ranges.
+Indexes may not contain uppercase characters.
+For weekly indexes the ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}.
+LS uses Joda to format the index pattern from the event timestamp.
+Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here]. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +The keystore used to present a certificate to the server. +It can be either .jks or .p12 + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is <> + * There is no default value for this setting. + +Set the keystore password + +[id="{version}-plugins-{type}s-{plugin}-manage_template"] +===== `manage_template` + + * Value type is <> + * Default value is `true` + +From Logstash 1.3 onwards, a template is applied to Elasticsearch during +Logstash's startup if one with the name `template_name` does not already exist. +By default, the contents of this template is the default template for +`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern +`logstash-*`. Should you require support for other index names, or would like +to change the mappings in the template in general, a custom template can be +specified by setting `template` to the path of a template file. + +Setting `manage_template` to false disables this feature. If you require more +control over template creation, (e.g. creating indices dynamically based on +field names) you should set `manage_template` to false and use the REST +API to apply your templates manually. + +[id="{version}-plugins-{type}s-{plugin}-parameters"] +===== `parameters` + + * Value type is <> + * There is no default value for this setting. + +Pass a set of key value pairs as the URL query string. This query string is added +to every host listed in the 'hosts' configuration. If the 'hosts' list contains +urls that already have query strings, the one specified here will be appended. + +[id="{version}-plugins-{type}s-{plugin}-parent"] +===== `parent` + + * Value type is <> + * Default value is `nil` + +For child documents, ID of the associated parent. +This can be dynamic using the `%{foo}` syntax. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to authenticate to a secure Elasticsearch cluster + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * Value type is <> + * There is no default value for this setting. + +HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps +the root path for the Elasticsearch HTTP API lives. +Note that if you use paths as components of URLs in the 'hosts' field you may +not also set this field. That will raise an error at startup + +[id="{version}-plugins-{type}s-{plugin}-pipeline"] +===== `pipeline` + + * Value type is <> + * Default value is `nil` + +Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration +here like `pipeline => "%{INGEST_PIPELINE}"` + +[id="{version}-plugins-{type}s-{plugin}-pool_max"] +===== `pool_max` + + * Value type is <> + * Default value is `1000` + +While the output tries to reuse connections efficiently we have a maximum. +This sets the maximum number of open connections the output will create. +Setting this too low may mean frequently closing / opening connections +which is bad. 
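+
+A tuning sketch, for illustration only (the values are hypothetical, not recommendations), combining
+`pool_max` with `pool_max_per_route`, which is described next:
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts              => ["es1:9200", "es2:9200"] # hypothetical hosts
+    pool_max           => 2000                     # total open connections across all endpoints
+    pool_max_per_route => 200                      # open connections per endpoint
+  }
+}
+----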
+ +[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] +===== `pool_max_per_route` + + * Value type is <> + * Default value is `100` + +While the output tries to reuse connections efficiently we have a maximum per endpoint. +This sets the maximum number of open connections per endpoint the output will create. +Setting this too low may mean frequently closing / opening connections +which is bad. + +[id="{version}-plugins-{type}s-{plugin}-proxy"] +===== `proxy` + + * Value type is <> + * There is no default value for this setting. + +Set the address of a forward HTTP proxy. +This used to accept hashes as arguments but now only accepts +arguments of the URI type to prevent leaking credentials. + +[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] +===== `resurrect_delay` + + * Value type is <> + * Default value is `5` + +How frequently, in seconds, to wait between resurrection attempts. +Resurrection is the process by which backend endpoints marked 'down' are checked +to see if they have come back to life + +[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] +===== `retry_initial_interval` + + * Value type is <> + * Default value is `2` + +Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval` + +[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] +===== `retry_max_interval` + + * Value type is <> + * Default value is `64` + +Set max interval in seconds between bulk retries. + +[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] +===== `retry_on_conflict` + + * Value type is <> + * Default value is `1` + +The number of times Elasticsearch should internally retry an update/upserted document +See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates] +for more info + +[id="{version}-plugins-{type}s-{plugin}-routing"] +===== `routing` + + * Value type is <> + * There is no default value for this setting. + +A routing override to be applied to all processed events. +This can be dynamic using the `%{foo}` syntax. + +[id="{version}-plugins-{type}s-{plugin}-script"] +===== `script` + + * Value type is <> + * Default value is `""` + +Set script name for scripted update mode + +[id="{version}-plugins-{type}s-{plugin}-script_lang"] +===== `script_lang` + + * Value type is <> + * Default value is `"painless"` + +Set the language of the used script. If not set, this defaults to painless in ES 5.0. +When using indexed (stored) scripts on Elasticsearch 6 and higher, you must set this parameter to `""` (empty string). 
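+
+Tying the scripted-update settings together, a rough sketch only; the inline Painless body, the id
+field, and the `params.event` accessor are illustrative assumptions, not taken from the original
+docs (`script_type` is described next):
+
+[source,ruby]
+----
+output {
+  elasticsearch {
+    hosts       => ["127.0.0.1:9200"]  # hypothetical host
+    action      => "update"
+    document_id => "%{[id]}"           # hypothetical id field
+    script_lang => "painless"
+    script_type => "inline"
+    # assumption: the event is exposed to the script under the name set by script_var_name ("event")
+    script      => "ctx._source.counter = (ctx._source.counter ?: 0) + 1"
+  }
+}
+----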
+ +[id="{version}-plugins-{type}s-{plugin}-script_type"] +===== `script_type` + + * Value can be any of: `inline`, `indexed`, `file` + * Default value is `["inline"]` + +Define the type of script referenced by "script" variable + inline : "script" contains inline script + indexed : "script" contains the name of script directly indexed in elasticsearch + file : "script" contains the name of script stored in elasticseach's config directory + +[id="{version}-plugins-{type}s-{plugin}-script_var_name"] +===== `script_var_name` + + * Value type is <> + * Default value is `"event"` + +Set variable name passed to script (scripted update) + +[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] +===== `scripted_upsert` + + * Value type is <> + * Default value is `false` + +if enabled, script is in charge of creating non-existent document (scripted update) + +[id="{version}-plugins-{type}s-{plugin}-sniffing"] +===== `sniffing` + + * Value type is <> + * Default value is `false` + +This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. +Note: This will return ALL nodes with HTTP enabled (including master nodes!). If you use +this with master nodes, you probably want to disable HTTP on them by setting +`http.enabled` to false in their elasticsearch.yml. You can either use the `sniffing` option or +manually enter multiple Elasticsearch hosts using the `hosts` parameter. + +[id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] +===== `sniffing_delay` + + * Value type is <> + * Default value is `5` + +How long to wait, in seconds, between sniffing attempts + +[id="{version}-plugins-{type}s-{plugin}-sniffing_path"] +===== `sniffing_path` + + * Value type is <> + * There is no default value for this setting. + +HTTP Path to be used for the sniffing requests +the default value is computed by concatenating the path value and "_nodes/http" +if sniffing_path is set it will be used as an absolute path +do not use full URL here, only paths, e.g. "/sniff/_nodes/http" + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * There is no default value for this setting. + +Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme +is specified in the URLs listed in 'hosts'. If no explicit protocol is specified plain HTTP will be used. +If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS URL is given in 'hosts' + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] +===== `ssl_certificate_verification` + + * Value type is <> + * Default value is `true` + +Option to validate the server's certificate. Disabling this severely compromises security. +For more information on disabling certificate verification please read +https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf + +[id="{version}-plugins-{type}s-{plugin}-template"] +===== `template` + + * Value type is <> + * There is no default value for this setting. + +You can set the path to your own template here, if you so desire. +If not set, the included template will be used. + +[id="{version}-plugins-{type}s-{plugin}-template_name"] +===== `template_name` + + * Value type is <> + * Default value is `"logstash"` + +This configuration option defines how the template is named inside Elasticsearch. +Note that if you have used the template management features and subsequently +change this, you will need to prune the old template manually, e.g. 
+
+`curl -XDELETE `
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is <<boolean,boolean>>
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by template or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by puppet, for example, and
+you wanted to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. removing all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is <<number,number>>
+ * Default value is `60`
+
+Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If
+a timeout occurs, the request will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The JKS truststore to validate the server's certificate.
+Use either `:truststore` or `:cacert`.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+Set the truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is <<string,string>>
+ * Default value is `""`
+
+Set upsert content for update mode.
+Create a new document with this parameter as a json string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Username to authenticate to a secure Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <<number,number>>
+ * Default value is `10000`
+
+How long to wait before checking for a stale connection when executing a request on a connection using keepalive.
+You may want to set this lower if you get connection errors regularly.
+Quoting the Apache commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must
+be re-validated prior to being leased to the consumer. Non-positive value passed to
+this method disables connection validation. This check helps detect connections that
+have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/elasticsearch_java-index.asciidoc b/docs/versioned-plugins/outputs/elasticsearch_java-index.asciidoc
new file mode 100644
index 000000000..26b49a183
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch_java-index.asciidoc
@@ -0,0 +1,12 @@
+:plugin: elasticsearch_java
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-06-23
+|=======================================================================
+
+include::elasticsearch_java-v2.1.4.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/elasticsearch_java-v2.1.4.asciidoc b/docs/versioned-plugins/outputs/elasticsearch_java-v2.1.4.asciidoc
new file mode 100644
index 000000000..321b9895c
--- /dev/null
+++ b/docs/versioned-plugins/outputs/elasticsearch_java-v2.1.4.asciidoc
@@ -0,0 +1,491 @@
+:plugin: elasticsearch_java
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.1.4
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch_java/blob/v2.1.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="plugins-{type}-{plugin}"]
+
+=== Elasticsearch_java output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output lets you store logs in Elasticsearch using the native 'node' and 'transport'
+protocols. It is highly recommended to use the regular 'logstash-output-elasticsearch' output
+which uses HTTP instead. This output is, in fact, sometimes slower, and never faster, than that one.
+Additionally, upgrading your Elasticsearch cluster may require you to simultaneously update this
+plugin for any protocol level changes. The HTTP client may be easier to work with due to wider
+familiarity with HTTP.
+
+*VERSION NOTE*: Your Elasticsearch cluster must be running Elasticsearch 1.0.0 or later.
+
+If you want to set other Elasticsearch options that are not exposed directly
+as configuration options, there are two methods:
+
+* Create an `elasticsearch.yml` file in the $PWD of the Logstash process
+* Pass in es.* java properties (`java -Des.node.foo=` or `ruby -J-Des.node.foo=`)
+
+When the `protocol` setting is "node", this plugin will join your
+Elasticsearch cluster as a client node, so it will show up in Elasticsearch's
+cluster status.
+
+You can learn more about Elasticsearch at https://www.elastic.co
+
+==== Operational Notes
+
+If using the "node" `protocol` setting, your firewalls might need
+to permit port 9300 in *both* directions (from Logstash to Elasticsearch, and
+Elasticsearch to Logstash).
+
+==== Retry Policy
+
+By default all bulk requests to ES are synchronous. Not all events in the bulk requests
+always make it successfully. For example, there could be events which are not formatted
+correctly for the index they are targeting (type mismatch in mapping).
So that we minimize loss of +events, we have a specific retry policy in place. We retry all events which fail to be reached by +Elasticsearch for network related issues. We retry specific events which exhibit errors under a separate +policy described below. Events of this nature are ones which experience ES error codes described as +retryable errors. + +*Retryable Errors:* + +- 429, Too Many Requests (RFC6585) +- 503, The server is currently unable to handle the request due to a temporary overloading or maintenance of the server. + +Here are the rules of what is retried when: + +- Block and retry all events in bulk response that experiences transient network exceptions until + a successful submission is received by Elasticsearch. +- Retry subset of sent events which resulted in ES errors of a retryable nature which can be found + in RETRYABLE_CODES +- For events which returned retryable error codes, they will be pushed onto a separate queue for + retrying events. events in this queue will be retried a maximum of 5 times by default (configurable through :max_retries). The size of + this queue is capped by the value set in :retry_max_items. +- Events from the retry queue are submitted again either when the queue reaches its max size or when + the max interval time is reached, which is set in :retry_max_interval. +- Events which are not retryable or have reached their max retry count are logged to stderr. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Elasticsearch_java Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-action>> |<>, one of `["index", "delete", "create", "update", "create_unless_exists"]`|No +| <<{version}-plugins-{type}s-{plugin}-cluster>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-network_host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-node_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-protocol>> |<>, one of `["node", "transport"]`|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No +| 
<<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-transport_tcp_port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-action"] +===== `action` + + * Value can be any of: `index`, `delete`, `create`, `update`, `create_unless_exists` + * Default value is `"index"` + +The Elasticsearch action to perform. Valid actions are: + +- index: indexes a document (an event from Logstash). +- delete: deletes a document by id (An id is required for this action) +- create: indexes a document, fails if a document by that id already exists in the index. +- update: updates a document by id. Update has a special case where you can upsert -- update a + document if not already present. See the `upsert` option +- create_unless_exists: create the document unless it already exists, in which case do nothing. + +For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation] + +[id="{version}-plugins-{type}s-{plugin}-cluster"] +===== `cluster` + + * Value type is <> + * There is no default value for this setting. + +The name of your cluster if you set it on the Elasticsearch side. Useful +for discovery when using `node` or `transport` protocols. +By default, it looks for a cluster named 'elasticsearch'. +Equivalent to the Elasticsearch option 'cluster.name' + +[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"] +===== `doc_as_upsert` + + * Value type is <> + * Default value is `false` + +Enable `doc_as_upsert` for update mode. +Create a new document with source if `document_id` doesn't exist in Elasticsearch + +[id="{version}-plugins-{type}s-{plugin}-document_id"] +===== `document_id` + + * Value type is <> + * There is no default value for this setting. + +The document ID for the index. Useful for overwriting existing entries in +Elasticsearch with the same ID. + +[id="{version}-plugins-{type}s-{plugin}-document_type"] +===== `document_type` + + * Value type is <> + * There is no default value for this setting. + +The document type to write events to. Generally you should try to write only +similar events to the same 'type'. String expansion `%{foo}` works here. +Unless you set 'document_type', the event 'type' will be used if it exists +otherwise the document type will be assigned the value of 'logs' + +[id="{version}-plugins-{type}s-{plugin}-flush_size"] +===== `flush_size` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + + + +[id="{version}-plugins-{type}s-{plugin}-hosts"] +===== `hosts` + + * Value type is <> + * Default value is `[//127.0.0.1]` + +Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. +Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (eg. 9200, not 9300). 
+
+[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"]
+===== `idle_flush_time` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * Default value is `1`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-index"]
+===== `index`
+
+ * Value type is <>
+ * Default value is `"logstash-%{+YYYY.MM.dd}"`
+
+The index to write events to. This can be dynamic using the `%{foo}` syntax.
+The default value will partition your indices by day so you can more easily
+delete old data or only search specific date ranges.
+Indexes may not contain uppercase characters.
+For weekly indexes, ISO 8601 format is recommended, e.g. `logstash-%{+xxxx.ww}`.
+LS uses Joda to format the index pattern from the event timestamp.
+Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here].
+
+[id="{version}-plugins-{type}s-{plugin}-manage_template"]
+===== `manage_template`
+
+ * Value type is <>
+ * Default value is `true`
+
+From Logstash 1.3 onwards, a template is applied to Elasticsearch during
+Logstash's startup if one with the name `template_name` does not already exist.
+By default, the contents of this template are the default template for
+`logstash-%{+YYYY.MM.dd}`, which always matches indices based on the pattern
+`logstash-*`. Should you require support for other index names, or would like
+to change the mappings in the template in general, a custom template can be
+specified by setting `template` to the path of a template file.
+
+Setting `manage_template` to false disables this feature. If you require more
+control over template creation (e.g. creating indices dynamically based on
+field names), you should set `manage_template` to false and use the REST
+API to apply your templates manually.
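+
+For example, here is a hypothetical sketch that points the plugin at a custom template file and
+names it explicitly (the path and names are placeholders; `template`, `template_name`, and
+`template_overwrite` are described later in this section):
+
+[source,ruby]
+----------------------------------
+output {
+  elasticsearch_java {
+    hosts => ["127.0.0.1"]
+    template => "/etc/logstash/templates/my-template.json"  # hypothetical template file
+    template_name => "my-template"
+    template_overwrite => true  # keep the installed template in sync with the file
+  }
+}
+----------------------------------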
+
+[id="{version}-plugins-{type}s-{plugin}-max_inflight_requests"]
+===== `max_inflight_requests` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * Default value is `50`
+
+This setting no longer does anything. It exists to keep config validation
+from failing. It will be removed in future versions.
+
+[id="{version}-plugins-{type}s-{plugin}-network_host"]
+===== `network_host`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The name/address of the host to bind to for Elasticsearch clustering.
+Equivalent to the Elasticsearch option 'network.host'.
+This MUST be set for either protocol (node or transport) to work! The internal Elasticsearch node
+will bind to this IP, and this IP MUST be reachable by all nodes in the Elasticsearch cluster.
+
+[id="{version}-plugins-{type}s-{plugin}-node_name"]
+===== `node_name`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The node name Elasticsearch will use when joining a cluster.
+
+By default, this is generated internally by the ES client.
+
+[id="{version}-plugins-{type}s-{plugin}-parent"]
+===== `parent`
+
+ * Value type is <>
+ * Default value is `nil`
+
+For child documents, the ID of the associated parent.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-pipeline"]
+===== `pipeline`
+
+ * Value type is <>
+ * Default value is `nil`
+
+Set which ingest pipeline you wish to execute for an event. You can also use event-dependent configuration
+here, like `pipeline => "%{INGEST_PIPELINE}"`.
+
+[id="{version}-plugins-{type}s-{plugin}-protocol"]
+===== `protocol`
+
+ * Value can be any of: `node`, `transport`
+ * Default value is `"transport"`
+
+Choose the protocol used to talk to Elasticsearch.
+
+The 'node' protocol will connect to the cluster as a normal Elasticsearch
+node (but will not store data). If you use the `node` protocol, you must permit
+bidirectional communication on port 9300 (or whichever port you have
+configured).
+
+If you do not specify the `host` parameter, it will use multicast for http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery-zen.html[Elasticsearch discovery]. While this may work in a test/dev environment where multicast is enabled in
+Elasticsearch, we strongly recommend http://www.elastic.co/guide/en/elasticsearch/guide/current/important-configuration-changes.html#unicast[using unicast]
+in Elasticsearch. To connect to an Elasticsearch cluster with unicast,
+you must include the `host` parameter (see the relevant section above).
+
+The 'transport' protocol (the default) will connect to the host you specify and will
+not show up as a 'node' in the Elasticsearch cluster. This is useful
+in situations where you cannot permit connections outbound from the
+Elasticsearch cluster to this Logstash server.
+
+All protocols will use bulk requests when talking to Elasticsearch.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
+===== `retry_initial_interval`
+
+ * Value type is <>
+ * Default value is `2`
+
+Set the initial interval in seconds between bulk retries. This is doubled on each retry, up to `retry_max_interval`.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
+===== `retry_max_interval`
+
+ * Value type is <>
+ * Default value is `64`
+
+Set the maximum interval in seconds between bulk retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
+===== `retry_on_conflict`
+
+ * Value type is <>
+ * Default value is `1`
+
+The number of times Elasticsearch should internally retry an update/upserted document.
+See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates] documentation
+for more info.
+
+[id="{version}-plugins-{type}s-{plugin}-routing"]
+===== `routing`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A routing override to be applied to all processed events.
+This can be dynamic using the `%{foo}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-script"]
+===== `script`
+
+ * Value type is <>
+ * Default value is `""`
+
+Set the script name for scripted update mode (with the inline script type, this contains the script itself).
+
+[id="{version}-plugins-{type}s-{plugin}-script_lang"]
+===== `script_lang`
+
+ * Value type is <>
+ * Default value is `"painless"`
+
+Set the language of the script used. If not set, this defaults to `painless` in ES 5.0.
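+
+To illustrate the scripted update options (`script_type` and `script_var_name` are described
+next), here is a hypothetical sketch that applies an inline Painless script to an existing
+document; the script body and field names are placeholders:
+
+[source,ruby]
+----------------------------------
+output {
+  elasticsearch_java {
+    hosts => ["127.0.0.1"]
+    action => "update"
+    document_id => "%{request_id}"        # hypothetical event field
+    script => "ctx._source.counter += 1"  # placeholder inline script
+    script_type => "inline"
+    script_lang => "painless"
+  }
+}
+----------------------------------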
+
+[id="{version}-plugins-{type}s-{plugin}-script_type"]
+===== `script_type`
+
+ * Value can be any of: `inline`, `indexed`, `file`
+ * Default value is `["inline"]`
+
+Define the type of script referenced by the `script` variable:
+ inline : `script` contains the inline script
+ indexed : `script` contains the name of a script directly indexed in Elasticsearch
+ file : `script` contains the name of a script stored in Elasticsearch's config directory
+
+[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
+===== `script_var_name`
+
+ * Value type is <>
+ * Default value is `"event"`
+
+Set the variable name passed to the script (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
+===== `scripted_upsert`
+
+ * Value type is <>
+ * Default value is `false`
+
+If enabled, the script is in charge of creating a non-existent document (scripted update).
+
+[id="{version}-plugins-{type}s-{plugin}-sniffing"]
+===== `sniffing`
+
+ * Value type is <>
+ * Default value is `false`
+
+Enable cluster sniffing (transport only).
+This asks the host for the list of all cluster nodes and adds them to the hosts list.
+Equivalent to the Elasticsearch option 'client.transport.sniff'.
+
+[id="{version}-plugins-{type}s-{plugin}-template"]
+===== `template`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+You can set the path to your own template here, if you so desire.
+If not set, the included template will be used.
+
+[id="{version}-plugins-{type}s-{plugin}-template_name"]
+===== `template_name`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+This configuration option defines how the template is named inside Elasticsearch.
+Note that if you have used the template management features and subsequently
+change this, you will need to prune the old template manually, e.g.
+
+`curl -XDELETE `
+
+where `OldTemplateName` is whatever the former setting was.
+
+[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
+===== `template_overwrite`
+
+ * Value type is <>
+ * Default value is `false`
+
+The template_overwrite option will always overwrite the indicated template
+in Elasticsearch with either the one indicated by `template` or the included one.
+This option is set to false by default. If you always want to stay up to date
+with the template provided by Logstash, this option could be very useful to you.
+Likewise, if you have your own template file managed by Puppet, for example, and
+you want to be able to update it regularly, this option could help there as well.
+
+Please note that if you are using your own customized version of the Logstash
+template (logstash), setting this to true will make Logstash overwrite
+the "logstash" template (i.e. remove all customized settings).
+
+[id="{version}-plugins-{type}s-{plugin}-transport_tcp_port"]
+===== `transport_tcp_port`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+This sets the local port to bind to. Equivalent to the Elasticsearch option 'transport.tcp.port'.
+
+[id="{version}-plugins-{type}s-{plugin}-upsert"]
+===== `upsert`
+
+ * Value type is <>
+ * Default value is `""`
+
+Set the upsert content for update mode.
+Creates a new document with this parameter as a JSON string if `document_id` doesn't exist.
+
+[id="{version}-plugins-{type}s-{plugin}-version"]
+===== `version`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
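+
+As a hypothetical illustration of `version` together with `version_type` (described next), this
+sketch indexes events with an externally managed version taken from an event field (the host and
+field names are placeholders):
+
+[source,ruby]
+----------------------------------
+output {
+  elasticsearch_java {
+    hosts => ["127.0.0.1"]
+    document_id => "%{request_id}"  # hypothetical event field
+    version => "%{my_version}"      # version value carried on the event
+    version_type => "external"      # index only if the version exceeds the stored one
+  }
+}
+----------------------------------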
+
+[id="{version}-plugins-{type}s-{plugin}-version_type"]
+===== `version_type`
+
+ * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
+ * There is no default value for this setting.
+
+The version_type to use for indexing.
+See https://www.elastic.co/blog/elasticsearch-versioning-support.
+See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/email-index.asciidoc b/docs/versioned-plugins/outputs/email-index.asciidoc
new file mode 100644
index 000000000..afc9166f3
--- /dev/null
+++ b/docs/versioned-plugins/outputs/email-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: email
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::email-v4.0.6.asciidoc[]
+include::email-v4.0.4.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/email-v4.0.4.asciidoc b/docs/versioned-plugins/outputs/email-v4.0.4.asciidoc
new file mode 100644
index 000000000..698db27f9
--- /dev/null
+++ b/docs/versioned-plugins/outputs/email-v4.0.4.asciidoc
@@ -0,0 +1,233 @@
+:plugin: email
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.4
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-email/blob/v4.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Email output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Sends email when an output is received. Alternatively, you may include or
+exclude the email output execution using conditionals.
+
+==== Usage Example
+[source,ruby]
+----------------------------------
+output {
+  if "shouldmail" in [tags] {
+    email {
+      to => 'technical@logstash.net'
+      from => 'monitor@logstash.net'
+      subject => 'Alert - %{title}'
+      body => "Tags: %{tags}\\nContent:\\n%{message}"
+      domain => 'mail.logstash.net'
+      port => 25
+    }
+  }
+}
+----------------------------------
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Email Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-address>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-attachments>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-authentication>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-body>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cc>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-contenttype>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-debug>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-domain>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-from>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-htmlbody>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-replyto>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-subject>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-to>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-use_tls>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-username>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-via>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-address"] +===== `address` + + * Value type is <> + * Default value is `"localhost"` + +The address used to connect to the mail server + +[id="{version}-plugins-{type}s-{plugin}-attachments"] +===== `attachments` + + * Value type is <> + * Default value is `[]` + +Attachments - specify the name(s) and location(s) of the files. + +[id="{version}-plugins-{type}s-{plugin}-authentication"] +===== `authentication` + + * Value type is <> + * There is no default value for this setting. + +Authentication method used when identifying with the server + +[id="{version}-plugins-{type}s-{plugin}-body"] +===== `body` + + * Value type is <> + * Default value is `""` + +Body for the email - plain text only. + +[id="{version}-plugins-{type}s-{plugin}-cc"] +===== `cc` + + * Value type is <> + * There is no default value for this setting. + +The fully-qualified email address(es) to include as cc: address(es). + +This field also accepts a comma-separated string of addresses, for example: +`"me@host.com, you@host.com"` + +[id="{version}-plugins-{type}s-{plugin}-contenttype"] +===== `contenttype` + + * Value type is <> + * Default value is `"text/html; charset=UTF-8"` + +contenttype : for multipart messages, set the content-type and/or charset of the HTML part. +NOTE: this may not be functional (KH) + +[id="{version}-plugins-{type}s-{plugin}-debug"] +===== `debug` + + * Value type is <> + * Default value is `false` + +Run the mail relay in debug mode + +[id="{version}-plugins-{type}s-{plugin}-domain"] +===== `domain` + + * Value type is <> + * Default value is `"localhost"` + +Domain used to send the email messages + +[id="{version}-plugins-{type}s-{plugin}-from"] +===== `from` + + * Value type is <> + * Default value is `"logstash.alert@nowhere.com"` + +The fully-qualified email address for the From: field in the email. + +[id="{version}-plugins-{type}s-{plugin}-htmlbody"] +===== `htmlbody` + + * Value type is <> + * Default value is `""` + +HTML Body for the email, which may contain HTML markup. 
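+
+For instance, a hypothetical alert that sends both a plain-text and an HTML part might look like
+this (the addresses and fields are placeholders):
+
+[source,ruby]
+----------------------------------
+email {
+  to => 'ops@example.com'                        # placeholder address
+  subject => 'Alert - %{title}'
+  body => "Tags: %{tags}\\nMessage: %{message}"  # plain-text part
+  htmlbody => "<h2>%{title}</h2><p>%{message}</p>"
+}
+----------------------------------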
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Password to authenticate with the server.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <>
+ * Default value is `25`
+
+Port used to communicate with the mail server.
+
+[id="{version}-plugins-{type}s-{plugin}-replyto"]
+===== `replyto`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The fully-qualified email address for the Reply-To: field.
+
+[id="{version}-plugins-{type}s-{plugin}-subject"]
+===== `subject`
+
+ * Value type is <>
+ * Default value is `""`
+
+Subject: for the email.
+
+[id="{version}-plugins-{type}s-{plugin}-to"]
+===== `to`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The fully-qualified email address to send the email to.
+
+This field also accepts a comma-separated string of addresses, for example:
+`"me@host.com, you@host.com"`
+
+You can also use dynamic fields from the event with the `%{fieldname}` syntax.
+
+[id="{version}-plugins-{type}s-{plugin}-use_tls"]
+===== `use_tls`
+
+ * Value type is <>
+ * Default value is `false`
+
+Enables TLS when communicating with the server.
+
+[id="{version}-plugins-{type}s-{plugin}-username"]
+===== `username`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Username to authenticate with the server.
+
+[id="{version}-plugins-{type}s-{plugin}-via"]
+===== `via`
+
+ * Value type is <>
+ * Default value is `"smtp"`
+
+How Logstash should send the email, either via SMTP or by invoking sendmail.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/email-v4.0.6.asciidoc b/docs/versioned-plugins/outputs/email-v4.0.6.asciidoc
new file mode 100644
index 000000000..303be1097
--- /dev/null
+++ b/docs/versioned-plugins/outputs/email-v4.0.6.asciidoc
@@ -0,0 +1,235 @@
+:plugin: email
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.6
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-output-email/blob/v4.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Email output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Sends email when an output is received. Alternatively, you may include or
+exclude the email output execution using conditionals.
+
+==== Usage Example
+[source,ruby]
+----------------------------------
+output {
+  if "shouldmail" in [tags] {
+    email {
+      to => 'technical@example.com'
+      from => 'monitor@example.com'
+      subject => 'Alert - %{title}'
+      body => "Tags: %{tags}\\nContent:\\n%{message}"
+      domain => 'mail.example.com'
+      port => 25
+    }
+  }
+}
+----------------------------------
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Email Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-address>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-attachments>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-authentication>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-body>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cc>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-contenttype>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-debug>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-domain>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-from>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-htmlbody>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-replyto>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-subject>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-to>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-use_tls>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-username>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-via>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-address"] +===== `address` + + * Value type is <> + * Default value is `"localhost"` + +The address used to connect to the mail server + +[id="{version}-plugins-{type}s-{plugin}-attachments"] +===== `attachments` + + * Value type is <> + * Default value is `[]` + +Attachments - specify the name(s) and location(s) of the files. + +[id="{version}-plugins-{type}s-{plugin}-authentication"] +===== `authentication` + + * Value type is <> + * There is no default value for this setting. + +Authentication method used when identifying with the server + +[id="{version}-plugins-{type}s-{plugin}-body"] +===== `body` + + * Value type is <> + * Default value is `""` + +Body for the email - plain text only. + +[id="{version}-plugins-{type}s-{plugin}-cc"] +===== `cc` + + * Value type is <> + * There is no default value for this setting. + +The fully-qualified email address(es) to include as cc: address(es). + +This field also accepts a comma-separated string of addresses, for example: +`"me@example.com, you@example.com"` + +[id="{version}-plugins-{type}s-{plugin}-contenttype"] +===== `contenttype` + + * Value type is <> + * Default value is `"text/html; charset=UTF-8"` + +contenttype : for multipart messages, set the content-type and/or charset of the HTML part. +NOTE: this may not be functional (KH) + +[id="{version}-plugins-{type}s-{plugin}-debug"] +===== `debug` + + * Value type is <> + * Default value is `false` + +Run the mail relay in debug mode + +[id="{version}-plugins-{type}s-{plugin}-domain"] +===== `domain` + + * Value type is <> + * Default value is `"localhost"` + +The HELO/EHLO domain name used in the greeting message when connecting +to a remote SMTP server. Some servers require this name to match the +actual hostname of the connecting client. + +[id="{version}-plugins-{type}s-{plugin}-from"] +===== `from` + + * Value type is <> + * Default value is `"logstash.alert@example.com"` + +The fully-qualified email address for the From: field in the email. 
+ +[id="{version}-plugins-{type}s-{plugin}-htmlbody"] +===== `htmlbody` + + * Value type is <> + * Default value is `""` + +HTML Body for the email, which may contain HTML markup. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to authenticate with the server + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `25` + +Port used to communicate with the mail server + +[id="{version}-plugins-{type}s-{plugin}-replyto"] +===== `replyto` + + * Value type is <> + * There is no default value for this setting. + +The fully qualified email address for the Reply-To: field. + +[id="{version}-plugins-{type}s-{plugin}-subject"] +===== `subject` + + * Value type is <> + * Default value is `""` + +Subject: for the email. + +[id="{version}-plugins-{type}s-{plugin}-to"] +===== `to` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The fully-qualified email address to send the email to. + +This field also accepts a comma-separated string of addresses, for example: +`"me@example.com, you@example.com"` + +You can also use dynamic fields from the event with the `%{fieldname}` syntax. + +[id="{version}-plugins-{type}s-{plugin}-use_tls"] +===== `use_tls` + + * Value type is <> + * Default value is `false` + +Enables TLS when communicating with the server + +[id="{version}-plugins-{type}s-{plugin}-username"] +===== `username` + + * Value type is <> + * There is no default value for this setting. + +Username to authenticate with the server + +[id="{version}-plugins-{type}s-{plugin}-via"] +===== `via` + + * Value type is <> + * Default value is `"smtp"` + +How Logstash should send the email, either via SMTP or by invoking sendmail. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/example-index.asciidoc b/docs/versioned-plugins/outputs/example-index.asciidoc new file mode 100644 index 000000000..17979309d --- /dev/null +++ b/docs/versioned-plugins/outputs/example-index.asciidoc @@ -0,0 +1,10 @@ +:plugin: example +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +|======================================================================= + + diff --git a/docs/versioned-plugins/outputs/exec-index.asciidoc b/docs/versioned-plugins/outputs/exec-index.asciidoc new file mode 100644 index 000000000..a2cf35e1e --- /dev/null +++ b/docs/versioned-plugins/outputs/exec-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: exec +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-21 +| <> | 2017-06-23 +|======================================================================= + +include::exec-v3.1.3.asciidoc[] +include::exec-v3.1.2.asciidoc[] +include::exec-v3.1.1.asciidoc[] + diff --git a/docs/versioned-plugins/outputs/exec-v3.1.1.asciidoc b/docs/versioned-plugins/outputs/exec-v3.1.1.asciidoc new file mode 100644 index 000000000..d44f4b669 --- /dev/null +++ b/docs/versioned-plugins/outputs/exec-v3.1.1.asciidoc @@ -0,0 +1,86 @@ +:plugin: exec +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+:version: v3.1.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-exec/blob/v3.1.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Exec output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The exec output will run a command for each event received. Ruby's
+`system()` function will be used, i.e. the command string will
+be passed to a shell. You can use `%{name}` and other dynamic strings
+in the command to pass select fields from the event to the child
+process. Example:
+[source,ruby]
+    output {
+      if [type] == "abuse" {
+        exec {
+          command => "iptables -A INPUT -s %{clientip} -j DROP"
+        }
+      }
+    }
+
+WARNING: If you want it to be non-blocking, you should use `&` or `dtach`
+or other such techniques. There is no timeout for the commands being
+run, so misbehaving commands could otherwise stall the Logstash
+pipeline indefinitely.
+
+WARNING: Exercise great caution with `%{name}` field placeholders.
+The contents of the field will be included verbatim without any
+sanitization, i.e. any shell metacharacters from the field values
+will be passed straight to the shell.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Exec Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-command>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-quiet>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-command"]
+===== `command`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Command line to execute via subprocess. Use `dtach` or `screen` to
+make it non-blocking. This value can include `%{name}` and other
+dynamic strings.
+
+[id="{version}-plugins-{type}s-{plugin}-quiet"]
+===== `quiet`
+
+ * Value type is <>
+ * Default value is `false`
+
+Display the result of the command to the terminal.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/exec-v3.1.2.asciidoc b/docs/versioned-plugins/outputs/exec-v3.1.2.asciidoc
new file mode 100644
index 000000000..2679ebee6
--- /dev/null
+++ b/docs/versioned-plugins/outputs/exec-v3.1.2.asciidoc
@@ -0,0 +1,86 @@
+:plugin: exec
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.2
+:release_date: 2017-08-21
+:changelog_url: https://github.com/logstash-plugins/logstash-output-exec/blob/v3.1.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Exec output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The exec output will run a command for each event received. Ruby's
+`system()` function will be used, i.e. the command string will
+be passed to a shell. You can use `%{name}` and other dynamic strings
+in the command to pass select fields from the event to the child
+process. Example:
+[source,ruby]
+    output {
+      if [type] == "abuse" {
+        exec {
+          command => "iptables -A INPUT -s %{clientip} -j DROP"
+        }
+      }
+    }
+
+WARNING: If you want it to be non-blocking, you should use `&` or `dtach`
+or other such techniques. There is no timeout for the commands being
+run, so misbehaving commands could otherwise stall the Logstash
+pipeline indefinitely.
+
+WARNING: Exercise great caution with `%{name}` field placeholders.
+The contents of the field will be included verbatim without any
+sanitization, i.e. any shell metacharacters from the field values
+will be passed straight to the shell.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Exec Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-command>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-quiet>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-command"]
+===== `command`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Command line to execute via subprocess. Use `dtach` or `screen` to
+make it non-blocking. This value can include `%{name}` and other
+dynamic strings.
+
+[id="{version}-plugins-{type}s-{plugin}-quiet"]
+===== `quiet`
+
+ * Value type is <>
+ * Default value is `false`
+
+Display the result of the command to the terminal.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/exec-v3.1.3.asciidoc b/docs/versioned-plugins/outputs/exec-v3.1.3.asciidoc
new file mode 100644
index 000000000..18c1732a4
--- /dev/null
+++ b/docs/versioned-plugins/outputs/exec-v3.1.3.asciidoc
@@ -0,0 +1,86 @@
+:plugin: exec
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.3
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-output-exec/blob/v3.1.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Exec output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The exec output will run a command for each event received. Ruby's
+`system()` function will be used, i.e. the command string will
+be passed to a shell.
You can use `%{name}` and other dynamic strings
+in the command to pass select fields from the event to the child
+process. Example:
+[source,ruby]
+    output {
+      if [type] == "abuse" {
+        exec {
+          command => "iptables -A INPUT -s %{clientip} -j DROP"
+        }
+      }
+    }
+
+WARNING: If you want it to be non-blocking, you should use `&` or `dtach`
+or other such techniques. There is no timeout for the commands being
+run, so misbehaving commands could otherwise stall the Logstash
+pipeline indefinitely.
+
+WARNING: Exercise great caution with `%{name}` field placeholders.
+The contents of the field will be included verbatim without any
+sanitization, i.e. any shell metacharacters from the field values
+will be passed straight to the shell.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Exec Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-command>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-quiet>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-command"]
+===== `command`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Command line to execute via subprocess. Use `dtach` or `screen` to
+make it non-blocking. This value can include `%{name}` and other
+dynamic strings.
+
+[id="{version}-plugins-{type}s-{plugin}-quiet"]
+===== `quiet`
+
+ * Value type is <>
+ * Default value is `false`
+
+Display the result of the command to the terminal.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/file-index.asciidoc b/docs/versioned-plugins/outputs/file-index.asciidoc
new file mode 100644
index 000000000..b63d73c5b
--- /dev/null
+++ b/docs/versioned-plugins/outputs/file-index.asciidoc
@@ -0,0 +1,22 @@
+:plugin: file
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-24
+| <> | 2017-11-23
+| <> | 2017-11-07
+| <> | 2017-08-16
+| <> | 2017-08-01
+| <> | 2017-06-23
+|=======================================================================
+
+include::file-v4.2.1.asciidoc[]
+include::file-v4.2.0.asciidoc[]
+include::file-v4.1.2.asciidoc[]
+include::file-v4.1.1.asciidoc[]
+include::file-v4.1.0.asciidoc[]
+include::file-v4.0.2.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/file-v4.0.2.asciidoc b/docs/versioned-plugins/outputs/file-v4.0.2.asciidoc
new file mode 100644
index 000000000..aabd92a39
--- /dev/null
+++ b/docs/versioned-plugins/outputs/file-v4.0.2.asciidoc
@@ -0,0 +1,136 @@
+:plugin: file
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-file/blob/v4.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== File output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output writes events to files on disk. You can use fields
+from the event as parts of the filename and/or path.
+
+By default, this output writes one event per line in **json** format.
+You can customise the line format using the `line` codec, like this:
+[source,ruby]
+output {
+  file {
+    path => ...
+    codec => line { format => "custom format: %{message}"}
+  }
+}
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== File Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-file_mode>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-gzip>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-create_if_deleted"]
+===== `create_if_deleted`
+
+ * Value type is <>
+ * Default value is `true`
+
+If the configured file is deleted, but an event is handled by the plugin,
+the plugin will recreate the file.
+
+[id="{version}-plugins-{type}s-{plugin}-dir_mode"]
+===== `dir_mode`
+
+ * Value type is <>
+ * Default value is `-1`
+
+Dir access mode to use. Note that due to a bug in JRuby, the system umask
+is ignored on Linux: https://github.com/jruby/jruby/issues/3426
+Setting it to -1 uses the default OS value.
+Example: `"dir_mode" => 0750`
+
+[id="{version}-plugins-{type}s-{plugin}-file_mode"]
+===== `file_mode`
+
+ * Value type is <>
+ * Default value is `-1`
+
+File access mode to use. Note that due to a bug in JRuby, the system umask
+is ignored on Linux: https://github.com/jruby/jruby/issues/3426
+Setting it to -1 uses the default OS value.
+Example: `"file_mode" => 0640`
+
+[id="{version}-plugins-{type}s-{plugin}-filename_failure"]
+===== `filename_failure`
+
+ * Value type is <>
+ * Default value is `"_filepath_failures"`
+
+If the generated path is invalid, the events will be saved
+into this file, inside the defined path.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_interval"]
+===== `flush_interval`
+
+ * Value type is <>
+ * Default value is `2`
+
+Flush interval (in seconds) for flushing writes to log files.
+0 will flush on every message.
+
+[id="{version}-plugins-{type}s-{plugin}-gzip"]
+===== `gzip`
+
+ * Value type is <>
+ * Default value is `false`
+
+Gzip the output stream before writing to disk.
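+
+For instance, a hypothetical configuration that writes gzipped, date-partitioned files (the path
+is a placeholder; the `path` option is described next):
+
+[source,ruby]
+----------------------------------
+output {
+  file {
+    path => "/var/log/logstash/%{host}/app-%{+YYYY-MM-dd}.log.gz"  # placeholder path
+    gzip => true         # compress the stream before writing to disk
+    flush_interval => 5  # flush writes every 5 seconds
+  }
+}
+----------------------------------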
+ +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The path to the file to write. Event fields can be used here, +like `/var/log/logstash/%{host}/%{application}` +One may also utilize the path option for date-based log +rotation via the joda time format. This will use the event +timestamp. +E.g.: `path => "./test-%{+YYYY-MM-dd}.txt"` to create +`./test-2013-05-29.txt` + +If you use an absolute path you cannot start with a dynamic string. +E.g: `/%{myfield}/`, `/test-%{myfield}/` are not valid paths + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/file-v4.1.0.asciidoc b/docs/versioned-plugins/outputs/file-v4.1.0.asciidoc new file mode 100644 index 000000000..17a94dfd7 --- /dev/null +++ b/docs/versioned-plugins/outputs/file-v4.1.0.asciidoc @@ -0,0 +1,136 @@ +:plugin: file +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.1.0 +:release_date: 2017-08-01 +:changelog_url: https://github.com/logstash-plugins/logstash-output-file/blob/v4.1.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== File output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output writes events to files on disk. You can use fields +from the event as parts of the filename and/or path. + +By default, this output writes one event per line in **json** format. +You can customise the line format using the `line` codec like +[source,ruby] +output { + file { + path => ... + codec => line { format => "custom format: %{message}"} + } +} + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== File Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-file_mode>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-gzip>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-create_if_deleted"] +===== `create_if_deleted` + + * Value type is <> + * Default value is `true` + +If the configured file is deleted, but an event is handled by the plugin, +the plugin will recreate the file. Default => true + +[id="{version}-plugins-{type}s-{plugin}-dir_mode"] +===== `dir_mode` + + * Value type is <> + * Default value is `-1` + +Dir access mode to use. 
Note that due to the bug in jruby system umask +is ignored on linux: https://github.com/jruby/jruby/issues/3426 +Setting it to -1 uses default OS value. +Example: `"dir_mode" => 0750` + +[id="{version}-plugins-{type}s-{plugin}-file_mode"] +===== `file_mode` + + * Value type is <> + * Default value is `-1` + +File access mode to use. Note that due to the bug in jruby system umask +is ignored on linux: https://github.com/jruby/jruby/issues/3426 +Setting it to -1 uses default OS value. +Example: `"file_mode" => 0640` + +[id="{version}-plugins-{type}s-{plugin}-filename_failure"] +===== `filename_failure` + + * Value type is <> + * Default value is `"_filepath_failures"` + +If the generated path is invalid, the events will be saved +into this file and inside the defined path. + +[id="{version}-plugins-{type}s-{plugin}-flush_interval"] +===== `flush_interval` + + * Value type is <> + * Default value is `2` + +Flush interval (in seconds) for flushing writes to log files. +0 will flush on every message. + +[id="{version}-plugins-{type}s-{plugin}-gzip"] +===== `gzip` + + * Value type is <> + * Default value is `false` + +Gzip the output stream before writing to disk. + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The path to the file to write. Event fields can be used here, +like `/var/log/logstash/%{host}/%{application}` +One may also utilize the path option for date-based log +rotation via the joda time format. This will use the event +timestamp. +E.g.: `path => "./test-%{+YYYY-MM-dd}.txt"` to create +`./test-2013-05-29.txt` + +If you use an absolute path you cannot start with a dynamic string. +E.g: `/%{myfield}/`, `/test-%{myfield}/` are not valid paths + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/file-v4.1.1.asciidoc b/docs/versioned-plugins/outputs/file-v4.1.1.asciidoc new file mode 100644 index 000000000..d2f3e389f --- /dev/null +++ b/docs/versioned-plugins/outputs/file-v4.1.1.asciidoc @@ -0,0 +1,136 @@ +:plugin: file +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.1.1 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-file/blob/v4.1.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== File output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output writes events to files on disk. You can use fields +from the event as parts of the filename and/or path. + +By default, this output writes one event per line in **json** format. +You can customise the line format using the `line` codec like +[source,ruby] +output { + file { + path => ... + codec => line { format => "custom format: %{message}"} + } +} + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== File Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-file_mode>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-gzip>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-create_if_deleted"] +===== `create_if_deleted` + + * Value type is <> + * Default value is `true` + +If the configured file is deleted, but an event is handled by the plugin, +the plugin will recreate the file. Default => true + +[id="{version}-plugins-{type}s-{plugin}-dir_mode"] +===== `dir_mode` + + * Value type is <> + * Default value is `-1` + +Dir access mode to use. Note that due to the bug in jruby system umask +is ignored on linux: https://github.com/jruby/jruby/issues/3426 +Setting it to -1 uses default OS value. +Example: `"dir_mode" => 0750` + +[id="{version}-plugins-{type}s-{plugin}-file_mode"] +===== `file_mode` + + * Value type is <> + * Default value is `-1` + +File access mode to use. Note that due to the bug in jruby system umask +is ignored on linux: https://github.com/jruby/jruby/issues/3426 +Setting it to -1 uses default OS value. +Example: `"file_mode" => 0640` + +[id="{version}-plugins-{type}s-{plugin}-filename_failure"] +===== `filename_failure` + + * Value type is <> + * Default value is `"_filepath_failures"` + +If the generated path is invalid, the events will be saved +into this file and inside the defined path. + +[id="{version}-plugins-{type}s-{plugin}-flush_interval"] +===== `flush_interval` + + * Value type is <> + * Default value is `2` + +Flush interval (in seconds) for flushing writes to log files. +0 will flush on every message. + +[id="{version}-plugins-{type}s-{plugin}-gzip"] +===== `gzip` + + * Value type is <> + * Default value is `false` + +Gzip the output stream before writing to disk. + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The path to the file to write. Event fields can be used here, +like `/var/log/logstash/%{host}/%{application}` +One may also utilize the path option for date-based log +rotation via the joda time format. This will use the event +timestamp. +E.g.: `path => "./test-%{+YYYY-MM-dd}.txt"` to create +`./test-2013-05-29.txt` + +If you use an absolute path you cannot start with a dynamic string. +E.g: `/%{myfield}/`, `/test-%{myfield}/` are not valid paths + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/file-v4.1.2.asciidoc b/docs/versioned-plugins/outputs/file-v4.1.2.asciidoc new file mode 100644 index 000000000..d4e2b076c --- /dev/null +++ b/docs/versioned-plugins/outputs/file-v4.1.2.asciidoc @@ -0,0 +1,136 @@ +:plugin: file +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// +:version: v4.1.2 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-output-file/blob/v4.1.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== File output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output writes events to files on disk. You can use fields +from the event as parts of the filename and/or path. + +By default, this output writes one event per line in **json** format. +You can customise the line format using the `line` codec like +[source,ruby] +output { + file { + path => ... + codec => line { format => "custom format: %{message}"} + } +} + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== File Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-file_mode>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-gzip>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-create_if_deleted"] +===== `create_if_deleted` + + * Value type is <> + * Default value is `true` + +If the configured file is deleted, but an event is handled by the plugin, +the plugin will recreate the file. Default => true + +[id="{version}-plugins-{type}s-{plugin}-dir_mode"] +===== `dir_mode` + + * Value type is <> + * Default value is `-1` + +Dir access mode to use. Note that due to the bug in jruby system umask +is ignored on linux: https://github.com/jruby/jruby/issues/3426 +Setting it to -1 uses default OS value. +Example: `"dir_mode" => 0750` + +[id="{version}-plugins-{type}s-{plugin}-file_mode"] +===== `file_mode` + + * Value type is <> + * Default value is `-1` + +File access mode to use. Note that due to the bug in jruby system umask +is ignored on linux: https://github.com/jruby/jruby/issues/3426 +Setting it to -1 uses default OS value. +Example: `"file_mode" => 0640` + +[id="{version}-plugins-{type}s-{plugin}-filename_failure"] +===== `filename_failure` + + * Value type is <> + * Default value is `"_filepath_failures"` + +If the generated path is invalid, the events will be saved +into this file and inside the defined path. + +[id="{version}-plugins-{type}s-{plugin}-flush_interval"] +===== `flush_interval` + + * Value type is <> + * Default value is `2` + +Flush interval (in seconds) for flushing writes to log files. +0 will flush on every message. + +[id="{version}-plugins-{type}s-{plugin}-gzip"] +===== `gzip` + + * Value type is <> + * Default value is `false` + +Gzip the output stream before writing to disk. 
+ +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The path to the file to write. Event fields can be used here, +like `/var/log/logstash/%{host}/%{application}` +One may also utilize the path option for date-based log +rotation via the joda time format. This will use the event +timestamp. +E.g.: `path => "./test-%{+YYYY-MM-dd}.txt"` to create +`./test-2013-05-29.txt` + +If you use an absolute path you cannot start with a dynamic string. +E.g: `/%{myfield}/`, `/test-%{myfield}/` are not valid paths + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/file-v4.2.0.asciidoc b/docs/versioned-plugins/outputs/file-v4.2.0.asciidoc new file mode 100644 index 000000000..b952d9d46 --- /dev/null +++ b/docs/versioned-plugins/outputs/file-v4.2.0.asciidoc @@ -0,0 +1,136 @@ +:plugin: file +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.2.0 +:release_date: 2017-11-23 +:changelog_url: https://github.com/logstash-plugins/logstash-output-file/blob/v4.2.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== File output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output writes events to files on disk. You can use fields +from the event as parts of the filename and/or path. + +By default, this output writes one event per line in **json** format. +You can customise the line format using the `line` codec like +[source,ruby] +output { + file { + path => ... + codec => line { format => "custom format: %{message}"} + } +} + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== File Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-file_mode>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-gzip>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-create_if_deleted"] +===== `create_if_deleted` + + * Value type is <> + * Default value is `true` + +If the configured file is deleted, but an event is handled by the plugin, +the plugin will recreate the file. Default => true + +[id="{version}-plugins-{type}s-{plugin}-dir_mode"] +===== `dir_mode` + + * Value type is <> + * Default value is `-1` + +Dir access mode to use. 
Note that due to the bug in jruby system umask +is ignored on linux: https://github.com/jruby/jruby/issues/3426 +Setting it to -1 uses default OS value. +Example: `"dir_mode" => 0750` + +[id="{version}-plugins-{type}s-{plugin}-file_mode"] +===== `file_mode` + + * Value type is <> + * Default value is `-1` + +File access mode to use. Note that due to the bug in jruby system umask +is ignored on linux: https://github.com/jruby/jruby/issues/3426 +Setting it to -1 uses default OS value. +Example: `"file_mode" => 0640` + +[id="{version}-plugins-{type}s-{plugin}-filename_failure"] +===== `filename_failure` + + * Value type is <> + * Default value is `"_filepath_failures"` + +If the generated path is invalid, the events will be saved +into this file and inside the defined path. + +[id="{version}-plugins-{type}s-{plugin}-flush_interval"] +===== `flush_interval` + + * Value type is <> + * Default value is `2` + +Flush interval (in seconds) for flushing writes to log files. +0 will flush on every message. + +[id="{version}-plugins-{type}s-{plugin}-gzip"] +===== `gzip` + + * Value type is <> + * Default value is `false` + +Gzip the output stream before writing to disk. + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The path to the file to write. Event fields can be used here, +like `/var/log/logstash/%{host}/%{application}` +One may also utilize the path option for date-based log +rotation via the joda time format. This will use the event +timestamp. +E.g.: `path => "./test-%{+YYYY-MM-dd}.txt"` to create +`./test-2013-05-29.txt` + +If you use an absolute path you cannot start with a dynamic string. +E.g: `/%{myfield}/`, `/test-%{myfield}/` are not valid paths + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/file-v4.2.1.asciidoc b/docs/versioned-plugins/outputs/file-v4.2.1.asciidoc new file mode 100644 index 000000000..c12eb4cc1 --- /dev/null +++ b/docs/versioned-plugins/outputs/file-v4.2.1.asciidoc @@ -0,0 +1,136 @@ +:plugin: file +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.2.1 +:release_date: 2017-11-24 +:changelog_url: https://github.com/logstash-plugins/logstash-output-file/blob/v4.2.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== File output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output writes events to files on disk. You can use fields +from the event as parts of the filename and/or path. + +By default, this output writes one event per line in **json** format. +You can customise the line format using the `line` codec like +[source,ruby] +output { + file { + path => ... + codec => line { format => "custom format: %{message}"} + } +} + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== File Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-file_mode>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-gzip>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-create_if_deleted"] +===== `create_if_deleted` + + * Value type is <> + * Default value is `true` + +If the configured file is deleted, but an event is handled by the plugin, +the plugin will recreate the file. Default => true + +[id="{version}-plugins-{type}s-{plugin}-dir_mode"] +===== `dir_mode` + + * Value type is <> + * Default value is `-1` + +Dir access mode to use. Note that due to the bug in jruby system umask +is ignored on linux: https://github.com/jruby/jruby/issues/3426 +Setting it to -1 uses default OS value. +Example: `"dir_mode" => 0750` + +[id="{version}-plugins-{type}s-{plugin}-file_mode"] +===== `file_mode` + + * Value type is <> + * Default value is `-1` + +File access mode to use. Note that due to the bug in jruby system umask +is ignored on linux: https://github.com/jruby/jruby/issues/3426 +Setting it to -1 uses default OS value. +Example: `"file_mode" => 0640` + +[id="{version}-plugins-{type}s-{plugin}-filename_failure"] +===== `filename_failure` + + * Value type is <> + * Default value is `"_filepath_failures"` + +If the generated path is invalid, the events will be saved +into this file and inside the defined path. + +[id="{version}-plugins-{type}s-{plugin}-flush_interval"] +===== `flush_interval` + + * Value type is <> + * Default value is `2` + +Flush interval (in seconds) for flushing writes to log files. +0 will flush on every message. + +[id="{version}-plugins-{type}s-{plugin}-gzip"] +===== `gzip` + + * Value type is <> + * Default value is `false` + +Gzip the output stream before writing to disk. + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The path to the file to write. Event fields can be used here, +like `/var/log/logstash/%{host}/%{application}` +One may also utilize the path option for date-based log +rotation via the joda time format. This will use the event +timestamp. +E.g.: `path => "./test-%{+YYYY-MM-dd}.txt"` to create +`./test-2013-05-29.txt` + +If you use an absolute path you cannot start with a dynamic string. 
+E.g.: `/%{myfield}/`, `/test-%{myfield}/` are not valid paths.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/firehose-index.asciidoc b/docs/versioned-plugins/outputs/firehose-index.asciidoc
new file mode 100644
index 000000000..69f53d5ee
--- /dev/null
+++ b/docs/versioned-plugins/outputs/firehose-index.asciidoc
@@ -0,0 +1,10 @@
+:plugin: firehose
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+|=======================================================================
+
+
diff --git a/docs/versioned-plugins/outputs/ganglia-index.asciidoc b/docs/versioned-plugins/outputs/ganglia-index.asciidoc
new file mode 100644
index 000000000..01bf6bd0d
--- /dev/null
+++ b/docs/versioned-plugins/outputs/ganglia-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: ganglia
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::ganglia-v3.0.5.asciidoc[]
+include::ganglia-v3.0.4.asciidoc[]
+include::ganglia-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/ganglia-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/ganglia-v3.0.3.asciidoc
new file mode 100644
index 000000000..5a96f9d34
--- /dev/null
+++ b/docs/versioned-plugins/outputs/ganglia-v3.0.3.asciidoc
@@ -0,0 +1,139 @@
+:plugin: ganglia
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-ganglia/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Ganglia output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output allows you to pull metrics from your logs and ship them to
+Ganglia's gmond. This is heavily based on the Graphite output.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Ganglia Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-group>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-lifetime>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metric>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-metric_type>> |<>, one of `["string", "int8", "uint8", "int16", "uint16", "int32", "uint32", "float", "double"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-slope>> |<>, one of `["zero", "positive", "negative", "both", "unspecified"]`|No +| <<{version}-plugins-{type}s-{plugin}-units>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-value>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-group"] +===== `group` + + * Value type is <> + * Default value is `""` + +Metric group + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"localhost"` + +The address of the ganglia server. + +[id="{version}-plugins-{type}s-{plugin}-lifetime"] +===== `lifetime` + + * Value type is <> + * Default value is `300` + +Lifetime in seconds of this metric + +[id="{version}-plugins-{type}s-{plugin}-max_interval"] +===== `max_interval` + + * Value type is <> + * Default value is `60` + +Maximum time in seconds between gmetric calls for this metric. + +[id="{version}-plugins-{type}s-{plugin}-metric"] +===== `metric` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The metric to use. This supports dynamic strings like `%{host}` + +[id="{version}-plugins-{type}s-{plugin}-metric_type"] +===== `metric_type` + + * Value can be any of: `string`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `uint32`, `float`, `double` + * Default value is `"uint8"` + +The type of value for this metric. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `8649` + +The port to connect on your ganglia server. + +[id="{version}-plugins-{type}s-{plugin}-slope"] +===== `slope` + + * Value can be any of: `zero`, `positive`, `negative`, `both`, `unspecified` + * Default value is `"both"` + +Metric slope, represents metric behavior + +[id="{version}-plugins-{type}s-{plugin}-units"] +===== `units` + + * Value type is <> + * Default value is `""` + +Gmetric units for metric, such as "kb/sec" or "ms" or whatever unit +this metric uses. + +[id="{version}-plugins-{type}s-{plugin}-value"] +===== `value` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The value to use. This supports dynamic strings like `%{bytes}` +It will be coerced to a floating point value. 
Values which cannot be coerced will be set to zero (0).
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/ganglia-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/ganglia-v3.0.4.asciidoc
new file mode 100644
index 000000000..46fe80ea4
--- /dev/null
+++ b/docs/versioned-plugins/outputs/ganglia-v3.0.4.asciidoc
@@ -0,0 +1,139 @@
+:plugin: ganglia
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-output-ganglia/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Ganglia output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output allows you to pull metrics from your logs and ship them to
+Ganglia's gmond. This is heavily based on the Graphite output.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Ganglia Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-group>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-lifetime>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-max_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-metric>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-metric_type>> |<>, one of `["string", "int8", "uint8", "int16", "uint16", "int32", "uint32", "float", "double"]`|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-slope>> |<>, one of `["zero", "positive", "negative", "both", "unspecified"]`|No
+| <<{version}-plugins-{type}s-{plugin}-units>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-value>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-group"]
+===== `group`
+
+ * Value type is <>
+ * Default value is `""`
+
+Metric group
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * Default value is `"localhost"`
+
+The address of the ganglia server.
+
+[id="{version}-plugins-{type}s-{plugin}-lifetime"]
+===== `lifetime`
+
+ * Value type is <>
+ * Default value is `300`
+
+Lifetime in seconds of this metric
+
+[id="{version}-plugins-{type}s-{plugin}-max_interval"]
+===== `max_interval`
+
+ * Value type is <>
+ * Default value is `60`
+
+Maximum time in seconds between gmetric calls for this metric.
+
+[id="{version}-plugins-{type}s-{plugin}-metric"]
+===== `metric`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The metric to use. This supports dynamic strings like `%{host}`
+
+[id="{version}-plugins-{type}s-{plugin}-metric_type"]
+===== `metric_type`
+
+ * Value can be any of: `string`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `uint32`, `float`, `double`
+ * Default value is `"uint8"`
+
+The type of value for this metric.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <>
+ * Default value is `8649`
+
+The port to connect on your ganglia server.
+
+[id="{version}-plugins-{type}s-{plugin}-slope"]
+===== `slope`
+
+ * Value can be any of: `zero`, `positive`, `negative`, `both`, `unspecified`
+ * Default value is `"both"`
+
+Metric slope, represents metric behavior
+
+[id="{version}-plugins-{type}s-{plugin}-units"]
+===== `units`
+
+ * Value type is <>
+ * Default value is `""`
+
+Gmetric units for metric, such as "kb/sec" or "ms" or whatever unit
+this metric uses.
+
+[id="{version}-plugins-{type}s-{plugin}-value"]
+===== `value`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The value to use. This supports dynamic strings like `%{bytes}`
+It will be coerced to a floating point value. Values which cannot be
+coerced will be set to zero (0).
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/ganglia-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/ganglia-v3.0.5.asciidoc
new file mode 100644
index 000000000..f363aef57
--- /dev/null
+++ b/docs/versioned-plugins/outputs/ganglia-v3.0.5.asciidoc
@@ -0,0 +1,139 @@
+:plugin: ganglia
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-output-ganglia/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Ganglia output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output allows you to pull metrics from your logs and ship them to
+Ganglia's gmond. This is heavily based on the Graphite output.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Ganglia Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-group>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-lifetime>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metric>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-metric_type>> |<>, one of `["string", "int8", "uint8", "int16", "uint16", "int32", "uint32", "float", "double"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-slope>> |<>, one of `["zero", "positive", "negative", "both", "unspecified"]`|No +| <<{version}-plugins-{type}s-{plugin}-units>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-value>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-group"] +===== `group` + + * Value type is <> + * Default value is `""` + +Metric group + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"localhost"` + +The address of the ganglia server. + +[id="{version}-plugins-{type}s-{plugin}-lifetime"] +===== `lifetime` + + * Value type is <> + * Default value is `300` + +Lifetime in seconds of this metric + +[id="{version}-plugins-{type}s-{plugin}-max_interval"] +===== `max_interval` + + * Value type is <> + * Default value is `60` + +Maximum time in seconds between gmetric calls for this metric. + +[id="{version}-plugins-{type}s-{plugin}-metric"] +===== `metric` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The metric to use. This supports dynamic strings like `%{host}` + +[id="{version}-plugins-{type}s-{plugin}-metric_type"] +===== `metric_type` + + * Value can be any of: `string`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `uint32`, `float`, `double` + * Default value is `"uint8"` + +The type of value for this metric. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `8649` + +The port to connect on your ganglia server. + +[id="{version}-plugins-{type}s-{plugin}-slope"] +===== `slope` + + * Value can be any of: `zero`, `positive`, `negative`, `both`, `unspecified` + * Default value is `"both"` + +Metric slope, represents metric behavior + +[id="{version}-plugins-{type}s-{plugin}-units"] +===== `units` + + * Value type is <> + * Default value is `""` + +Gmetric units for metric, such as "kb/sec" or "ms" or whatever unit +this metric uses. + +[id="{version}-plugins-{type}s-{plugin}-value"] +===== `value` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The value to use. This supports dynamic strings like `%{bytes}` +It will be coerced to a floating point value. 
Values which cannot be coerced will be set to zero (0).
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/gelf-index.asciidoc b/docs/versioned-plugins/outputs/gelf-index.asciidoc
new file mode 100644
index 000000000..3c45e5693
--- /dev/null
+++ b/docs/versioned-plugins/outputs/gelf-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: gelf
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::gelf-v3.1.4.asciidoc[]
+include::gelf-v3.1.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/gelf-v3.1.3.asciidoc b/docs/versioned-plugins/outputs/gelf-v3.1.3.asciidoc
new file mode 100644
index 000000000..3795bfa9d
--- /dev/null
+++ b/docs/versioned-plugins/outputs/gelf-v3.1.3.asciidoc
@@ -0,0 +1,167 @@
+:plugin: gelf
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-gelf/blob/v3.1.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Gelf output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output generates messages in GELF format. This is most useful if you
+want to use Logstash to output events to Graylog2.
+
+More information at http://graylog2.org/gelf#specs[The Graylog2 GELF specs page]
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Gelf Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-chunksize>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-custom_fields>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-full_message>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-ignore_metadata>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-level>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sender>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ship_metadata>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ship_tags>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-short_message>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-chunksize"]
+===== `chunksize`
+
+ * Value type is <>
+ * Default value is `1420`
+
+The GELF chunksize. You usually don't need to change this.
+
+[id="{version}-plugins-{type}s-{plugin}-custom_fields"]
+===== `custom_fields`
+
+ * Value type is <>
+ * Default value is `{}`
+
+The GELF custom field mappings. GELF supports arbitrary attributes as custom
+fields.
This exposes that. Exclude the `_` portion of the field name +e.g. `custom_fields => ['foo_field', 'some_value']` +sets `_foo_field` = `some_value`. + +[id="{version}-plugins-{type}s-{plugin}-full_message"] +===== `full_message` + + * Value type is <> + * Default value is `"%{message}"` + +The GELF full message. Dynamic values like `%{foo}` are permitted here. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Graylog2 server IP address or hostname. + +[id="{version}-plugins-{type}s-{plugin}-ignore_metadata"] +===== `ignore_metadata` + + * Value type is <> + * Default value is `["@timestamp", "@version", "severity", "host", "source_host", "source_path", "short_message"]` + +Ignore these fields when `ship_metadata` is set. Typically this lists the +fields used in dynamic values for GELF fields. + +[id="{version}-plugins-{type}s-{plugin}-level"] +===== `level` + + * Value type is <> + * Default value is `["%{severity}", "INFO"]` + +The GELF message level. Dynamic values like `%{level}` are permitted here; +useful if you want to parse the 'log level' from an event and use that +as the GELF level/severity. + +Values here can be integers [0..7] inclusive or any of +"debug", "info", "warn", "error", "fatal" (case insensitive). +Single-character versions of these are also valid, "d", "i", "w", "e", "f", +"u" +The following additional severity\_labels from Logstash's syslog\_pri filter +are accepted: "emergency", "alert", "critical", "warning", "notice", and +"informational". + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `12201` + +Graylog2 server port number. + +[id="{version}-plugins-{type}s-{plugin}-sender"] +===== `sender` + + * Value type is <> + * Default value is `"%{host}"` + +Allow overriding of the GELF `sender` field. This is useful if you +want to use something other than the event's source host as the +"sender" of an event. A common case for this is using the application name +instead of the hostname. + +[id="{version}-plugins-{type}s-{plugin}-ship_metadata"] +===== `ship_metadata` + + * Value type is <> + * Default value is `true` + +Should Logstash ship metadata within event object? This will cause Logstash +to ship any fields in the event (such as those created by grok) in the GELF +messages. These will be sent as underscored "additional fields". + +[id="{version}-plugins-{type}s-{plugin}-ship_tags"] +===== `ship_tags` + + * Value type is <> + * Default value is `true` + +Ship tags within events. This will cause Logstash to ship the tags of an +event as the field `\_tags`. + +[id="{version}-plugins-{type}s-{plugin}-short_message"] +===== `short_message` + + * Value type is <> + * Default value is `"short_message"` + +The GELF short message field name. If the field does not exist or is empty, +the event message is taken instead. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/gelf-v3.1.4.asciidoc b/docs/versioned-plugins/outputs/gelf-v3.1.4.asciidoc new file mode 100644 index 000000000..68db01129 --- /dev/null +++ b/docs/versioned-plugins/outputs/gelf-v3.1.4.asciidoc @@ -0,0 +1,167 @@ +:plugin: gelf +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// +:version: v3.1.4 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-gelf/blob/v3.1.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Gelf output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output generates messages in GELF format. This is most useful if you +want to use Logstash to output events to Graylog2. + +More information at http://graylog2.org/gelf#specs[The Graylog2 GELF specs page] + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Gelf Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-chunksize>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-custom_fields>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-full_message>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-ignore_metadata>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-level>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sender>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ship_metadata>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ship_tags>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-short_message>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-chunksize"] +===== `chunksize` + + * Value type is <> + * Default value is `1420` + +The GELF chunksize. You usually don't need to change this. + +[id="{version}-plugins-{type}s-{plugin}-custom_fields"] +===== `custom_fields` + + * Value type is <> + * Default value is `{}` + +The GELF custom field mappings. GELF supports arbitrary attributes as custom +fields. This exposes that. Exclude the `_` portion of the field name +e.g. `custom_fields => ['foo_field', 'some_value']` +sets `_foo_field` = `some_value`. + +[id="{version}-plugins-{type}s-{plugin}-full_message"] +===== `full_message` + + * Value type is <> + * Default value is `"%{message}"` + +The GELF full message. Dynamic values like `%{foo}` are permitted here. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Graylog2 server IP address or hostname. + +[id="{version}-plugins-{type}s-{plugin}-ignore_metadata"] +===== `ignore_metadata` + + * Value type is <> + * Default value is `["@timestamp", "@version", "severity", "host", "source_host", "source_path", "short_message"]` + +Ignore these fields when `ship_metadata` is set. Typically this lists the +fields used in dynamic values for GELF fields. + +[id="{version}-plugins-{type}s-{plugin}-level"] +===== `level` + + * Value type is <> + * Default value is `["%{severity}", "INFO"]` + +The GELF message level. 
Dynamic values like `%{level}` are permitted here; +useful if you want to parse the 'log level' from an event and use that +as the GELF level/severity. + +Values here can be integers [0..7] inclusive or any of +"debug", "info", "warn", "error", "fatal" (case insensitive). +Single-character versions of these are also valid, "d", "i", "w", "e", "f", +"u" +The following additional severity\_labels from Logstash's syslog\_pri filter +are accepted: "emergency", "alert", "critical", "warning", "notice", and +"informational". + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `12201` + +Graylog2 server port number. + +[id="{version}-plugins-{type}s-{plugin}-sender"] +===== `sender` + + * Value type is <> + * Default value is `"%{host}"` + +Allow overriding of the GELF `sender` field. This is useful if you +want to use something other than the event's source host as the +"sender" of an event. A common case for this is using the application name +instead of the hostname. + +[id="{version}-plugins-{type}s-{plugin}-ship_metadata"] +===== `ship_metadata` + + * Value type is <> + * Default value is `true` + +Should Logstash ship metadata within event object? This will cause Logstash +to ship any fields in the event (such as those created by grok) in the GELF +messages. These will be sent as underscored "additional fields". + +[id="{version}-plugins-{type}s-{plugin}-ship_tags"] +===== `ship_tags` + + * Value type is <> + * Default value is `true` + +Ship tags within events. This will cause Logstash to ship the tags of an +event as the field `\_tags`. + +[id="{version}-plugins-{type}s-{plugin}-short_message"] +===== `short_message` + + * Value type is <> + * Default value is `"short_message"` + +The GELF short message field name. If the field does not exist or is empty, +the event message is taken instead. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/gemfire-index.asciidoc b/docs/versioned-plugins/outputs/gemfire-index.asciidoc new file mode 100644 index 000000000..4feed9525 --- /dev/null +++ b/docs/versioned-plugins/outputs/gemfire-index.asciidoc @@ -0,0 +1,14 @@ +:plugin: gemfire +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-08-16 +| <> | 2017-06-23 +|======================================================================= + +include::gemfire-v2.0.6.asciidoc[] +include::gemfire-v2.0.5.asciidoc[] + diff --git a/docs/versioned-plugins/outputs/gemfire-v2.0.5.asciidoc b/docs/versioned-plugins/outputs/gemfire-v2.0.5.asciidoc new file mode 100644 index 000000000..5a4dd14f8 --- /dev/null +++ b/docs/versioned-plugins/outputs/gemfire-v2.0.5.asciidoc @@ -0,0 +1,100 @@ +:plugin: gemfire +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v2.0.5 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-output-gemfire/blob/v2.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Gemfire output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Push events to a GemFire region.
+
+GemFire is an object database.
+
+To use this plugin, you need to add gemfire.jar to your CLASSPATH;
+using format=json requires jackson.jar too.
+
+Note: this plugin has only been tested with GemFire 7.0.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Gemfire Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cache_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cache_xml_file>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-key_format>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-region_name>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-cache_name"]
+===== `cache_name`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+Your client cache name
+
+[id="{version}-plugins-{type}s-{plugin}-cache_xml_file"]
+===== `cache_xml_file`
+
+ * Value type is <>
+ * Default value is `nil`
+
+The path to a GemFire client cache XML file.
+
+Example:
+[source,xml]
+
+
+
+
+
+
+
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-key_format"]
+===== `key_format`
+
+ * Value type is <>
+ * Default value is `"%{host}-%{@timestamp}"`
+
+A sprintf format to use when building keys
+
+[id="{version}-plugins-{type}s-{plugin}-region_name"]
+===== `region_name`
+
+ * Value type is <>
+ * Default value is `"Logstash"`
+
+The region name
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/gemfire-v2.0.6.asciidoc b/docs/versioned-plugins/outputs/gemfire-v2.0.6.asciidoc
new file mode 100644
index 000000000..457873493
--- /dev/null
+++ b/docs/versioned-plugins/outputs/gemfire-v2.0.6.asciidoc
@@ -0,0 +1,100 @@
+:plugin: gemfire
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.6
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-output-gemfire/blob/v2.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Gemfire output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Push events to a GemFire region.
+
+GemFire is an object database.
+
+To use this plugin, you need to add gemfire.jar to your CLASSPATH;
+using format=json requires jackson.jar too.
+
+Note: this plugin has only been tested with GemFire 7.0.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Gemfire Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-cache_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cache_xml_file>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-key_format>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-region_name>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-cache_name"]
+===== `cache_name`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+Your client cache name
+
+[id="{version}-plugins-{type}s-{plugin}-cache_xml_file"]
+===== `cache_xml_file`
+
+ * Value type is <>
+ * Default value is `nil`
+
+The path to a GemFire client cache XML file.
+
+Example:
+[source,xml]
+
+
+
+
+
+
+
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-key_format"]
+===== `key_format`
+
+ * Value type is <>
+ * Default value is `"%{host}-%{@timestamp}"`
+
+A sprintf format to use when building keys
+
+[id="{version}-plugins-{type}s-{plugin}-region_name"]
+===== `region_name`
+
+ * Value type is <>
+ * Default value is `"Logstash"`
+
+The region name
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/google_bigquery-index.asciidoc b/docs/versioned-plugins/outputs/google_bigquery-index.asciidoc
new file mode 100644
index 000000000..5c4e759aa
--- /dev/null
+++ b/docs/versioned-plugins/outputs/google_bigquery-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: google_bigquery
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::google_bigquery-v3.2.3.asciidoc[]
+include::google_bigquery-v3.2.2.asciidoc[]
+include::google_bigquery-v3.2.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/google_bigquery-v3.2.1.asciidoc b/docs/versioned-plugins/outputs/google_bigquery-v3.2.1.asciidoc
new file mode 100644
index 000000000..6bcdbd41f
--- /dev/null
+++ b/docs/versioned-plugins/outputs/google_bigquery-v3.2.1.asciidoc
@@ -0,0 +1,302 @@
+:plugin: google_bigquery
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.2.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-google_bigquery/blob/v3.2.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Google_bigquery output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+[source,txt]
+-----
+Author: Rodrigo De Castro
+Date: 2013-09-20
+
+Copyright 2013 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +----- +Summary: plugin to upload log events to Google BigQuery (BQ), rolling +files based on the date pattern provided as a configuration setting. Events +are written to files locally and, once file is closed, this plugin uploads +it to the configured BigQuery dataset. + +VERY IMPORTANT: +. To make good use of BigQuery, your log events should be parsed and +structured. Consider using grok to parse your events into fields that can +be uploaded to BQ. +. You must configure your plugin so it gets events with the same structure, +so the BigQuery schema suits them. In case you want to upload log events +with different structures, you can utilize multiple configuration blocks, +separating different log events with Logstash conditionals. More details on +Logstash conditionals can be found here: +http://logstash.net/docs/1.2.1/configuration#conditionals + +For more info on Google BigQuery, please go to: +https://developers.google.com/bigquery/ + +In order to use this plugin, a Google service account must be used. For +more information, please refer to: +https://developers.google.com/storage/docs/authentication#service_accounts + +Recommendations: + +. Experiment with the settings depending on how much log data you generate, +your needs to see "fresh" data, and how much data you could lose in the event +of crash. For instance, if you want to see recent data in BQ quickly, you +could configure the plugin to upload data every minute or so (provided you +have enough log events to justify that). Note also, that if uploads are too +frequent, there is no guarantee that they will be imported in the same order, +so later data may be available before earlier data. + +. BigQuery charges for storage and for queries, depending on how much data +it reads to perform a query. These are other aspects to consider when +considering the date pattern which will be used to create new tables and also +how to compose the queries when using BQ. For more info on BigQuery Pricing, +please access: +https://developers.google.com/bigquery/pricing + +USAGE: +This is an example of logstash config: + +[source,json] +-------------------------- +output { + google_bigquery { + project_id => "folkloric-guru-278" (required) + dataset => "logs" (required) + csv_schema => "path:STRING,status:INTEGER,score:FLOAT" (required) <1> + key_path => "/path/to/privatekey.p12" (required) + key_password => "notasecret" (optional) + service_account => "1234@developer.gserviceaccount.com" (required) + temp_directory => "/tmp/logstash-bq" (optional) + temp_file_prefix => "logstash_bq" (optional) + date_pattern => "%Y-%m-%dT%H:00" (optional) + flush_interval_secs => 2 (optional) + uploader_interval_secs => 60 (optional) + deleter_interval_secs => 60 (optional) + } +} +-------------------------- + +<1> Specify either a csv_schema or a json_schema. + +* Refactor common code between Google BQ and GCS plugins. +* Turn Google API code into a Plugin Mixin (like AwsConfig). +* There's no recover method, so if logstash/plugin crashes, files may not +be uploaded to BQ. 
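+
+As a sketch of the conditional approach mentioned above (the `type` values,
+table prefixes, and schemas here are illustrative, not prescriptive), events
+with different structures can be routed to separate tables:
+
+[source,ruby]
+output {
+  # hypothetical event types; credentials reuse the values from the
+  # example config above
+  if [type] == "access_log" {
+    google_bigquery {
+      project_id      => "folkloric-guru-278"
+      dataset         => "logs"
+      table_prefix    => "access"
+      csv_schema      => "path:STRING,status:INTEGER,score:FLOAT"
+      key_path        => "/path/to/privatekey.p12"
+      service_account => "1234@developer.gserviceaccount.com"
+    }
+  } else if [type] == "app_log" {
+    google_bigquery {
+      project_id      => "folkloric-guru-278"
+      dataset         => "logs"
+      table_prefix    => "app"
+      csv_schema      => "severity:STRING,message:STRING"
+      key_path        => "/path/to/privatekey.p12"
+      service_account => "1234@developer.gserviceaccount.com"
+    }
+  }
+}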
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Google_bigquery Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-csv_schema>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-dataset>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-date_pattern>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-deleter_interval_secs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-flush_interval_secs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ignore_unknown_values>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-json_schema>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key_path>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-project_id>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-service_account>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-table_prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-table_separator>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-temp_directory>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-temp_file_prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-uploader_interval_secs>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-csv_schema"] +===== `csv_schema` + + * Value type is <> + * Default value is `nil` + +Schema for log data. It must follow this format: +:,:,... +Example: path:STRING,status:INTEGER,score:FLOAT + +[id="{version}-plugins-{type}s-{plugin}-dataset"] +===== `dataset` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +BigQuery dataset to which these events will be added to. + +[id="{version}-plugins-{type}s-{plugin}-date_pattern"] +===== `date_pattern` + + * Value type is <> + * Default value is `"%Y-%m-%dT%H:00"` + +Time pattern for BigQuery table, defaults to hourly tables. +Must Time.strftime patterns: www.ruby-doc.org/core-2.0/Time.html#method-i-strftime + +[id="{version}-plugins-{type}s-{plugin}-deleter_interval_secs"] +===== `deleter_interval_secs` + + * Value type is <> + * Default value is `60` + +Deleter interval when checking if upload jobs are done for file deletion. +This only affects how long files are on the hard disk after the job is done. + +[id="{version}-plugins-{type}s-{plugin}-flush_interval_secs"] +===== `flush_interval_secs` + + * Value type is <> + * Default value is `2` + +Flush interval in seconds for flushing writes to log files. 0 will flush +on every message. + +[id="{version}-plugins-{type}s-{plugin}-ignore_unknown_values"] +===== `ignore_unknown_values` + + * Value type is <> + * Default value is `false` + +Indicates if BigQuery should allow extra values that are not represented in the table schema. +If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. + +[id="{version}-plugins-{type}s-{plugin}-json_schema"] +===== `json_schema` + + * Value type is <> + * Default value is `nil` + +Schema for log data, as a hash. 
Example: +json_schema => { + fields => [{ + name => "timestamp" + type => "TIMESTAMP" + }, { + name => "host" + type => "STRING" + }, { + name => "message" + type => "STRING" + }] +} + +[id="{version}-plugins-{type}s-{plugin}-key_password"] +===== `key_password` + + * Value type is <> + * Default value is `"notasecret"` + +Private key password for service account private key. + +[id="{version}-plugins-{type}s-{plugin}-key_path"] +===== `key_path` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Path to private key file for Google Service Account. + +[id="{version}-plugins-{type}s-{plugin}-project_id"] +===== `project_id` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Google Cloud Project ID (number, not Project Name!). + +[id="{version}-plugins-{type}s-{plugin}-service_account"] +===== `service_account` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Service account to access Google APIs. + +[id="{version}-plugins-{type}s-{plugin}-table_prefix"] +===== `table_prefix` + + * Value type is <> + * Default value is `"logstash"` + +BigQuery table ID prefix to be used when creating new tables for log data. +Table name will be + +[id="{version}-plugins-{type}s-{plugin}-table_separator"] +===== `table_separator` + + * Value type is <> + * Default value is `"_"` + +BigQuery table separator to be added between the table_prefix and the +date suffix. + +[id="{version}-plugins-{type}s-{plugin}-temp_directory"] +===== `temp_directory` + + * Value type is <> + * Default value is `""` + +Directory where temporary files are stored. +Defaults to /tmp/logstash-bq- + +[id="{version}-plugins-{type}s-{plugin}-temp_file_prefix"] +===== `temp_file_prefix` + + * Value type is <> + * Default value is `"logstash_bq"` + +Temporary local file prefix. Log file will follow the format: +_hostname_date.part?.log + +[id="{version}-plugins-{type}s-{plugin}-uploader_interval_secs"] +===== `uploader_interval_secs` + + * Value type is <> + * Default value is `60` + +Uploader interval when uploading new files to BigQuery. Adjust time based +on your time pattern (for example, for hourly files, this interval can be +around one hour). + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/google_bigquery-v3.2.2.asciidoc b/docs/versioned-plugins/outputs/google_bigquery-v3.2.2.asciidoc new file mode 100644 index 000000000..39fa46261 --- /dev/null +++ b/docs/versioned-plugins/outputs/google_bigquery-v3.2.2.asciidoc @@ -0,0 +1,302 @@ +:plugin: google_bigquery +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.2.2 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-google_bigquery/blob/v3.2.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Google_bigquery output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +[source,txt] +----- +Author: Rodrigo De Castro +Date: 2013-09-20 + +Copyright 2013 Google Inc. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +----- +Summary: plugin to upload log events to Google BigQuery (BQ), rolling +files based on the date pattern provided as a configuration setting. Events +are written to files locally and, once file is closed, this plugin uploads +it to the configured BigQuery dataset. + +VERY IMPORTANT: +. To make good use of BigQuery, your log events should be parsed and +structured. Consider using grok to parse your events into fields that can +be uploaded to BQ. +. You must configure your plugin so it gets events with the same structure, +so the BigQuery schema suits them. In case you want to upload log events +with different structures, you can utilize multiple configuration blocks, +separating different log events with Logstash conditionals. More details on +Logstash conditionals can be found here: +http://logstash.net/docs/1.2.1/configuration#conditionals + +For more info on Google BigQuery, please go to: +https://developers.google.com/bigquery/ + +In order to use this plugin, a Google service account must be used. For +more information, please refer to: +https://developers.google.com/storage/docs/authentication#service_accounts + +Recommendations: + +. Experiment with the settings depending on how much log data you generate, +your needs to see "fresh" data, and how much data you could lose in the event +of crash. For instance, if you want to see recent data in BQ quickly, you +could configure the plugin to upload data every minute or so (provided you +have enough log events to justify that). Note also, that if uploads are too +frequent, there is no guarantee that they will be imported in the same order, +so later data may be available before earlier data. + +. BigQuery charges for storage and for queries, depending on how much data +it reads to perform a query. These are other aspects to consider when +considering the date pattern which will be used to create new tables and also +how to compose the queries when using BQ. For more info on BigQuery Pricing, +please access: +https://developers.google.com/bigquery/pricing + +USAGE: +This is an example of logstash config: + +[source,json] +-------------------------- +output { + google_bigquery { + project_id => "folkloric-guru-278" (required) + dataset => "logs" (required) + csv_schema => "path:STRING,status:INTEGER,score:FLOAT" (required) <1> + key_path => "/path/to/privatekey.p12" (required) + key_password => "notasecret" (optional) + service_account => "1234@developer.gserviceaccount.com" (required) + temp_directory => "/tmp/logstash-bq" (optional) + temp_file_prefix => "logstash_bq" (optional) + date_pattern => "%Y-%m-%dT%H:00" (optional) + flush_interval_secs => 2 (optional) + uploader_interval_secs => 60 (optional) + deleter_interval_secs => 60 (optional) + } +} +-------------------------- + +<1> Specify either a csv_schema or a json_schema. + +* Refactor common code between Google BQ and GCS plugins. +* Turn Google API code into a Plugin Mixin (like AwsConfig). 
+* There's no recover method, so if logstash/plugin crashes, files may not +be uploaded to BQ. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Google_bigquery Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-csv_schema>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-dataset>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-date_pattern>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-deleter_interval_secs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-flush_interval_secs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ignore_unknown_values>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-json_schema>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key_path>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-project_id>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-service_account>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-table_prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-table_separator>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-temp_directory>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-temp_file_prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-uploader_interval_secs>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-csv_schema"] +===== `csv_schema` + + * Value type is <> + * Default value is `nil` + +Schema for log data. It must follow this format: +:,:,... +Example: path:STRING,status:INTEGER,score:FLOAT + +[id="{version}-plugins-{type}s-{plugin}-dataset"] +===== `dataset` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +BigQuery dataset to which these events will be added to. + +[id="{version}-plugins-{type}s-{plugin}-date_pattern"] +===== `date_pattern` + + * Value type is <> + * Default value is `"%Y-%m-%dT%H:00"` + +Time pattern for BigQuery table, defaults to hourly tables. +Must Time.strftime patterns: www.ruby-doc.org/core-2.0/Time.html#method-i-strftime + +[id="{version}-plugins-{type}s-{plugin}-deleter_interval_secs"] +===== `deleter_interval_secs` + + * Value type is <> + * Default value is `60` + +Deleter interval when checking if upload jobs are done for file deletion. +This only affects how long files are on the hard disk after the job is done. + +[id="{version}-plugins-{type}s-{plugin}-flush_interval_secs"] +===== `flush_interval_secs` + + * Value type is <> + * Default value is `2` + +Flush interval in seconds for flushing writes to log files. 0 will flush +on every message. + +[id="{version}-plugins-{type}s-{plugin}-ignore_unknown_values"] +===== `ignore_unknown_values` + + * Value type is <> + * Default value is `false` + +Indicates if BigQuery should allow extra values that are not represented in the table schema. +If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. 
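+
+For example, a sketch (the schema and credentials reuse the illustrative
+values from the usage example above) that lets BigQuery silently drop
+unknown event fields instead of flagging the rows as bad records:
+
+[source,ruby]
+output {
+  google_bigquery {
+    # same illustrative credentials as the usage example above
+    project_id            => "folkloric-guru-278"
+    dataset               => "logs"
+    csv_schema            => "path:STRING,status:INTEGER,score:FLOAT"
+    key_path              => "/path/to/privatekey.p12"
+    service_account       => "1234@developer.gserviceaccount.com"
+    ignore_unknown_values => true
+  }
+}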
+ +[id="{version}-plugins-{type}s-{plugin}-json_schema"] +===== `json_schema` + + * Value type is <> + * Default value is `nil` + +Schema for log data, as a hash. Example: +json_schema => { + fields => [{ + name => "timestamp" + type => "TIMESTAMP" + }, { + name => "host" + type => "STRING" + }, { + name => "message" + type => "STRING" + }] +} + +[id="{version}-plugins-{type}s-{plugin}-key_password"] +===== `key_password` + + * Value type is <> + * Default value is `"notasecret"` + +Private key password for service account private key. + +[id="{version}-plugins-{type}s-{plugin}-key_path"] +===== `key_path` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Path to private key file for Google Service Account. + +[id="{version}-plugins-{type}s-{plugin}-project_id"] +===== `project_id` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Google Cloud Project ID (number, not Project Name!). + +[id="{version}-plugins-{type}s-{plugin}-service_account"] +===== `service_account` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Service account to access Google APIs. + +[id="{version}-plugins-{type}s-{plugin}-table_prefix"] +===== `table_prefix` + + * Value type is <> + * Default value is `"logstash"` + +BigQuery table ID prefix to be used when creating new tables for log data. +Table name will be + +[id="{version}-plugins-{type}s-{plugin}-table_separator"] +===== `table_separator` + + * Value type is <> + * Default value is `"_"` + +BigQuery table separator to be added between the table_prefix and the +date suffix. + +[id="{version}-plugins-{type}s-{plugin}-temp_directory"] +===== `temp_directory` + + * Value type is <> + * Default value is `""` + +Directory where temporary files are stored. +Defaults to /tmp/logstash-bq- + +[id="{version}-plugins-{type}s-{plugin}-temp_file_prefix"] +===== `temp_file_prefix` + + * Value type is <> + * Default value is `"logstash_bq"` + +Temporary local file prefix. Log file will follow the format: +_hostname_date.part?.log + +[id="{version}-plugins-{type}s-{plugin}-uploader_interval_secs"] +===== `uploader_interval_secs` + + * Value type is <> + * Default value is `60` + +Uploader interval when uploading new files to BigQuery. Adjust time based +on your time pattern (for example, for hourly files, this interval can be +around one hour). + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/google_bigquery-v3.2.3.asciidoc b/docs/versioned-plugins/outputs/google_bigquery-v3.2.3.asciidoc new file mode 100644 index 000000000..8ddb695ed --- /dev/null +++ b/docs/versioned-plugins/outputs/google_bigquery-v3.2.3.asciidoc @@ -0,0 +1,302 @@ +:plugin: google_bigquery +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.2.3 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-output-google_bigquery/blob/v3.2.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Google_bigquery output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +[source,txt] +----- +Author: Rodrigo De Castro +Date: 2013-09-20 + +Copyright 2013 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +----- +Summary: plugin to upload log events to Google BigQuery (BQ), rolling +files based on the date pattern provided as a configuration setting. Events +are written to files locally and, once file is closed, this plugin uploads +it to the configured BigQuery dataset. + +VERY IMPORTANT: +. To make good use of BigQuery, your log events should be parsed and +structured. Consider using grok to parse your events into fields that can +be uploaded to BQ. +. You must configure your plugin so it gets events with the same structure, +so the BigQuery schema suits them. In case you want to upload log events +with different structures, you can utilize multiple configuration blocks, +separating different log events with Logstash conditionals. More details on +Logstash conditionals can be found here: +http://logstash.net/docs/1.2.1/configuration#conditionals + +For more info on Google BigQuery, please go to: +https://developers.google.com/bigquery/ + +In order to use this plugin, a Google service account must be used. For +more information, please refer to: +https://developers.google.com/storage/docs/authentication#service_accounts + +Recommendations: + +. Experiment with the settings depending on how much log data you generate, +your needs to see "fresh" data, and how much data you could lose in the event +of crash. For instance, if you want to see recent data in BQ quickly, you +could configure the plugin to upload data every minute or so (provided you +have enough log events to justify that). Note also, that if uploads are too +frequent, there is no guarantee that they will be imported in the same order, +so later data may be available before earlier data. + +. BigQuery charges for storage and for queries, depending on how much data +it reads to perform a query. These are other aspects to consider when +considering the date pattern which will be used to create new tables and also +how to compose the queries when using BQ. 
For more info on BigQuery Pricing, +please access: +https://developers.google.com/bigquery/pricing + +USAGE: +This is an example of logstash config: + +[source,json] +-------------------------- +output { + google_bigquery { + project_id => "folkloric-guru-278" (required) + dataset => "logs" (required) + csv_schema => "path:STRING,status:INTEGER,score:FLOAT" (required) <1> + key_path => "/path/to/privatekey.p12" (required) + key_password => "notasecret" (optional) + service_account => "1234@developer.gserviceaccount.com" (required) + temp_directory => "/tmp/logstash-bq" (optional) + temp_file_prefix => "logstash_bq" (optional) + date_pattern => "%Y-%m-%dT%H:00" (optional) + flush_interval_secs => 2 (optional) + uploader_interval_secs => 60 (optional) + deleter_interval_secs => 60 (optional) + } +} +-------------------------- + +<1> Specify either a csv_schema or a json_schema. + +* Refactor common code between Google BQ and GCS plugins. +* Turn Google API code into a Plugin Mixin (like AwsConfig). +* There's no recover method, so if logstash/plugin crashes, files may not +be uploaded to BQ. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Google_bigquery Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-csv_schema>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-dataset>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-date_pattern>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-deleter_interval_secs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-flush_interval_secs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ignore_unknown_values>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-json_schema>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key_path>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-project_id>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-service_account>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-table_prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-table_separator>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-temp_directory>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-temp_file_prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-uploader_interval_secs>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-csv_schema"] +===== `csv_schema` + + * Value type is <> + * Default value is `nil` + +Schema for log data. It must follow this format: +:,:,... +Example: path:STRING,status:INTEGER,score:FLOAT + +[id="{version}-plugins-{type}s-{plugin}-dataset"] +===== `dataset` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +BigQuery dataset to which these events will be added to. + +[id="{version}-plugins-{type}s-{plugin}-date_pattern"] +===== `date_pattern` + + * Value type is <> + * Default value is `"%Y-%m-%dT%H:00"` + +Time pattern for BigQuery table, defaults to hourly tables. 
+
Must be a valid Time.strftime pattern (for example, `%Y-%m-%d` rolls daily
+tables): www.ruby-doc.org/core-2.0/Time.html#method-i-strftime
+
+[id="{version}-plugins-{type}s-{plugin}-deleter_interval_secs"]
+===== `deleter_interval_secs`
+
+ * Value type is <>
+ * Default value is `60`
+
+Interval, in seconds, at which the deleter checks whether upload jobs are done
+so the corresponding local files can be deleted. This only affects how long
+files remain on disk after their upload job is done.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_interval_secs"]
+===== `flush_interval_secs`
+
+ * Value type is <>
+ * Default value is `2`
+
+Flush interval, in seconds, for writes to the local log files. A value of 0
+flushes on every message.
+
+[id="{version}-plugins-{type}s-{plugin}-ignore_unknown_values"]
+===== `ignore_unknown_values`
+
+ * Value type is <>
+ * Default value is `false`
+
+Indicates whether BigQuery should allow extra values that are not represented in the table schema.
+If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result.
+
+[id="{version}-plugins-{type}s-{plugin}-json_schema"]
+===== `json_schema`
+
+ * Value type is <>
+ * Default value is `nil`
+
+Schema for log data, as a hash. Example:
+json_schema => {
+    fields => [{
+        name => "timestamp"
+        type => "TIMESTAMP"
+    }, {
+        name => "host"
+        type => "STRING"
+    }, {
+        name => "message"
+        type => "STRING"
+    }]
+}
+
+[id="{version}-plugins-{type}s-{plugin}-key_password"]
+===== `key_password`
+
+ * Value type is <>
+ * Default value is `"notasecret"`
+
+Private key password for the service account private key.
+
+[id="{version}-plugins-{type}s-{plugin}-key_path"]
+===== `key_path`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path to the private key file for the Google service account.
+
+[id="{version}-plugins-{type}s-{plugin}-project_id"]
+===== `project_id`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Google Cloud project ID (the numeric ID, not the project name).
+
+[id="{version}-plugins-{type}s-{plugin}-service_account"]
+===== `service_account`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Service account used to access the Google APIs.
+
+[id="{version}-plugins-{type}s-{plugin}-table_prefix"]
+===== `table_prefix`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+BigQuery table ID prefix to be used when creating new tables for log data.
+The table name is the prefix, the separator, and the date suffix concatenated.
+
+[id="{version}-plugins-{type}s-{plugin}-table_separator"]
+===== `table_separator`
+
+ * Value type is <>
+ * Default value is `"_"`
+
+BigQuery table separator to be added between the table_prefix and the
+date suffix.
+
+[id="{version}-plugins-{type}s-{plugin}-temp_directory"]
+===== `temp_directory`
+
+ * Value type is <>
+ * Default value is `""`
+
+Directory where temporary files are stored.
+Defaults to /tmp/logstash-bq-
+
+[id="{version}-plugins-{type}s-{plugin}-temp_file_prefix"]
+===== `temp_file_prefix`
+
+ * Value type is <>
+ * Default value is `"logstash_bq"`
+
+Temporary local file prefix. Log files follow the format:
+_hostname_date.part?.log
+
+[id="{version}-plugins-{type}s-{plugin}-uploader_interval_secs"]
+===== `uploader_interval_secs`
+
+ * Value type is <>
+ * Default value is `60`
+
+Uploader interval when uploading new files to BigQuery.
Adjust time based +on your time pattern (for example, for hourly files, this interval can be +around one hour). + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/google_cloud_storage-index.asciidoc b/docs/versioned-plugins/outputs/google_cloud_storage-index.asciidoc new file mode 100644 index 000000000..e1ae4540e --- /dev/null +++ b/docs/versioned-plugins/outputs/google_cloud_storage-index.asciidoc @@ -0,0 +1,14 @@ +:plugin: google_cloud_storage +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-08-16 +| <> | 2017-06-23 +|======================================================================= + +include::google_cloud_storage-v3.0.4.asciidoc[] +include::google_cloud_storage-v3.0.3.asciidoc[] + diff --git a/docs/versioned-plugins/outputs/google_cloud_storage-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/google_cloud_storage-v3.0.3.asciidoc new file mode 100644 index 000000000..138f559ac --- /dev/null +++ b/docs/versioned-plugins/outputs/google_cloud_storage-v3.0.3.asciidoc @@ -0,0 +1,206 @@ +:plugin: google_cloud_storage +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-output-google_cloud_storage/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Google_cloud_storage output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Summary: plugin to upload log events to Google Cloud Storage (GCS), rolling +files based on the date pattern provided as a configuration setting. Events +are written to files locally and, once file is closed, this plugin uploads +it to the configured bucket. + +For more info on Google Cloud Storage, please go to: +https://cloud.google.com/products/cloud-storage + +In order to use this plugin, a Google service account must be used. For +more information, please refer to: +https://developers.google.com/storage/docs/authentication#service_accounts + +Recommendation: experiment with the settings depending on how much log +data you generate, so the uploader can keep up with the generated logs. +Using gzip output can be a good option to reduce network traffic when +uploading the log files and in terms of storage costs as well. + +USAGE: +This is an example of logstash config: + +[source,json] +-------------------------- +output { + google_cloud_storage { + bucket => "my_bucket" (required) + key_path => "/path/to/privatekey.p12" (required) + key_password => "notasecret" (optional) + service_account => "1234@developer.gserviceaccount.com" (required) + temp_directory => "/tmp/logstash-gcs" (optional) + log_file_prefix => "logstash_gcs" (optional) + max_file_size_kbytes => 1024 (optional) + output_format => "plain" (optional) + date_pattern => "%Y-%m-%dT%H:00" (optional) + flush_interval_secs => 2 (optional) + gzip => false (optional) + uploader_interval_secs => 60 (optional) + } +} +-------------------------- + +* Support logstash event variables to determine filename. 
+
* Turn Google API code into a Plugin Mixin (like AwsConfig).
+* There's no recover method, so if logstash or the plugin crashes, files may
+not be uploaded to GCS.
+* Allow user to configure file name.
+* Allow parallel uploads for heavier loads (+ connection configuration if
+exposed by Ruby API client)
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Google_cloud_storage Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-bucket>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-date_pattern>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-flush_interval_secs>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-gzip>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-key_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-key_path>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-log_file_prefix>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-max_file_size_kbytes>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-output_format>> |<>, one of `["json", "plain"]`|No
+| <<{version}-plugins-{type}s-{plugin}-service_account>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-temp_directory>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-uploader_interval_secs>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-bucket"]
+===== `bucket`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+GCS bucket name, without "gs://" or any other prefix.
+
+[id="{version}-plugins-{type}s-{plugin}-date_pattern"]
+===== `date_pattern`
+
+ * Value type is <>
+ * Default value is `"%Y-%m-%dT%H:00"`
+
+Time pattern for the log file name; defaults to hourly files.
+Must be a valid Time.strftime pattern (for example, `%Y-%m-%d` rolls daily
+files): www.ruby-doc.org/core-2.0/Time.html#method-i-strftime
+
+[id="{version}-plugins-{type}s-{plugin}-flush_interval_secs"]
+===== `flush_interval_secs`
+
+ * Value type is <>
+ * Default value is `2`
+
+Flush interval, in seconds, for writes to the local log files. A value of 0
+flushes on every message.
+
+[id="{version}-plugins-{type}s-{plugin}-gzip"]
+===== `gzip`
+
+ * Value type is <>
+ * Default value is `false`
+
+Gzip the output stream when writing events to log files.
+
+[id="{version}-plugins-{type}s-{plugin}-key_password"]
+===== `key_password`
+
+ * Value type is <>
+ * Default value is `"notasecret"`
+
+GCS private key password.
+
+[id="{version}-plugins-{type}s-{plugin}-key_path"]
+===== `key_path`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+GCS path to the private key file.
+
+[id="{version}-plugins-{type}s-{plugin}-log_file_prefix"]
+===== `log_file_prefix`
+
+ * Value type is <>
+ * Default value is `"logstash_gcs"`
+
+Log file prefix. Log files follow the format:
+_hostname_date<.part?>.log
+
+[id="{version}-plugins-{type}s-{plugin}-max_file_size_kbytes"]
+===== `max_file_size_kbytes`
+
+ * Value type is <>
+ * Default value is `10000`
+
+Sets the maximum file size in kilobytes. A value of 0 disables the max file
+size check.
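+
+As a usage sketch for size-based rolling (the bucket, key, and account values
+are illustrative), files can be rolled at roughly 5 MB instead of the default:
+
+[source,json]
+--------------------------
+output {
+  google_cloud_storage {
+    bucket => "my_bucket"
+    key_path => "/path/to/privatekey.p12"
+    service_account => "1234@developer.gserviceaccount.com"
+    max_file_size_kbytes => 5120
+  }
+}
+--------------------------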
+ +[id="{version}-plugins-{type}s-{plugin}-output_format"] +===== `output_format` + + * Value can be any of: `json`, `plain` + * Default value is `"plain"` + +The event format you want to store in files. Defaults to plain text. + +[id="{version}-plugins-{type}s-{plugin}-service_account"] +===== `service_account` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +GCS service account. + +[id="{version}-plugins-{type}s-{plugin}-temp_directory"] +===== `temp_directory` + + * Value type is <> + * Default value is `""` + +Directory where temporary files are stored. +Defaults to /tmp/logstash-gcs- + +[id="{version}-plugins-{type}s-{plugin}-uploader_interval_secs"] +===== `uploader_interval_secs` + + * Value type is <> + * Default value is `60` + +Uploader interval when uploading new files to GCS. Adjust time based +on your time pattern (for example, for hourly files, this interval can be +around one hour). + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/google_cloud_storage-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/google_cloud_storage-v3.0.4.asciidoc new file mode 100644 index 000000000..accf9fd08 --- /dev/null +++ b/docs/versioned-plugins/outputs/google_cloud_storage-v3.0.4.asciidoc @@ -0,0 +1,206 @@ +:plugin: google_cloud_storage +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-google_cloud_storage/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Google_cloud_storage output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Summary: plugin to upload log events to Google Cloud Storage (GCS), rolling +files based on the date pattern provided as a configuration setting. Events +are written to files locally and, once file is closed, this plugin uploads +it to the configured bucket. + +For more info on Google Cloud Storage, please go to: +https://cloud.google.com/products/cloud-storage + +In order to use this plugin, a Google service account must be used. For +more information, please refer to: +https://developers.google.com/storage/docs/authentication#service_accounts + +Recommendation: experiment with the settings depending on how much log +data you generate, so the uploader can keep up with the generated logs. +Using gzip output can be a good option to reduce network traffic when +uploading the log files and in terms of storage costs as well. 
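+
+For instance, a sketch that follows this recommendation by enabling gzip
+(bucket, key, and account values are illustrative; the remaining settings are
+shown in the usage example below):
+
+[source,json]
+--------------------------
+output {
+  google_cloud_storage {
+    bucket => "my_bucket"
+    key_path => "/path/to/privatekey.p12"
+    service_account => "1234@developer.gserviceaccount.com"
+    gzip => true
+  }
+}
+--------------------------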
+
+USAGE:
+This is an example of logstash config:
+
+[source,json]
+--------------------------
+output {
+   google_cloud_storage {
+     bucket => "my_bucket" (required)
+     key_path => "/path/to/privatekey.p12" (required)
+     key_password => "notasecret" (optional)
+     service_account => "1234@developer.gserviceaccount.com" (required)
+     temp_directory => "/tmp/logstash-gcs" (optional)
+     log_file_prefix => "logstash_gcs" (optional)
+     max_file_size_kbytes => 1024 (optional)
+     output_format => "plain" (optional)
+     date_pattern => "%Y-%m-%dT%H:00" (optional)
+     flush_interval_secs => 2 (optional)
+     gzip => false (optional)
+     uploader_interval_secs => 60 (optional)
+   }
+}
+--------------------------
+
+Known limitations and planned improvements:
+
+* Support logstash event variables to determine filename.
+* Turn Google API code into a Plugin Mixin (like AwsConfig).
+* There's no recover method, so if logstash or the plugin crashes, files may
+not be uploaded to GCS.
+* Allow user to configure file name.
+* Allow parallel uploads for heavier loads (+ connection configuration if
+exposed by Ruby API client)
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Google_cloud_storage Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-bucket>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-date_pattern>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-flush_interval_secs>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-gzip>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-key_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-key_path>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-log_file_prefix>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-max_file_size_kbytes>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-output_format>> |<>, one of `["json", "plain"]`|No
+| <<{version}-plugins-{type}s-{plugin}-service_account>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-temp_directory>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-uploader_interval_secs>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-bucket"]
+===== `bucket`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+GCS bucket name, without "gs://" or any other prefix.
+
+[id="{version}-plugins-{type}s-{plugin}-date_pattern"]
+===== `date_pattern`
+
+ * Value type is <>
+ * Default value is `"%Y-%m-%dT%H:00"`
+
+Time pattern for the log file name; defaults to hourly files.
+Must be a valid Time.strftime pattern (for example, `%Y-%m-%d` rolls daily
+files): www.ruby-doc.org/core-2.0/Time.html#method-i-strftime
+
+[id="{version}-plugins-{type}s-{plugin}-flush_interval_secs"]
+===== `flush_interval_secs`
+
+ * Value type is <>
+ * Default value is `2`
+
+Flush interval, in seconds, for writes to the local log files. A value of 0
+flushes on every message.
+
+[id="{version}-plugins-{type}s-{plugin}-gzip"]
+===== `gzip`
+
+ * Value type is <>
+ * Default value is `false`
+
+Gzip the output stream when writing events to log files.
+
+[id="{version}-plugins-{type}s-{plugin}-key_password"]
+===== `key_password`
+
+ * Value type is <>
+ * Default value is `"notasecret"`
+
+GCS private key password.
+ +[id="{version}-plugins-{type}s-{plugin}-key_path"] +===== `key_path` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +GCS path to private key file. + +[id="{version}-plugins-{type}s-{plugin}-log_file_prefix"] +===== `log_file_prefix` + + * Value type is <> + * Default value is `"logstash_gcs"` + +Log file prefix. Log file will follow the format: +_hostname_date<.part?>.log + +[id="{version}-plugins-{type}s-{plugin}-max_file_size_kbytes"] +===== `max_file_size_kbytes` + + * Value type is <> + * Default value is `10000` + +Sets max file size in kbytes. 0 disable max file check. + +[id="{version}-plugins-{type}s-{plugin}-output_format"] +===== `output_format` + + * Value can be any of: `json`, `plain` + * Default value is `"plain"` + +The event format you want to store in files. Defaults to plain text. + +[id="{version}-plugins-{type}s-{plugin}-service_account"] +===== `service_account` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +GCS service account. + +[id="{version}-plugins-{type}s-{plugin}-temp_directory"] +===== `temp_directory` + + * Value type is <> + * Default value is `""` + +Directory where temporary files are stored. +Defaults to /tmp/logstash-gcs- + +[id="{version}-plugins-{type}s-{plugin}-uploader_interval_secs"] +===== `uploader_interval_secs` + + * Value type is <> + * Default value is `60` + +Uploader interval when uploading new files to GCS. Adjust time based +on your time pattern (for example, for hourly files, this interval can be +around one hour). + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/graphite-index.asciidoc b/docs/versioned-plugins/outputs/graphite-index.asciidoc new file mode 100644 index 000000000..a9048c6d4 --- /dev/null +++ b/docs/versioned-plugins/outputs/graphite-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: graphite +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-16 +| <> | 2017-06-23 +|======================================================================= + +include::graphite-v3.1.4.asciidoc[] +include::graphite-v3.1.3.asciidoc[] +include::graphite-v3.1.2.asciidoc[] + diff --git a/docs/versioned-plugins/outputs/graphite-v3.1.2.asciidoc b/docs/versioned-plugins/outputs/graphite-v3.1.2.asciidoc new file mode 100644 index 000000000..66a44ee5d --- /dev/null +++ b/docs/versioned-plugins/outputs/graphite-v3.1.2.asciidoc @@ -0,0 +1,173 @@ +:plugin: graphite +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.2 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-output-graphite/blob/v3.1.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Graphite output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output allows you to pull metrics from your logs and ship them to +Graphite. Graphite is an open source tool for storing and graphing metrics. 
+
+An example use case: some applications emit aggregated stats in the logs
+every 10 seconds. Using the grok filter and this output, it is possible to
+capture the metric values from the logs and emit them to Graphite.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Graphite Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-exclude_metrics>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-fields_are_metrics>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-include_metrics>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-metrics_format>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-nested_object_separator>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-resend_on_failure>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timestamp_field>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-exclude_metrics"]
+===== `exclude_metrics`
+
+ * Value type is <>
+ * Default value is `["%{[^}]+}"]`
+
+Exclude metric names matched by these regexes. By default, unresolved
+`%{field}` strings are excluded.
+
+[id="{version}-plugins-{type}s-{plugin}-fields_are_metrics"]
+===== `fields_are_metrics`
+
+ * Value type is <>
+ * Default value is `false`
+
+When set to true, all event fields are treated as metrics
+and are sent verbatim to Graphite. You may use either `fields_are_metrics`
+or `metrics`, but not both.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * Default value is `"localhost"`
+
+The hostname or IP address of the Graphite server.
+
+[id="{version}-plugins-{type}s-{plugin}-include_metrics"]
+===== `include_metrics`
+
+ * Value type is <>
+ * Default value is `[".*"]`
+
+Include only metric names matched by these regexes.
+
+[id="{version}-plugins-{type}s-{plugin}-metrics"]
+===== `metrics`
+
+ * Value type is <>
+ * Default value is `{}`
+
+The metric(s) to use. This supports dynamic strings like %{host}
+for metric names and also for values. This is a hash whose keys are
+metric names and whose values are metric values. Example:
+[source,ruby]
+    metrics => { "%{host}/uptime" => "%{uptime_1m}" }
+
+The value will be coerced to a floating point value. Values which cannot be
+coerced will be set to zero (0). You may use either `metrics` or `fields_are_metrics`,
+but not both.
+
+[id="{version}-plugins-{type}s-{plugin}-metrics_format"]
+===== `metrics_format`
+
+ * Value type is <>
+ * Default value is `"*"`
+
+Defines the format of the metric string. The placeholder '*' will be
+replaced with the name of the actual metric.
+[source,ruby]
+    metrics_format => "foo.bar.*.sum"
+
+NOTE: If no metrics_format is defined, the name of the metric is used as a fallback.
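+
+A short sketch combining `metrics` and `metrics_format` (the event fields and
+the `servers.*.sum` naming scheme referenced here are illustrative):
+
+[source,ruby]
+--------------------------
+output {
+  graphite {
+    host => "localhost"
+    port => 2003
+    metrics => { "%{host}/uptime" => "%{uptime_1m}" }
+    metrics_format => "servers.*.sum"
+  }
+}
+--------------------------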
+ +[id="{version}-plugins-{type}s-{plugin}-nested_object_separator"] +===== `nested_object_separator` + + * Value type is <> + * Default value is `"."` + +When hashes are passed in as values they are broken out into a dotted notation +For instance if you configure this plugin with +# [source,ruby] + metrics => "mymetrics" + +and "mymetrics" is a nested hash of '{a => 1, b => { c => 2 }}' +this plugin will generate two metrics: a => 1, and b.c => 2 . +If you've specified a 'metrics_format' it will respect that, +but you still may want control over the separator within these nested key names. +This config setting changes the separator from the '.' default. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `2003` + +The port to connect to on the Graphite server. + +[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] +===== `reconnect_interval` + + * Value type is <> + * Default value is `2` + +Interval between reconnect attempts to Carbon. + +[id="{version}-plugins-{type}s-{plugin}-resend_on_failure"] +===== `resend_on_failure` + + * Value type is <> + * Default value is `false` + +Should metrics be resent on failure? + +[id="{version}-plugins-{type}s-{plugin}-timestamp_field"] +===== `timestamp_field` + + * Value type is <> + * Default value is `"@timestamp"` + +Use this field for the timestamp instead of '@timestamp' which is the +default. Useful when backfilling or just getting more accurate data into +graphite since you probably have a cache layer infront of Logstash. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/graphite-v3.1.3.asciidoc b/docs/versioned-plugins/outputs/graphite-v3.1.3.asciidoc new file mode 100644 index 000000000..397880ed7 --- /dev/null +++ b/docs/versioned-plugins/outputs/graphite-v3.1.3.asciidoc @@ -0,0 +1,173 @@ +:plugin: graphite +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.3 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-graphite/blob/v3.1.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Graphite output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output allows you to pull metrics from your logs and ship them to +Graphite. Graphite is an open source tool for storing and graphing metrics. + +An example use case: Some applications emit aggregated stats in the logs +every 10 seconds. Using the grok filter and this output, it is possible to +capture the metric values from the logs and emit them to Graphite. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Graphite Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-exclude_metrics>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fields_are_metrics>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-include_metrics>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metrics_format>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-nested_object_separator>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-resend_on_failure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timestamp_field>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-exclude_metrics"] +===== `exclude_metrics` + + * Value type is <> + * Default value is `["%{[^}]+}"]` + +Exclude regex matched metric names, by default exclude unresolved %{field} strings. + +[id="{version}-plugins-{type}s-{plugin}-fields_are_metrics"] +===== `fields_are_metrics` + + * Value type is <> + * Default value is `false` + +An array indicating that these event fields should be treated as metrics +and will be sent verbatim to Graphite. You may use either `fields_are_metrics` +or `metrics`, but not both. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"localhost"` + +The hostname or IP address of the Graphite server. + +[id="{version}-plugins-{type}s-{plugin}-include_metrics"] +===== `include_metrics` + + * Value type is <> + * Default value is `[".*"]` + +Include only regex matched metric names. + +[id="{version}-plugins-{type}s-{plugin}-metrics"] +===== `metrics` + + * Value type is <> + * Default value is `{}` + +The metric(s) to use. This supports dynamic strings like %{host} +for metric names and also for values. This is a hash field with key +being the metric name, value being the metric value. Example: +[source,ruby] + metrics => { "%{host}/uptime" => "%{uptime_1m}" } + +The value will be coerced to a floating point value. Values which cannot be +coerced will be set to zero (0). You may use either `metrics` or `fields_are_metrics`, +but not both. + +[id="{version}-plugins-{type}s-{plugin}-metrics_format"] +===== `metrics_format` + + * Value type is <> + * Default value is `"*"` + +Defines the format of the metric string. The placeholder '*' will be +replaced with the name of the actual metric. +[source,ruby] + metrics_format => "foo.bar.*.sum" + +NOTE: If no metrics_format is defined, the name of the metric will be used as fallback. + +[id="{version}-plugins-{type}s-{plugin}-nested_object_separator"] +===== `nested_object_separator` + + * Value type is <> + * Default value is `"."` + +When hashes are passed in as values they are broken out into a dotted notation +For instance if you configure this plugin with +# [source,ruby] + metrics => "mymetrics" + +and "mymetrics" is a nested hash of '{a => 1, b => { c => 2 }}' +this plugin will generate two metrics: a => 1, and b.c => 2 . +If you've specified a 'metrics_format' it will respect that, +but you still may want control over the separator within these nested key names. 
+This config setting changes the separator from the '.' default. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `2003` + +The port to connect to on the Graphite server. + +[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] +===== `reconnect_interval` + + * Value type is <> + * Default value is `2` + +Interval between reconnect attempts to Carbon. + +[id="{version}-plugins-{type}s-{plugin}-resend_on_failure"] +===== `resend_on_failure` + + * Value type is <> + * Default value is `false` + +Should metrics be resent on failure? + +[id="{version}-plugins-{type}s-{plugin}-timestamp_field"] +===== `timestamp_field` + + * Value type is <> + * Default value is `"@timestamp"` + +Use this field for the timestamp instead of '@timestamp' which is the +default. Useful when backfilling or just getting more accurate data into +graphite since you probably have a cache layer infront of Logstash. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/graphite-v3.1.4.asciidoc b/docs/versioned-plugins/outputs/graphite-v3.1.4.asciidoc new file mode 100644 index 000000000..ed950aa5e --- /dev/null +++ b/docs/versioned-plugins/outputs/graphite-v3.1.4.asciidoc @@ -0,0 +1,173 @@ +:plugin: graphite +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.4 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-output-graphite/blob/v3.1.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Graphite output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output allows you to pull metrics from your logs and ship them to +Graphite. Graphite is an open source tool for storing and graphing metrics. + +An example use case: Some applications emit aggregated stats in the logs +every 10 seconds. Using the grok filter and this output, it is possible to +capture the metric values from the logs and emit them to Graphite. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Graphite Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-exclude_metrics>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fields_are_metrics>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-include_metrics>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metrics_format>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-nested_object_separator>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-resend_on_failure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timestamp_field>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-exclude_metrics"] +===== `exclude_metrics` + + * Value type is <> + * Default value is `["%{[^}]+}"]` + +Exclude regex matched metric names, by default exclude unresolved %{field} strings. + +[id="{version}-plugins-{type}s-{plugin}-fields_are_metrics"] +===== `fields_are_metrics` + + * Value type is <> + * Default value is `false` + +An array indicating that these event fields should be treated as metrics +and will be sent verbatim to Graphite. You may use either `fields_are_metrics` +or `metrics`, but not both. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"localhost"` + +The hostname or IP address of the Graphite server. + +[id="{version}-plugins-{type}s-{plugin}-include_metrics"] +===== `include_metrics` + + * Value type is <> + * Default value is `[".*"]` + +Include only regex matched metric names. + +[id="{version}-plugins-{type}s-{plugin}-metrics"] +===== `metrics` + + * Value type is <> + * Default value is `{}` + +The metric(s) to use. This supports dynamic strings like %{host} +for metric names and also for values. This is a hash field with key +being the metric name, value being the metric value. Example: +[source,ruby] + metrics => { "%{host}/uptime" => "%{uptime_1m}" } + +The value will be coerced to a floating point value. Values which cannot be +coerced will be set to zero (0). You may use either `metrics` or `fields_are_metrics`, +but not both. + +[id="{version}-plugins-{type}s-{plugin}-metrics_format"] +===== `metrics_format` + + * Value type is <> + * Default value is `"*"` + +Defines the format of the metric string. The placeholder '*' will be +replaced with the name of the actual metric. +[source,ruby] + metrics_format => "foo.bar.*.sum" + +NOTE: If no metrics_format is defined, the name of the metric will be used as fallback. + +[id="{version}-plugins-{type}s-{plugin}-nested_object_separator"] +===== `nested_object_separator` + + * Value type is <> + * Default value is `"."` + +When hashes are passed in as values they are broken out into a dotted notation +For instance if you configure this plugin with +# [source,ruby] + metrics => "mymetrics" + +and "mymetrics" is a nested hash of '{a => 1, b => { c => 2 }}' +this plugin will generate two metrics: a => 1, and b.c => 2 . +If you've specified a 'metrics_format' it will respect that, +but you still may want control over the separator within these nested key names. 
+This config setting changes the separator from the '.' default. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `2003` + +The port to connect to on the Graphite server. + +[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] +===== `reconnect_interval` + + * Value type is <> + * Default value is `2` + +Interval between reconnect attempts to Carbon. + +[id="{version}-plugins-{type}s-{plugin}-resend_on_failure"] +===== `resend_on_failure` + + * Value type is <> + * Default value is `false` + +Should metrics be resent on failure? + +[id="{version}-plugins-{type}s-{plugin}-timestamp_field"] +===== `timestamp_field` + + * Value type is <> + * Default value is `"@timestamp"` + +Use this field for the timestamp instead of '@timestamp' which is the +default. Useful when backfilling or just getting more accurate data into +graphite since you probably have a cache layer infront of Logstash. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/graphtastic-index.asciidoc b/docs/versioned-plugins/outputs/graphtastic-index.asciidoc new file mode 100644 index 000000000..19ecb8c93 --- /dev/null +++ b/docs/versioned-plugins/outputs/graphtastic-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: graphtastic +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-16 +| <> | 2017-06-23 +|======================================================================= + +include::graphtastic-v3.0.3.asciidoc[] +include::graphtastic-v3.0.2.asciidoc[] +include::graphtastic-v3.0.1.asciidoc[] + diff --git a/docs/versioned-plugins/outputs/graphtastic-v3.0.1.asciidoc b/docs/versioned-plugins/outputs/graphtastic-v3.0.1.asciidoc new file mode 100644 index 000000000..ee3cab446 --- /dev/null +++ b/docs/versioned-plugins/outputs/graphtastic-v3.0.1.asciidoc @@ -0,0 +1,148 @@ +:plugin: graphtastic +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.1 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-output-graphtastic/blob/v3.0.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Graphtastic output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +A plugin for a newly developed Java/Spring Metrics application +I didn't really want to code this project but I couldn't find +a respectable alternative that would also run on any Windows +machine - which is the problem and why I am not going with Graphite +and statsd. This application provides multiple integration options +so as to make its use under your network requirements possible. +This includes a REST option that is always enabled for your use +in case you want to write a small script to send the occasional +metric data. 
+ +Find GraphTastic here : https://github.com/NickPadilla/GraphTastic + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Graphtastic Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-batch_number>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-context>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-error_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-integration>> |<>, one of `["udp", "tcp", "rmi", "rest"]`|No +| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retries>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-batch_number"] +===== `batch_number` + + * Value type is <> + * Default value is `60` + +the number of metrics to send to GraphTastic at one time. 60 seems to be the perfect +amount for UDP, with default packet size. + +[id="{version}-plugins-{type}s-{plugin}-context"] +===== `context` + + * Value type is <> + * Default value is `"graphtastic"` + +if using rest as your end point you need to also provide the application url +it defaults to localhost/graphtastic. You can customize the application url +by changing the name of the .war file. There are other ways to change the +application context, but they vary depending on the Application Server in use. +Please consult your application server documentation for more on application +contexts. + +[id="{version}-plugins-{type}s-{plugin}-error_file"] +===== `error_file` + + * Value type is <> + * Default value is `""` + +setting allows you to specify where we save errored transactions +this makes the most sense at this point - will need to decide +on how we reintegrate these error metrics +NOT IMPLEMENTED! + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"127.0.0.1"` + +host for the graphtastic server - defaults to 127.0.0.1 + +[id="{version}-plugins-{type}s-{plugin}-integration"] +===== `integration` + + * Value can be any of: `udp`, `tcp`, `rmi`, `rest` + * Default value is `"udp"` + +options are udp(fastest - default) - rmi(faster) - rest(fast) - tcp(don't use TCP yet - some problems - errors out on linux) + +[id="{version}-plugins-{type}s-{plugin}-metrics"] +===== `metrics` + + * Value type is <> + * Default value is `{}` + +metrics hash - you will provide a name for your metric and the metric +data as key value pairs. so for example: + +[source,ruby] +metrics => { "Response" => "%{response}" } + +example for the logstash config + +[source,ruby] +metrics => [ "Response", "%{response}" ] + +NOTE: you can also use the dynamic fields for the key value as well as the actual value + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * There is no default value for this setting. 
+ +port for the graphtastic instance - defaults to 1199 for RMI, 1299 for TCP, 1399 for UDP, and 8080 for REST + +[id="{version}-plugins-{type}s-{plugin}-retries"] +===== `retries` + + * Value type is <> + * Default value is `1` + +number of attempted retry after send error - currently only way to integrate +errored transactions - should try and save to a file or later consumption +either by graphtastic utility or by this program after connectivity is +ensured to be established. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/graphtastic-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/graphtastic-v3.0.2.asciidoc new file mode 100644 index 000000000..76bdcc9ac --- /dev/null +++ b/docs/versioned-plugins/outputs/graphtastic-v3.0.2.asciidoc @@ -0,0 +1,148 @@ +:plugin: graphtastic +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.2 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-graphtastic/blob/v3.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Graphtastic output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +A plugin for a newly developed Java/Spring Metrics application +I didn't really want to code this project but I couldn't find +a respectable alternative that would also run on any Windows +machine - which is the problem and why I am not going with Graphite +and statsd. This application provides multiple integration options +so as to make its use under your network requirements possible. +This includes a REST option that is always enabled for your use +in case you want to write a small script to send the occasional +metric data. + +Find GraphTastic here : https://github.com/NickPadilla/GraphTastic + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Graphtastic Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-batch_number>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-context>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-error_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-integration>> |<>, one of `["udp", "tcp", "rmi", "rest"]`|No +| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retries>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-batch_number"] +===== `batch_number` + + * Value type is <> + * Default value is `60` + +the number of metrics to send to GraphTastic at one time. 60 seems to be the perfect +amount for UDP, with default packet size. 
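+
+A sketch assuming the default UDP integration with a smaller batch size
+(host, port, and the `response` field are illustrative; 1399 is the
+documented UDP default):
+
+[source,ruby]
+--------------------------
+output {
+  graphtastic {
+    integration => "udp"
+    host => "127.0.0.1"
+    port => 1399
+    batch_number => 30
+    metrics => { "Response" => "%{response}" }
+  }
+}
+--------------------------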
+ +[id="{version}-plugins-{type}s-{plugin}-context"] +===== `context` + + * Value type is <> + * Default value is `"graphtastic"` + +if using rest as your end point you need to also provide the application url +it defaults to localhost/graphtastic. You can customize the application url +by changing the name of the .war file. There are other ways to change the +application context, but they vary depending on the Application Server in use. +Please consult your application server documentation for more on application +contexts. + +[id="{version}-plugins-{type}s-{plugin}-error_file"] +===== `error_file` + + * Value type is <> + * Default value is `""` + +setting allows you to specify where we save errored transactions +this makes the most sense at this point - will need to decide +on how we reintegrate these error metrics +NOT IMPLEMENTED! + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"127.0.0.1"` + +host for the graphtastic server - defaults to 127.0.0.1 + +[id="{version}-plugins-{type}s-{plugin}-integration"] +===== `integration` + + * Value can be any of: `udp`, `tcp`, `rmi`, `rest` + * Default value is `"udp"` + +options are udp(fastest - default) - rmi(faster) - rest(fast) - tcp(don't use TCP yet - some problems - errors out on linux) + +[id="{version}-plugins-{type}s-{plugin}-metrics"] +===== `metrics` + + * Value type is <> + * Default value is `{}` + +metrics hash - you will provide a name for your metric and the metric +data as key value pairs. so for example: + +[source,ruby] +metrics => { "Response" => "%{response}" } + +example for the logstash config + +[source,ruby] +metrics => [ "Response", "%{response}" ] + +NOTE: you can also use the dynamic fields for the key value as well as the actual value + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * There is no default value for this setting. + +port for the graphtastic instance - defaults to 1199 for RMI, 1299 for TCP, 1399 for UDP, and 8080 for REST + +[id="{version}-plugins-{type}s-{plugin}-retries"] +===== `retries` + + * Value type is <> + * Default value is `1` + +number of attempted retry after send error - currently only way to integrate +errored transactions - should try and save to a file or later consumption +either by graphtastic utility or by this program after connectivity is +ensured to be established. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/graphtastic-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/graphtastic-v3.0.3.asciidoc new file mode 100644 index 000000000..61f860fc0 --- /dev/null +++ b/docs/versioned-plugins/outputs/graphtastic-v3.0.3.asciidoc @@ -0,0 +1,148 @@ +:plugin: graphtastic +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-output-graphtastic/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Graphtastic output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+A plugin for a newly developed Java/Spring Metrics application.
+Its author built it after failing to find a respectable alternative
+that would also run on any Windows machine, which ruled out Graphite
+and statsd. The application provides multiple integration options to
+fit your network requirements, including a REST option that is always
+enabled in case you want to write a small script to send the
+occasional metric.
+
+Find GraphTastic here: https://github.com/NickPadilla/GraphTastic
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Graphtastic Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-batch_number>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-context>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-error_file>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-integration>> |<>, one of `["udp", "tcp", "rmi", "rest"]`|No
+| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retries>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-batch_number"]
+===== `batch_number`
+
+ * Value type is <>
+ * Default value is `60`
+
+The number of metrics to send to GraphTastic at one time. 60 seems to be
+the perfect amount for UDP with the default packet size.
+
+[id="{version}-plugins-{type}s-{plugin}-context"]
+===== `context`
+
+ * Value type is <>
+ * Default value is `"graphtastic"`
+
+If you are using REST as your endpoint, you also need to provide the
+application URL; it defaults to localhost/graphtastic. You can customize
+the application URL by changing the name of the .war file. There are
+other ways to change the application context, but they vary depending on
+the application server in use. Please consult your application server
+documentation for more on application contexts.
+
+[id="{version}-plugins-{type}s-{plugin}-error_file"]
+===== `error_file`
+
+ * Value type is <>
+ * Default value is `""`
+
+This setting allows you to specify where errored transactions are saved;
+how these error metrics are reintegrated still needs to be decided.
+NOT IMPLEMENTED!
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * Default value is `"127.0.0.1"`
+
+Host for the GraphTastic server. Defaults to 127.0.0.1.
+
+[id="{version}-plugins-{type}s-{plugin}-integration"]
+===== `integration`
+
+ * Value can be any of: `udp`, `tcp`, `rmi`, `rest`
+ * Default value is `"udp"`
+
+Options are udp (fastest, the default), rmi (faster), rest (fast), and
+tcp (don't use TCP yet; it has problems and errors out on Linux).
+
+[id="{version}-plugins-{type}s-{plugin}-metrics"]
+===== `metrics`
+
+ * Value type is <>
+ * Default value is `{}`
+
+Metrics hash. You provide a name for your metric and the metric data as
+key-value pairs, for example:
+
+[source,ruby]
+metrics => { "Response" => "%{response}" }
+
+Or, written as an array in the Logstash config:
+
+[source,ruby]
+metrics => [ "Response", "%{response}" ]
+
+NOTE: You can also use dynamic fields for the key as well as for the value.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Port for the GraphTastic instance. Defaults to 1199 for RMI, 1299 for TCP, 1399 for UDP, and 8080 for REST.
+
+[id="{version}-plugins-{type}s-{plugin}-retries"]
+===== `retries`
+
+ * Value type is <>
+ * Default value is `1`
+
+Number of retries to attempt after a send error. This is currently the
+only way to handle errored transactions; ideally they would be saved to a
+file for later consumption, either by the GraphTastic utility or by this
+plugin once connectivity is re-established.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/hipchat-index.asciidoc b/docs/versioned-plugins/outputs/hipchat-index.asciidoc
new file mode 100644
index 000000000..856f57c48
--- /dev/null
+++ b/docs/versioned-plugins/outputs/hipchat-index.asciidoc
@@ -0,0 +1,12 @@
+:plugin: hipchat
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-06-23
+|=======================================================================
+
+include::hipchat-v4.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/hipchat-v4.0.3.asciidoc b/docs/versioned-plugins/outputs/hipchat-v4.0.3.asciidoc
new file mode 100644
index 000000000..d3bc367ca
--- /dev/null
+++ b/docs/versioned-plugins/outputs/hipchat-v4.0.3.asciidoc
@@ -0,0 +1,122 @@
+:plugin: hipchat
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-hipchat/blob/v4.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Hipchat output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output allows you to write events to https://www.hipchat.com/[HipChat].
+
+Make sure your API token has the appropriate permissions and supports
+sending messages.
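+
+For orientation, a minimal configuration might look like the following
+sketch. This is illustrative only; the token and room name are
+placeholders:
+
+[source,ruby]
+output {
+  hipchat {
+    # hypothetical values; use your own API token and room
+    token   => "YOUR_HIPCHAT_TOKEN"
+    room_id => "logstash-alerts"
+    from    => "logstash"
+    color   => "red"
+  }
+}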
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Hipchat Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-color>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-from>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-message_format>> |<>, one of `["html", "text"]`|No
+| <<{version}-plugins-{type}s-{plugin}-room_id>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-token>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-trigger_notify>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-color"]
+===== `color`
+
+ * Value type is <>
+ * Default value is `"yellow"`
+
+Background color for the message. HipChat currently supports one of
+"yellow", "red", "green", "purple", "gray", or "random". Field
+references are supported.
+
+[id="{version}-plugins-{type}s-{plugin}-format"]
+===== `format`
+
+ * Value type is <>
+ * Default value is `"%{message}"`
+
+Message format to send; event tokens are usable here.
+
+[id="{version}-plugins-{type}s-{plugin}-from"]
+===== `from`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+The name the message will appear to be sent from. Field references are
+supported.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+HipChat host to use.
+
+[id="{version}-plugins-{type}s-{plugin}-message_format"]
+===== `message_format`
+
+ * Value can be any of: `html`, `text`
+ * Default value is `"html"`
+
+Specify the message format.
+
+[id="{version}-plugins-{type}s-{plugin}-room_id"]
+===== `room_id`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The ID or name of the room. Field references are supported.
+
+[id="{version}-plugins-{type}s-{plugin}-token"]
+===== `token`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The HipChat authentication token.
+
+[id="{version}-plugins-{type}s-{plugin}-trigger_notify"]
+===== `trigger_notify`
+
+ * Value type is <>
+ * Default value is `false`
+
+Whether or not this message should trigger a notification for people in the room.
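+
+Because `room_id`, `from`, and `color` support field references, messages
+can be routed per event. A hypothetical sketch, assuming the events carry
+`team` and `alert_color` fields (both are made-up names):
+
+[source,ruby]
+output {
+  hipchat {
+    token          => "YOUR_HIPCHAT_TOKEN"  # placeholder
+    room_id        => "%{team}"             # hypothetical event field
+    color          => "%{alert_color}"      # hypothetical event field
+    trigger_notify => true
+  }
+}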
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/http-index.asciidoc b/docs/versioned-plugins/outputs/http-index.asciidoc
new file mode 100644
index 000000000..9c52387b4
--- /dev/null
+++ b/docs/versioned-plugins/outputs/http-index.asciidoc
@@ -0,0 +1,30 @@
+:plugin: http
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2018-01-09
+| <> | 2017-11-07
+| <> | 2017-08-23
+| <> | 2017-08-16
+| <> | 2017-08-02
+| <> | 2017-08-23
+| <> | 2017-08-18
+| <> | 2017-07-06
+| <> | 2017-06-23
+| <> | 2017-05-08
+|=======================================================================
+
+include::http-v5.1.2.asciidoc[]
+include::http-v5.1.1.asciidoc[]
+include::http-v5.1.0.asciidoc[]
+include::http-v5.0.1.asciidoc[]
+include::http-v5.0.0.asciidoc[]
+include::http-v4.4.0.asciidoc[]
+include::http-v4.3.4.asciidoc[]
+include::http-v4.3.2.asciidoc[]
+include::http-v4.3.1.asciidoc[]
+include::http-v4.3.0.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/http-v4.3.0.asciidoc b/docs/versioned-plugins/outputs/http-v4.3.0.asciidoc
new file mode 100644
index 000000000..93f540faa
--- /dev/null
+++ b/docs/versioned-plugins/outputs/http-v4.3.0.asciidoc
@@ -0,0 +1,380 @@
+:plugin: http
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.3.0
+:release_date: 2017-05-08
+:changelog_url: https://github.com/logstash-plugins/logstash-output-http/blob/v4.3.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Http output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output lets you send events to a generic HTTP(S) endpoint.
+
+This output will execute up to 'pool_max' requests in parallel for performance.
+Consider this when tuning this plugin for performance.
+
+Additionally, note that when parallel execution is used strict ordering of events is not
+guaranteed!
+
+Beware, this gem does not yet support codecs. Please use the 'format' option for now.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Http Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-content_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-format>> |<>, one of `["json", "form", "message"]`|No +| <<{version}-plugins-{type}s-{plugin}-headers>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-http_method>> |<>, one of `["put", "post", "patch", "delete", "get", "head"]`|Yes +| <<{version}-plugins-{type}s-{plugin}-ignorable_codes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-mapping>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No +| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_failed>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retryable_codes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_validation>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No +|======================================================================= + +Also see <> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] +===== `automatic_retries` + + * Value type is <> + * Default value is `1` + +How many times should the client retry a failing URL. We highly recommend NOT setting this value +to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! +Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. + +[id="{version}-plugins-{type}s-{plugin}-cacert"] +===== `cacert` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom X.509 CA (.pem certs) specify the path to that here + +[id="{version}-plugins-{type}s-{plugin}-client_cert"] +===== `client_cert` + + * Value type is <> + * There is no default value for this setting. 
+ +If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here + +[id="{version}-plugins-{type}s-{plugin}-client_key"] +===== `client_key` + + * Value type is <> + * There is no default value for this setting. + +If you're using a client certificate specify the path to the encryption key here + +[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] +===== `connect_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for a connection to be established. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-content_type"] +===== `content_type` + + * Value type is <> + * There is no default value for this setting. + +Content type + +If not specified, this defaults to the following: + +* if format is "json", "application/json" +* if format is "form", "application/x-www-form-urlencoded" + +[id="{version}-plugins-{type}s-{plugin}-cookies"] +===== `cookies` + + * Value type is <> + * Default value is `true` + +Enable cookie support. With this enabled the client will persist cookies +across requests as a normal web browser would. Enabled by default + +[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] +===== `follow_redirects` + + * Value type is <> + * Default value is `true` + +Should redirects be followed? Defaults to `true` + +[id="{version}-plugins-{type}s-{plugin}-format"] +===== `format` + + * Value can be any of: `json`, `form`, `message` + * Default value is `"json"` + +Set the format of the http body. + +If form, then the body will be the mapping (or whole event) converted +into a query parameter string, e.g. `foo=bar&baz=fizz...` + +If message, then the body will be the result of formatting the event according to message + +Otherwise, the event is sent as json. + +[id="{version}-plugins-{type}s-{plugin}-headers"] +===== `headers` + + * Value type is <> + * There is no default value for this setting. + +Custom headers to use +format is `headers => ["X-My-Header", "%{host}"]` + +[id="{version}-plugins-{type}s-{plugin}-http_method"] +===== `http_method` + + * This is a required setting. + * Value can be any of: `put`, `post`, `patch`, `delete`, `get`, `head` + * There is no default value for this setting. + +The HTTP Verb. One of "put", "post", "patch", "delete", "get", "head" + +[id="{version}-plugins-{type}s-{plugin}-ignorable_codes"] +===== `ignorable_codes` + + * Value type is <> + * There is no default value for this setting. + +If you would like to consider some non-2xx codes to be successes +enumerate them here. Responses returning these codes will be considered successes + +[id="{version}-plugins-{type}s-{plugin}-keepalive"] +===== `keepalive` + + * Value type is <> + * Default value is `true` + +Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least +one with this to fix interactions with broken keepalive implementations. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the keystore password here. +Note, most .jks files created with keytool require a password! 
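+
+Tying the TLS-related options above together, a sketch of
+client-certificate authentication against a custom CA might look like
+this; all paths and the URL are placeholders, not values from the
+original docs:
+
+[source,ruby]
+output {
+  http {
+    url         => "https://example.org/ingest"  # placeholder
+    http_method => "post"
+    cacert      => "/etc/ssl/my_ca.pem"          # placeholder paths
+    client_cert => "/etc/ssl/client.pem"
+    client_key  => "/etc/ssl/client.key"
+  }
+}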
+ +[id="{version}-plugins-{type}s-{plugin}-keystore_type"] +===== `keystore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS` + +[id="{version}-plugins-{type}s-{plugin}-mapping"] +===== `mapping` + + * Value type is <> + * There is no default value for this setting. + +This lets you choose the structure and parts of the event that are sent. + + +For example: +[source,ruby] + mapping => {"foo" => "%{host}" + "bar" => "%{type}"} + +[id="{version}-plugins-{type}s-{plugin}-message"] +===== `message` + + * Value type is <> + * There is no default value for this setting. + + + +[id="{version}-plugins-{type}s-{plugin}-pool_max"] +===== `pool_max` + + * Value type is <> + * Default value is `50` + +Max number of concurrent connections. Defaults to `50` + +[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] +===== `pool_max_per_route` + + * Value type is <> + * Default value is `25` + +Max number of concurrent connections to a single host. Defaults to `25` + +[id="{version}-plugins-{type}s-{plugin}-proxy"] +===== `proxy` + + * Value type is <> + * There is no default value for this setting. + +If you'd like to use an HTTP proxy . This supports multiple configuration syntaxes: + +1. Proxy host in form: `http://proxy.org:1234` +2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}` +3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}` + +[id="{version}-plugins-{type}s-{plugin}-request_timeout"] +===== `request_timeout` + + * Value type is <> + * Default value is `60` + +This module makes it easy to add a very fully configured HTTP client to logstash +based on [Manticore](https://github.com/cheald/manticore). +For an example of its usage see https://github.com/logstash-plugins/logstash-input-http_poller +Timeout (in seconds) for the entire request + +[id="{version}-plugins-{type}s-{plugin}-retry_failed"] +===== `retry_failed` + + * Value type is <> + * Default value is `true` + +Set this to false if you don't want this output to retry failed requests + +[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"] +===== `retry_non_idempotent` + + * Value type is <> + * Default value is `false` + +If `automatic_retries` is enabled this will cause non-idempotent HTTP verbs (such as POST) to be retried. + +[id="{version}-plugins-{type}s-{plugin}-retryable_codes"] +===== `retryable_codes` + + * Value type is <> + * Default value is `[429, 500, 502, 503, 504]` + +If encountered as response codes this plugin will retry these requests + +[id="{version}-plugins-{type}s-{plugin}-socket_timeout"] +===== `socket_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for data on the socket. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_validation"] +===== `ssl_certificate_validation` + + * Value type is <> + * Default value is `true` + +Set this to false to disable SSL/TLS certificate validation +Note: setting this to false is generally considered insecure! + +[id="{version}-plugins-{type}s-{plugin}-truststore"] +===== `truststore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs! 
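+
+Putting the retry-related options described above together, a
+hypothetical retry policy might look like the following sketch (the URL
+is a placeholder):
+
+[source,ruby]
+output {
+  http {
+    url                  => "https://example.org/ingest"  # placeholder
+    http_method          => "post"
+    retry_failed         => true
+    automatic_retries    => 3
+    retry_non_idempotent => true  # also retry POST requests
+    retryable_codes      => [429, 500, 502, 503, 504]
+  }
+}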
+ +[id="{version}-plugins-{type}s-{plugin}-truststore_password"] +===== `truststore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the truststore password here. +Note, most .jks files created with keytool require a password! + +[id="{version}-plugins-{type}s-{plugin}-truststore_type"] +===== `truststore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS` + +[id="{version}-plugins-{type}s-{plugin}-url"] +===== `url` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +This output lets you send events to a +generic HTTP(S) endpoint + +This output will execute up to 'pool_max' requests in parallel for performance. +Consider this when tuning this plugin for performance. + +Additionally, note that when parallel execution is used strict ordering of events is not +guaranteed! + +Beware, this gem does not yet support codecs. Please use the 'format' option for now. +URL to use + +[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] +===== `validate_after_inactivity` + + * Value type is <> + * Default value is `200` + +How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. +# You may want to set this lower, possibly to 0 if you get connection errors regularly +Quoting the Apache commons docs (this client is based Apache Commmons): +'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.' +See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info] + + + +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/http-v4.3.1.asciidoc b/docs/versioned-plugins/outputs/http-v4.3.1.asciidoc new file mode 100644 index 000000000..fa66f652c --- /dev/null +++ b/docs/versioned-plugins/outputs/http-v4.3.1.asciidoc @@ -0,0 +1,381 @@ +:plugin: http +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.3.1 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-output-http/blob/v4.3.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Http output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Http Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-content_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-format>> |<>, one of `["json", "form", "message"]`|No +| <<{version}-plugins-{type}s-{plugin}-headers>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-http_method>> |<>, one of `["put", "post", "patch", "delete", "get", "head"]`|Yes +| <<{version}-plugins-{type}s-{plugin}-ignorable_codes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-mapping>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No +| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_failed>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retryable_codes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_validation>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] +===== `automatic_retries` + + * Value type is <> + * Default value is `1` + +How many times should the client retry a failing URL. We highly recommend NOT setting this value +to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! +Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. + +[id="{version}-plugins-{type}s-{plugin}-cacert"] +===== `cacert` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom X.509 CA (.pem certs) specify the path to that here + +[id="{version}-plugins-{type}s-{plugin}-client_cert"] +===== `client_cert` + + * Value type is <> + * There is no default value for this setting. 
+ +If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here + +[id="{version}-plugins-{type}s-{plugin}-client_key"] +===== `client_key` + + * Value type is <> + * There is no default value for this setting. + +If you're using a client certificate specify the path to the encryption key here + +[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] +===== `connect_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for a connection to be established. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-content_type"] +===== `content_type` + + * Value type is <> + * There is no default value for this setting. + +Content type + +If not specified, this defaults to the following: + +* if format is "json", "application/json" +* if format is "form", "application/x-www-form-urlencoded" + +[id="{version}-plugins-{type}s-{plugin}-cookies"] +===== `cookies` + + * Value type is <> + * Default value is `true` + +Enable cookie support. With this enabled the client will persist cookies +across requests as a normal web browser would. Enabled by default + +[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] +===== `follow_redirects` + + * Value type is <> + * Default value is `true` + +Should redirects be followed? Defaults to `true` + +[id="{version}-plugins-{type}s-{plugin}-format"] +===== `format` + + * Value can be any of: `json`, `form`, `message` + * Default value is `"json"` + +Set the format of the http body. + +If form, then the body will be the mapping (or whole event) converted +into a query parameter string, e.g. `foo=bar&baz=fizz...` + +If message, then the body will be the result of formatting the event according to message + +Otherwise, the event is sent as json. + +[id="{version}-plugins-{type}s-{plugin}-headers"] +===== `headers` + + * Value type is <> + * There is no default value for this setting. + +Custom headers to use +format is `headers => ["X-My-Header", "%{host}"]` + +[id="{version}-plugins-{type}s-{plugin}-http_method"] +===== `http_method` + + * This is a required setting. + * Value can be any of: `put`, `post`, `patch`, `delete`, `get`, `head` + * There is no default value for this setting. + +The HTTP Verb. One of "put", "post", "patch", "delete", "get", "head" + +[id="{version}-plugins-{type}s-{plugin}-ignorable_codes"] +===== `ignorable_codes` + + * Value type is <> + * There is no default value for this setting. + +If you would like to consider some non-2xx codes to be successes +enumerate them here. Responses returning these codes will be considered successes + +[id="{version}-plugins-{type}s-{plugin}-keepalive"] +===== `keepalive` + + * Value type is <> + * Default value is `true` + +Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least +one with this to fix interactions with broken keepalive implementations. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the keystore password here. +Note, most .jks files created with keytool require a password! 
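+
+As a sketch of the `format` option described above, sending the event as
+a form-encoded body (which also selects the matching default content
+type) might look like this; the URL is a placeholder:
+
+[source,ruby]
+output {
+  http {
+    url         => "http://example.org/submit"  # placeholder
+    http_method => "post"
+    format      => "form"  # body becomes foo=bar&baz=fizz...
+  }
+}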
+ +[id="{version}-plugins-{type}s-{plugin}-keystore_type"] +===== `keystore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS` + +[id="{version}-plugins-{type}s-{plugin}-mapping"] +===== `mapping` + + * Value type is <> + * There is no default value for this setting. + +This lets you choose the structure and parts of the event that are sent. + + +For example: +[source,ruby] + mapping => {"foo" => "%{host}" + "bar" => "%{type}"} + +[id="{version}-plugins-{type}s-{plugin}-message"] +===== `message` + + * Value type is <> + * There is no default value for this setting. + + + +[id="{version}-plugins-{type}s-{plugin}-pool_max"] +===== `pool_max` + + * Value type is <> + * Default value is `50` + +Max number of concurrent connections. Defaults to `50` + +[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] +===== `pool_max_per_route` + + * Value type is <> + * Default value is `25` + +Max number of concurrent connections to a single host. Defaults to `25` + +[id="{version}-plugins-{type}s-{plugin}-proxy"] +===== `proxy` + + * Value type is <> + * There is no default value for this setting. + +If you'd like to use an HTTP proxy . This supports multiple configuration syntaxes: + +1. Proxy host in form: `http://proxy.org:1234` +2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}` +3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}` + +[id="{version}-plugins-{type}s-{plugin}-request_timeout"] +===== `request_timeout` + + * Value type is <> + * Default value is `60` + +This module makes it easy to add a very fully configured HTTP client to logstash +based on [Manticore](https://github.com/cheald/manticore). +For an example of its usage see https://github.com/logstash-plugins/logstash-input-http_poller +Timeout (in seconds) for the entire request + +[id="{version}-plugins-{type}s-{plugin}-retry_failed"] +===== `retry_failed` + + * Value type is <> + * Default value is `true` + +Set this to false if you don't want this output to retry failed requests + +[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"] +===== `retry_non_idempotent` + + * Value type is <> + * Default value is `false` + +If `automatic_retries` is enabled this will cause non-idempotent HTTP verbs (such as POST) to be retried. + +[id="{version}-plugins-{type}s-{plugin}-retryable_codes"] +===== `retryable_codes` + + * Value type is <> + * Default value is `[429, 500, 502, 503, 504]` + +If encountered as response codes this plugin will retry these requests + +[id="{version}-plugins-{type}s-{plugin}-socket_timeout"] +===== `socket_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for data on the socket. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_validation"] +===== `ssl_certificate_validation` + + * Value type is <> + * Default value is `true` + +Set this to false to disable SSL/TLS certificate validation +Note: setting this to false is generally considered insecure! + +[id="{version}-plugins-{type}s-{plugin}-truststore"] +===== `truststore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs! 
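+
+For the `proxy` option described above, the simplest of the three
+syntaxes is a plain URL string. A hypothetical sketch (both URLs are
+placeholders):
+
+[source,ruby]
+output {
+  http {
+    url         => "https://example.org/ingest"     # placeholder
+    http_method => "post"
+    proxy       => "http://proxy.example.org:3128"  # placeholder proxy
+  }
+}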
+ +[id="{version}-plugins-{type}s-{plugin}-truststore_password"] +===== `truststore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the truststore password here. +Note, most .jks files created with keytool require a password! + +[id="{version}-plugins-{type}s-{plugin}-truststore_type"] +===== `truststore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS` + +[id="{version}-plugins-{type}s-{plugin}-url"] +===== `url` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +This output lets you send events to a +generic HTTP(S) endpoint + +This output will execute up to 'pool_max' requests in parallel for performance. +Consider this when tuning this plugin for performance. + +Additionally, note that when parallel execution is used strict ordering of events is not +guaranteed! + +Beware, this gem does not yet support codecs. Please use the 'format' option for now. +URL to use + +[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] +===== `validate_after_inactivity` + + * Value type is <> + * Default value is `200` + +How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. +# You may want to set this lower, possibly to 0 if you get connection errors regularly +Quoting the Apache commons docs (this client is based Apache Commmons): +'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.' +See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info] + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/http-v4.3.2.asciidoc b/docs/versioned-plugins/outputs/http-v4.3.2.asciidoc new file mode 100644 index 000000000..659b382b3 --- /dev/null +++ b/docs/versioned-plugins/outputs/http-v4.3.2.asciidoc @@ -0,0 +1,377 @@ +:plugin: http +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.3.2 +:release_date: 2017-07-06 +:changelog_url: https://github.com/logstash-plugins/logstash-output-http/blob/v4.3.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Http output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Http Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-content_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-format>> |<>, one of `["json", "form", "message"]`|No +| <<{version}-plugins-{type}s-{plugin}-headers>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-http_method>> |<>, one of `["put", "post", "patch", "delete", "get", "head"]`|Yes +| <<{version}-plugins-{type}s-{plugin}-ignorable_codes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-mapping>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No +| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_failed>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retryable_codes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_validation>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] +===== `automatic_retries` + + * Value type is <> + * Default value is `1` + +How many times should the client retry a failing URL. We highly recommend NOT setting this value +to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! +Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. + +[id="{version}-plugins-{type}s-{plugin}-cacert"] +===== `cacert` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom X.509 CA (.pem certs) specify the path to that here + +[id="{version}-plugins-{type}s-{plugin}-client_cert"] +===== `client_cert` + + * Value type is <> + * There is no default value for this setting. 
+ +If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here + +[id="{version}-plugins-{type}s-{plugin}-client_key"] +===== `client_key` + + * Value type is <> + * There is no default value for this setting. + +If you're using a client certificate specify the path to the encryption key here + +[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] +===== `connect_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for a connection to be established. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-content_type"] +===== `content_type` + + * Value type is <> + * There is no default value for this setting. + +Content type + +If not specified, this defaults to the following: + +* if format is "json", "application/json" +* if format is "form", "application/x-www-form-urlencoded" + +[id="{version}-plugins-{type}s-{plugin}-cookies"] +===== `cookies` + + * Value type is <> + * Default value is `true` + +Enable cookie support. With this enabled the client will persist cookies +across requests as a normal web browser would. Enabled by default + +[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] +===== `follow_redirects` + + * Value type is <> + * Default value is `true` + +Should redirects be followed? Defaults to `true` + +[id="{version}-plugins-{type}s-{plugin}-format"] +===== `format` + + * Value can be any of: `json`, `form`, `message` + * Default value is `"json"` + +Set the format of the http body. + +If form, then the body will be the mapping (or whole event) converted +into a query parameter string, e.g. `foo=bar&baz=fizz...` + +If message, then the body will be the result of formatting the event according to message + +Otherwise, the event is sent as json. + +[id="{version}-plugins-{type}s-{plugin}-headers"] +===== `headers` + + * Value type is <> + * There is no default value for this setting. + +Custom headers to use +format is `headers => ["X-My-Header", "%{host}"]` + +[id="{version}-plugins-{type}s-{plugin}-http_method"] +===== `http_method` + + * This is a required setting. + * Value can be any of: `put`, `post`, `patch`, `delete`, `get`, `head` + * There is no default value for this setting. + +The HTTP Verb. One of "put", "post", "patch", "delete", "get", "head" + +[id="{version}-plugins-{type}s-{plugin}-ignorable_codes"] +===== `ignorable_codes` + + * Value type is <> + * There is no default value for this setting. + +If you would like to consider some non-2xx codes to be successes +enumerate them here. Responses returning these codes will be considered successes + +[id="{version}-plugins-{type}s-{plugin}-keepalive"] +===== `keepalive` + + * Value type is <> + * Default value is `true` + +Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least +one with this to fix interactions with broken keepalive implementations. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! + +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the keystore password here. +Note, most .jks files created with keytool require a password! 
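+
+Following the `headers` format shown above, a sketch that forwards the
+event's `host` field as a custom header might look like this; the URL
+and header name are placeholders:
+
+[source,ruby]
+output {
+  http {
+    url         => "https://example.org/ingest"  # placeholder
+    http_method => "post"
+    headers     => ["X-My-Header", "%{host}"]
+  }
+}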
+ +[id="{version}-plugins-{type}s-{plugin}-keystore_type"] +===== `keystore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS` + +[id="{version}-plugins-{type}s-{plugin}-mapping"] +===== `mapping` + + * Value type is <> + * There is no default value for this setting. + +This lets you choose the structure and parts of the event that are sent. + + +For example: +[source,ruby] + mapping => {"foo" => "%{host}" + "bar" => "%{type}"} + +[id="{version}-plugins-{type}s-{plugin}-message"] +===== `message` + + * Value type is <> + * There is no default value for this setting. + + + +[id="{version}-plugins-{type}s-{plugin}-pool_max"] +===== `pool_max` + + * Value type is <> + * Default value is `50` + +Max number of concurrent connections. Defaults to `50` + +[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] +===== `pool_max_per_route` + + * Value type is <> + * Default value is `25` + +Max number of concurrent connections to a single host. Defaults to `25` + +[id="{version}-plugins-{type}s-{plugin}-proxy"] +===== `proxy` + + * Value type is <> + * There is no default value for this setting. + +If you'd like to use an HTTP proxy . This supports multiple configuration syntaxes: + +1. Proxy host in form: `http://proxy.org:1234` +2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}` +3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}` + +[id="{version}-plugins-{type}s-{plugin}-request_timeout"] +===== `request_timeout` + + * Value type is <> + * Default value is `60` + +This module makes it easy to add a very fully configured HTTP client to logstash +based on [Manticore](https://github.com/cheald/manticore). +For an example of its usage see https://github.com/logstash-plugins/logstash-input-http_poller +Timeout (in seconds) for the entire request + +[id="{version}-plugins-{type}s-{plugin}-retry_failed"] +===== `retry_failed` + + * Value type is <> + * Default value is `true` + +Set this to false if you don't want this output to retry failed requests + +[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"] +===== `retry_non_idempotent` + + * Value type is <> + * Default value is `false` + +If `automatic_retries` is enabled this will cause non-idempotent HTTP verbs (such as POST) to be retried. + +[id="{version}-plugins-{type}s-{plugin}-retryable_codes"] +===== `retryable_codes` + + * Value type is <> + * Default value is `[429, 500, 502, 503, 504]` + +If encountered as response codes this plugin will retry these requests + +[id="{version}-plugins-{type}s-{plugin}-socket_timeout"] +===== `socket_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for data on the socket. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_validation"] +===== `ssl_certificate_validation` + + * Value type is <> + * Default value is `true` + +Set this to false to disable SSL/TLS certificate validation +Note: setting this to false is generally considered insecure! + +[id="{version}-plugins-{type}s-{plugin}-truststore"] +===== `truststore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs! 
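+
+Combining the `mapping` example above into a complete output block, a
+hypothetical sketch (the URL and field names are placeholders):
+
+[source,ruby]
+output {
+  http {
+    url         => "https://example.org/ingest"  # placeholder
+    http_method => "post"
+    format      => "json"
+    mapping     => {
+      "foo" => "%{host}"
+      "bar" => "%{type}"
+    }
+  }
+}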
+ +[id="{version}-plugins-{type}s-{plugin}-truststore_password"] +===== `truststore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the truststore password here. +Note, most .jks files created with keytool require a password! + +[id="{version}-plugins-{type}s-{plugin}-truststore_type"] +===== `truststore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS` + +[id="{version}-plugins-{type}s-{plugin}-url"] +===== `url` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +This output lets you send events to a +generic HTTP(S) endpoint + +This output will execute up to 'pool_max' requests in parallel for performance. +Consider this when tuning this plugin for performance. + +Additionally, note that when parallel execution is used strict ordering of events is not +guaranteed! + +Beware, this gem does not yet support codecs. Please use the 'format' option for now. +URL to use + +[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] +===== `validate_after_inactivity` + + * Value type is <> + * Default value is `200` + +How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. +# You may want to set this lower, possibly to 0 if you get connection errors regularly +Quoting the Apache commons docs (this client is based Apache Commmons): +'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.' +See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info] + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/http-v4.3.4.asciidoc b/docs/versioned-plugins/outputs/http-v4.3.4.asciidoc new file mode 100644 index 000000000..79519d7db --- /dev/null +++ b/docs/versioned-plugins/outputs/http-v4.3.4.asciidoc @@ -0,0 +1,379 @@ +:plugin: http +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.3.4 +:release_date: 2017-08-18 +:changelog_url: https://github.com/logstash-plugins/logstash-output-http/blob/v4.3.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Http output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output lets you send events to a generic HTTP(S) endpoint. + +This output will execute up to 'pool_max' requests in parallel for performance. +Consider this when tuning this plugin for performance. + +Additionally, note that when parallel execution is used strict ordering of events is not +guaranteed! + +Beware, this gem does not yet support codecs. Please use the 'format' option for now. 
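+
+For orientation, the smallest useful configuration sets only the two
+required options. This is an illustrative sketch; the URL is a
+placeholder:
+
+[source,ruby]
+output {
+  http {
+    url         => "https://example.org/ingest"  # placeholder
+    http_method => "post"
+    # events are sent as JSON by default (format => "json")
+  }
+}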
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Http Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-content_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-format>> |<>, one of `["json", "form", "message"]`|No +| <<{version}-plugins-{type}s-{plugin}-headers>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-http_method>> |<>, one of `["put", "post", "patch", "delete", "get", "head"]`|Yes +| <<{version}-plugins-{type}s-{plugin}-ignorable_codes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-mapping>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No +| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_failed>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retryable_codes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_validation>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] +===== `automatic_retries` + + * Value type is <> + * Default value is `1` + +How many times should the client retry a failing URL. We highly recommend NOT setting this value +to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! +Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. + +[id="{version}-plugins-{type}s-{plugin}-cacert"] +===== `cacert` + + * Value type is <> + * There is no default value for this setting. 
+ +If you need to use a custom X.509 CA (.pem certs) specify the path to that here + +[id="{version}-plugins-{type}s-{plugin}-client_cert"] +===== `client_cert` + + * Value type is <> + * There is no default value for this setting. + +If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here + +[id="{version}-plugins-{type}s-{plugin}-client_key"] +===== `client_key` + + * Value type is <> + * There is no default value for this setting. + +If you're using a client certificate specify the path to the encryption key here + +[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] +===== `connect_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for a connection to be established. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-content_type"] +===== `content_type` + + * Value type is <> + * There is no default value for this setting. + +Content type + +If not specified, this defaults to the following: + +* if format is "json", "application/json" +* if format is "form", "application/x-www-form-urlencoded" + +[id="{version}-plugins-{type}s-{plugin}-cookies"] +===== `cookies` + + * Value type is <> + * Default value is `true` + +Enable cookie support. With this enabled the client will persist cookies +across requests as a normal web browser would. Enabled by default + +[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] +===== `follow_redirects` + + * Value type is <> + * Default value is `true` + +Should redirects be followed? Defaults to `true` + +[id="{version}-plugins-{type}s-{plugin}-format"] +===== `format` + + * Value can be any of: `json`, `form`, `message` + * Default value is `"json"` + +Set the format of the http body. + +If form, then the body will be the mapping (or whole event) converted +into a query parameter string, e.g. `foo=bar&baz=fizz...` + +If message, then the body will be the result of formatting the event according to message + +Otherwise, the event is sent as json. + +[id="{version}-plugins-{type}s-{plugin}-headers"] +===== `headers` + + * Value type is <> + * There is no default value for this setting. + +Custom headers to use +format is `headers => ["X-My-Header", "%{host}"]` + +[id="{version}-plugins-{type}s-{plugin}-http_method"] +===== `http_method` + + * This is a required setting. + * Value can be any of: `put`, `post`, `patch`, `delete`, `get`, `head` + * There is no default value for this setting. + +The HTTP Verb. One of "put", "post", "patch", "delete", "get", "head" + +[id="{version}-plugins-{type}s-{plugin}-ignorable_codes"] +===== `ignorable_codes` + + * Value type is <> + * There is no default value for this setting. + +If you would like to consider some non-2xx codes to be successes +enumerate them here. Responses returning these codes will be considered successes + +[id="{version}-plugins-{type}s-{plugin}-keepalive"] +===== `keepalive` + + * Value type is <> + * Default value is `true` + +Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least +one with this to fix interactions with broken keepalive implementations. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! 
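+
+Because keepalive works best with at least one automatic retry (see the
+note under `keepalive` above), a sketch pairing the two might look like
+this; the URL is a placeholder:
+
+[source,ruby]
+output {
+  http {
+    url               => "https://example.org/ingest"  # placeholder
+    http_method       => "post"
+    keepalive         => true
+    automatic_retries => 1  # guards against servers that end keepalives early
+  }
+}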
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Specify the keystore password here.
+Note: most .jks files created with keytool require a password!
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_type"]
+===== `keystore_type`
+
+ * Value type is <>
+ * Default value is `"JKS"`
+
+Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS`.
+
+[id="{version}-plugins-{type}s-{plugin}-mapping"]
+===== `mapping`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+This lets you choose the structure and parts of the event that are sent.
+
+For example:
+[source,ruby]
+    mapping => {"foo" => "%{host}"
+                "bar" => "%{type}"}
+
+[id="{version}-plugins-{type}s-{plugin}-message"]
+===== `message`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+When `format => "message"`, the request body is the result of formatting the event
+according to this setting.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is <>
+ * Default value is `50`
+
+Max number of concurrent connections. Defaults to `50`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is <>
+ * Default value is `25`
+
+Max number of concurrent connections to a single host. Defaults to `25`.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set this if you'd like to use an HTTP proxy. This setting supports multiple configuration syntaxes:
+
+1. Proxy host in form: `http://proxy.org:1234`
+2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}`
+3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}`
+
+[id="{version}-plugins-{type}s-{plugin}-request_timeout"]
+===== `request_timeout`
+
+ * Value type is <>
+ * Default value is `60`
+
+Timeout (in seconds) for the entire request.
+(The underlying HTTP client is https://github.com/cheald/manticore[Manticore]; for an
+example of its usage see https://github.com/logstash-plugins/logstash-input-http_poller.)
+
+[id="{version}-plugins-{type}s-{plugin}-retry_failed"]
+===== `retry_failed`
+
+ * Value type is <>
+ * Default value is `true`
+
+Set this to `false` if you don't want this output to retry failed requests.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"]
+===== `retry_non_idempotent`
+
+ * Value type is <>
+ * Default value is `false`
+
+If `automatic_retries` is enabled, this will cause non-idempotent HTTP verbs (such as POST) to also be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-retryable_codes"]
+===== `retryable_codes`
+
+ * Value type is <>
+ * Default value is `[429, 500, 502, 503, 504]`
+
+If one of these response codes is encountered, this plugin will retry the request.
+
+[id="{version}-plugins-{type}s-{plugin}-socket_timeout"]
+===== `socket_timeout`
+
+ * Value type is <>
+ * Default value is `10`
+
+Timeout (in seconds) to wait for data on the socket. Default is `10` seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_validation"]
+===== `ssl_certificate_validation`
+
+ * Value type is <>
+ * Default value is `true`
+
+Set this to `false` to disable SSL/TLS certificate validation.
+Note: setting this to `false` is generally considered insecure!
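+
+As a sketch of verifying the server against a custom CA while keeping certificate
+validation enabled (all values are hypothetical placeholders):
+
+[source,ruby]
+    output {
+      http {
+        url => "https://logs.example.com/ingest"  # hypothetical endpoint
+        http_method => "post"
+        cacert => "/etc/logstash/my-ca.pem"       # hypothetical CA bundle
+        ssl_certificate_validation => true        # the default, shown for emphasis
+      }
+    }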
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you need to use a custom truststore (`.jks`), specify that here. This does not work with .pem certs!
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Specify the truststore password here.
+Note: most .jks files created with keytool require a password!
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
+===== `truststore_type`
+
+ * Value type is <>
+ * Default value is `"JKS"`
+
+Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`.
+
+[id="{version}-plugins-{type}s-{plugin}-url"]
+===== `url`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The URL to send events to.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <>
+ * Default value is `200`
+
+How long (in milliseconds) a kept-alive connection may sit idle before it is checked for
+staleness prior to being used for another request.
+You may want to set this lower, possibly to `0`, if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/http-v4.4.0.asciidoc b/docs/versioned-plugins/outputs/http-v4.4.0.asciidoc
new file mode 100644
index 000000000..3f38c4f33
--- /dev/null
+++ b/docs/versioned-plugins/outputs/http-v4.4.0.asciidoc
@@ -0,0 +1,389 @@
+:plugin: http
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.4.0
+:release_date: 2017-08-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-http/blob/v4.4.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Http output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output lets you send events to a generic HTTP(S) endpoint.
+
+This output will execute up to `pool_max` requests in parallel.
+Consider this when tuning this plugin for performance.
+
+Additionally, note that when parallel execution is used, strict ordering of events is not
+guaranteed!
+
+Beware, this gem does not yet support codecs. Please use the `format` option for now.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Http Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-content_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-format>> |<>, one of `["json", "form", "message"]`|No
+| <<{version}-plugins-{type}s-{plugin}-headers>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-http_method>> |<>, one of `["put", "post", "patch", "delete", "get", "head"]`|Yes
+| <<{version}-plugins-{type}s-{plugin}-ignorable_codes>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-mapping>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_failed>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retryable_codes>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_validation>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-automatic_retries"]
+===== `automatic_retries`
+
+ * Value type is <>
+ * Default value is `1`
+
+How many times the client should retry a failing URL. We highly recommend NOT setting this value
+to zero if keepalive is enabled; some servers incorrectly end keepalives early, requiring a retry.
+Note: unless `retry_non_idempotent` is set, only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you need to use a custom X.509 CA (.pem certs), specify the path to it here.
+
+[id="{version}-plugins-{type}s-{plugin}-client_cert"]
+===== `client_cert`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you'd like to use a client certificate (note: most people don't want this), set the path to the X.509 cert here.
+
+[id="{version}-plugins-{type}s-{plugin}-client_key"]
+===== `client_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you're using a client certificate, specify the path to the encryption key here.
+
+[id="{version}-plugins-{type}s-{plugin}-connect_timeout"]
+===== `connect_timeout`
+
+ * Value type is <>
+ * Default value is `10`
+
+Timeout (in seconds) to wait for a connection to be established. Default is `10` seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-content_type"]
+===== `content_type`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Content type for the HTTP body.
+
+If not specified, this defaults to the following:
+
+* if format is "json", "application/json"
+* if format is "form", "application/x-www-form-urlencoded"
+
+[id="{version}-plugins-{type}s-{plugin}-cookies"]
+===== `cookies`
+
+ * Value type is <>
+ * Default value is `true`
+
+Enable cookie support. With this enabled, the client will persist cookies
+across requests as a normal web browser would. Enabled by default.
+
+[id="{version}-plugins-{type}s-{plugin}-follow_redirects"]
+===== `follow_redirects`
+
+ * Value type is <>
+ * Default value is `true`
+
+Should redirects be followed? Defaults to `true`.
+
+[id="{version}-plugins-{type}s-{plugin}-format"]
+===== `format`
+
+ * Value can be any of: `json`, `form`, `message`
+ * Default value is `"json"`
+
+Set the format of the HTTP body.
+
+If `form`, then the body will be the mapping (or whole event) converted
+into a query parameter string, e.g. `foo=bar&baz=fizz...`
+
+If `message`, then the body will be the result of formatting the event according to `message`.
+
+Otherwise, the event is sent as JSON.
+
+[id="{version}-plugins-{type}s-{plugin}-headers"]
+===== `headers`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Custom headers to use. The format is `headers => ["X-My-Header", "%{host}"]`.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is <>
+ * Default value is `false`
+
+Enable request compression support. With this enabled, the plugin will compress
+HTTP requests using gzip.
+
+[id="{version}-plugins-{type}s-{plugin}-http_method"]
+===== `http_method`
+
+ * This is a required setting.
+ * Value can be any of: `put`, `post`, `patch`, `delete`, `get`, `head`
+ * There is no default value for this setting.
+
+The HTTP verb. One of `put`, `post`, `patch`, `delete`, `get`, `head`.
+
+[id="{version}-plugins-{type}s-{plugin}-ignorable_codes"]
+===== `ignorable_codes`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you would like to consider some non-2xx codes to be successes,
+enumerate them here. Responses returning these codes will be considered successes.
+
+[id="{version}-plugins-{type}s-{plugin}-keepalive"]
+===== `keepalive`
+
+ * Value type is <>
+ * Default value is `true`
+
+Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least
+one when this is enabled, to fix interactions with broken keepalive implementations.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you need to use a custom keystore (`.jks`), specify that here. This does not work with .pem keys!
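+
+To make the interplay of `format` and `mapping` concrete, here is a hedged sketch that
+submits selected event fields as a URL-encoded form; the endpoint is a hypothetical
+placeholder:
+
+[source,ruby]
+    output {
+      http {
+        url => "https://forms.example.com/submit"  # hypothetical endpoint
+        http_method => "post"
+        format => "form"                           # body becomes foo=...&bar=...
+        mapping => {"foo" => "%{host}"
+                    "bar" => "%{type}"}
+      }
+    }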
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Specify the keystore password here.
+Note: most .jks files created with keytool require a password!
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_type"]
+===== `keystore_type`
+
+ * Value type is <>
+ * Default value is `"JKS"`
+
+Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS`.
+
+[id="{version}-plugins-{type}s-{plugin}-mapping"]
+===== `mapping`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+This lets you choose the structure and parts of the event that are sent.
+
+For example:
+[source,ruby]
+    mapping => {"foo" => "%{host}"
+                "bar" => "%{type}"}
+
+[id="{version}-plugins-{type}s-{plugin}-message"]
+===== `message`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+When `format => "message"`, the request body is the result of formatting the event
+according to this setting.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is <>
+ * Default value is `50`
+
+Max number of concurrent connections. Defaults to `50`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is <>
+ * Default value is `25`
+
+Max number of concurrent connections to a single host. Defaults to `25`.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set this if you'd like to use an HTTP proxy. This setting supports multiple configuration syntaxes:
+
+1. Proxy host in form: `http://proxy.org:1234`
+2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}`
+3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}`
+
+[id="{version}-plugins-{type}s-{plugin}-request_timeout"]
+===== `request_timeout`
+
+ * Value type is <>
+ * Default value is `60`
+
+Timeout (in seconds) for the entire request.
+(The underlying HTTP client is https://github.com/cheald/manticore[Manticore]; for an
+example of its usage see https://github.com/logstash-plugins/logstash-input-http_poller.)
+
+[id="{version}-plugins-{type}s-{plugin}-retry_failed"]
+===== `retry_failed`
+
+ * Value type is <>
+ * Default value is `true`
+
+Set this to `false` if you don't want this output to retry failed requests.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"]
+===== `retry_non_idempotent`
+
+ * Value type is <>
+ * Default value is `false`
+
+If `automatic_retries` is enabled, this will cause non-idempotent HTTP verbs (such as POST) to also be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-retryable_codes"]
+===== `retryable_codes`
+
+ * Value type is <>
+ * Default value is `[429, 500, 502, 503, 504]`
+
+If one of these response codes is encountered, this plugin will retry the request.
+
+[id="{version}-plugins-{type}s-{plugin}-socket_timeout"]
+===== `socket_timeout`
+
+ * Value type is <>
+ * Default value is `10`
+
+Timeout (in seconds) to wait for data on the socket. Default is `10` seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_validation"]
+===== `ssl_certificate_validation`
+
+ * Value type is <>
+ * Default value is `true`
+
+Set this to `false` to disable SSL/TLS certificate validation.
+Note: setting this to `false` is generally considered insecure!
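+
+Pulling several of the options above together, a sketch of a proxied output with retry
+tuning; the proxy host and endpoint are hypothetical:
+
+[source,ruby]
+    output {
+      http {
+        url => "https://collector.example.org"    # hypothetical endpoint
+        http_method => "post"
+        proxy => "http://proxy.example.org:3128"  # simple URL form of `proxy`
+        automatic_retries => 2
+        retry_non_idempotent => true              # also retry POST
+      }
+    }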
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you need to use a custom truststore (`.jks`), specify that here. This does not work with .pem certs!
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Specify the truststore password here.
+Note: most .jks files created with keytool require a password!
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
+===== `truststore_type`
+
+ * Value type is <>
+ * Default value is `"JKS"`
+
+Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`.
+
+[id="{version}-plugins-{type}s-{plugin}-url"]
+===== `url`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The URL to send events to.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <>
+ * Default value is `200`
+
+How long (in milliseconds) a kept-alive connection may sit idle before it is checked for
+staleness prior to being used for another request.
+You may want to set this lower, possibly to `0`, if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/http-v5.0.0.asciidoc b/docs/versioned-plugins/outputs/http-v5.0.0.asciidoc
new file mode 100644
index 000000000..6aa7924ce
--- /dev/null
+++ b/docs/versioned-plugins/outputs/http-v5.0.0.asciidoc
@@ -0,0 +1,369 @@
+:plugin: http
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.0
+:release_date: 2017-08-02
+:changelog_url: https://github.com/logstash-plugins/logstash-output-http/blob/v5.0.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Http output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output lets you send events to a generic HTTP(S) endpoint.
+
+This output will execute up to `pool_max` requests in parallel.
+Consider this when tuning this plugin for performance.
+
+Additionally, note that when parallel execution is used, strict ordering of events is not
+guaranteed!
+
+Beware, this gem does not yet support codecs. Please use the `format` option for now.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Http Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-content_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-format>> |<>, one of `["json", "form", "message"]`|No
+| <<{version}-plugins-{type}s-{plugin}-headers>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-http_method>> |<>, one of `["put", "post", "patch", "delete", "get", "head"]`|Yes
+| <<{version}-plugins-{type}s-{plugin}-ignorable_codes>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-mapping>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_failed>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retryable_codes>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-automatic_retries"]
+===== `automatic_retries`
+
+ * Value type is <>
+ * Default value is `1`
+
+How many times the client should retry a failing URL. We highly recommend NOT setting this value
+to zero if keepalive is enabled; some servers incorrectly end keepalives early, requiring a retry.
+Note: unless `retry_non_idempotent` is set, only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you need to use a custom X.509 CA (.pem certs), specify the path to it here.
+
+[id="{version}-plugins-{type}s-{plugin}-client_cert"]
+===== `client_cert`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you'd like to use a client certificate (note: most people don't want this), set the path to the X.509 cert here.
+
+[id="{version}-plugins-{type}s-{plugin}-client_key"]
+===== `client_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you're using a client certificate, specify the path to the encryption key here.
+
+[id="{version}-plugins-{type}s-{plugin}-connect_timeout"]
+===== `connect_timeout`
+
+ * Value type is <>
+ * Default value is `10`
+
+Timeout (in seconds) to wait for a connection to be established. Default is `10` seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-content_type"]
+===== `content_type`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Content type for the HTTP body.
+
+If not specified, this defaults to the following:
+
+* if format is "json", "application/json"
+* if format is "form", "application/x-www-form-urlencoded"
+
+[id="{version}-plugins-{type}s-{plugin}-cookies"]
+===== `cookies`
+
+ * Value type is <>
+ * Default value is `true`
+
+Enable cookie support. With this enabled, the client will persist cookies
+across requests as a normal web browser would. Enabled by default.
+
+[id="{version}-plugins-{type}s-{plugin}-follow_redirects"]
+===== `follow_redirects`
+
+ * Value type is <>
+ * Default value is `true`
+
+Should redirects be followed? Defaults to `true`.
+
+[id="{version}-plugins-{type}s-{plugin}-format"]
+===== `format`
+
+ * Value can be any of: `json`, `form`, `message`
+ * Default value is `"json"`
+
+Set the format of the HTTP body.
+
+If `form`, then the body will be the mapping (or whole event) converted
+into a query parameter string, e.g. `foo=bar&baz=fizz...`
+
+If `message`, then the body will be the result of formatting the event according to `message`.
+
+Otherwise, the event is sent as JSON.
+
+[id="{version}-plugins-{type}s-{plugin}-headers"]
+===== `headers`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Custom headers to use. The format is `headers => ["X-My-Header", "%{host}"]`.
+
+[id="{version}-plugins-{type}s-{plugin}-http_method"]
+===== `http_method`
+
+ * This is a required setting.
+ * Value can be any of: `put`, `post`, `patch`, `delete`, `get`, `head`
+ * There is no default value for this setting.
+
+The HTTP verb. One of `put`, `post`, `patch`, `delete`, `get`, `head`.
+
+[id="{version}-plugins-{type}s-{plugin}-ignorable_codes"]
+===== `ignorable_codes`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you would like to consider some non-2xx codes to be successes,
+enumerate them here. Responses returning these codes will be considered successes.
+
+[id="{version}-plugins-{type}s-{plugin}-keepalive"]
+===== `keepalive`
+
+ * Value type is <>
+ * Default value is `true`
+
+Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least
+one when this is enabled, to fix interactions with broken keepalive implementations.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you need to use a custom keystore (`.jks`), specify that here. This does not work with .pem keys!
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Specify the keystore password here.
+Note: most .jks files created with keytool require a password!
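+
+As a hedged sketch of the `message` format, the body below is built by interpolating
+event fields into the `message` template; the endpoint and template are hypothetical:
+
+[source,ruby]
+    output {
+      http {
+        url => "https://alerts.example.net/notify"  # hypothetical endpoint
+        http_method => "post"
+        format => "message"
+        content_type => "text/plain"
+        message => "host %{host} sent a %{type} event"
+      }
+    }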
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_type"]
+===== `keystore_type`
+
+ * Value type is <>
+ * Default value is `"JKS"`
+
+Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS`.
+
+[id="{version}-plugins-{type}s-{plugin}-mapping"]
+===== `mapping`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+This lets you choose the structure and parts of the event that are sent.
+
+For example:
+[source,ruby]
+    mapping => {"foo" => "%{host}"
+                "bar" => "%{type}"}
+
+[id="{version}-plugins-{type}s-{plugin}-message"]
+===== `message`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+When `format => "message"`, the request body is the result of formatting the event
+according to this setting.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is <>
+ * Default value is `50`
+
+Max number of concurrent connections. Defaults to `50`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is <>
+ * Default value is `25`
+
+Max number of concurrent connections to a single host. Defaults to `25`.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set this if you'd like to use an HTTP proxy. This setting supports multiple configuration syntaxes:
+
+1. Proxy host in form: `http://proxy.org:1234`
+2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}`
+3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}`
+
+[id="{version}-plugins-{type}s-{plugin}-request_timeout"]
+===== `request_timeout`
+
+ * Value type is <>
+ * Default value is `60`
+
+Timeout (in seconds) for the entire request.
+(The underlying HTTP client is https://github.com/cheald/manticore[Manticore]; for an
+example of its usage see https://github.com/logstash-plugins/logstash-input-http_poller.)
+
+[id="{version}-plugins-{type}s-{plugin}-retry_failed"]
+===== `retry_failed`
+
+ * Value type is <>
+ * Default value is `true`
+
+Set this to `false` if you don't want this output to retry failed requests.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"]
+===== `retry_non_idempotent`
+
+ * Value type is <>
+ * Default value is `false`
+
+If `automatic_retries` is enabled, this will cause non-idempotent HTTP verbs (such as POST) to also be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-retryable_codes"]
+===== `retryable_codes`
+
+ * Value type is <>
+ * Default value is `[429, 500, 502, 503, 504]`
+
+If one of these response codes is encountered, this plugin will retry the request.
+
+[id="{version}-plugins-{type}s-{plugin}-socket_timeout"]
+===== `socket_timeout`
+
+ * Value type is <>
+ * Default value is `10`
+
+Timeout (in seconds) to wait for data on the socket. Default is `10` seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you need to use a custom truststore (`.jks`), specify that here. This does not work with .pem certs!
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Specify the truststore password here.
+Note: most .jks files created with keytool require a password!
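+
+For completeness, a minimal sketch of server verification through a custom truststore,
+using the two settings above and the `truststore_type` documented next; the path and
+password are hypothetical placeholders:
+
+[source,ruby]
+    output {
+      http {
+        url => "https://logs.example.com/ingest"      # hypothetical endpoint
+        http_method => "post"
+        truststore => "/etc/logstash/truststore.jks"  # hypothetical path
+        truststore_password => "changeit"             # hypothetical password
+        truststore_type => "JKS"
+      }
+    }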
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
+===== `truststore_type`
+
+ * Value type is <>
+ * Default value is `"JKS"`
+
+Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`.
+
+[id="{version}-plugins-{type}s-{plugin}-url"]
+===== `url`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The URL to send events to.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <>
+ * Default value is `200`
+
+How long (in milliseconds) a kept-alive connection may sit idle before it is checked for
+staleness prior to being used for another request.
+You may want to set this lower, possibly to `0`, if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/http-v5.0.1.asciidoc b/docs/versioned-plugins/outputs/http-v5.0.1.asciidoc
new file mode 100644
index 000000000..52bc55d47
--- /dev/null
+++ b/docs/versioned-plugins/outputs/http-v5.0.1.asciidoc
@@ -0,0 +1,369 @@
+:plugin: http
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.1
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-output-http/blob/v5.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Http output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output lets you send events to a generic HTTP(S) endpoint.
+
+This output will execute up to `pool_max` requests in parallel.
+Consider this when tuning this plugin for performance.
+
+Additionally, note that when parallel execution is used, strict ordering of events is not
+guaranteed!
+
+Beware, this gem does not yet support codecs. Please use the `format` option for now.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Http Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-content_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-format>> |<>, one of `["json", "form", "message"]`|No
+| <<{version}-plugins-{type}s-{plugin}-headers>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-http_method>> |<>, one of `["put", "post", "patch", "delete", "get", "head"]`|Yes
+| <<{version}-plugins-{type}s-{plugin}-ignorable_codes>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-mapping>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_failed>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retryable_codes>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-automatic_retries"]
+===== `automatic_retries`
+
+ * Value type is <>
+ * Default value is `1`
+
+How many times the client should retry a failing URL. We highly recommend NOT setting this value
+to zero if keepalive is enabled; some servers incorrectly end keepalives early, requiring a retry.
+Note: unless `retry_non_idempotent` is set, only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you need to use a custom X.509 CA (.pem certs), specify the path to it here.
+
+[id="{version}-plugins-{type}s-{plugin}-client_cert"]
+===== `client_cert`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you'd like to use a client certificate (note: most people don't want this), set the path to the X.509 cert here.
+
+[id="{version}-plugins-{type}s-{plugin}-client_key"]
+===== `client_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you're using a client certificate, specify the path to the encryption key here.
+
+[id="{version}-plugins-{type}s-{plugin}-connect_timeout"]
+===== `connect_timeout`
+
+ * Value type is <>
+ * Default value is `10`
+
+Timeout (in seconds) to wait for a connection to be established. Default is `10` seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-content_type"]
+===== `content_type`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Content type for the HTTP body.
+
+If not specified, this defaults to the following:
+
+* if format is "json", "application/json"
+* if format is "form", "application/x-www-form-urlencoded"
+
+[id="{version}-plugins-{type}s-{plugin}-cookies"]
+===== `cookies`
+
+ * Value type is <>
+ * Default value is `true`
+
+Enable cookie support. With this enabled, the client will persist cookies
+across requests as a normal web browser would. Enabled by default.
+
+[id="{version}-plugins-{type}s-{plugin}-follow_redirects"]
+===== `follow_redirects`
+
+ * Value type is <>
+ * Default value is `true`
+
+Should redirects be followed? Defaults to `true`.
+
+[id="{version}-plugins-{type}s-{plugin}-format"]
+===== `format`
+
+ * Value can be any of: `json`, `form`, `message`
+ * Default value is `"json"`
+
+Set the format of the HTTP body.
+
+If `form`, then the body will be the mapping (or whole event) converted
+into a query parameter string, e.g. `foo=bar&baz=fizz...`
+
+If `message`, then the body will be the result of formatting the event according to `message`.
+
+Otherwise, the event is sent as JSON.
+
+[id="{version}-plugins-{type}s-{plugin}-headers"]
+===== `headers`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Custom headers to use. The format is `headers => ["X-My-Header", "%{host}"]`.
+
+[id="{version}-plugins-{type}s-{plugin}-http_method"]
+===== `http_method`
+
+ * This is a required setting.
+ * Value can be any of: `put`, `post`, `patch`, `delete`, `get`, `head`
+ * There is no default value for this setting.
+
+The HTTP verb. One of `put`, `post`, `patch`, `delete`, `get`, `head`.
+
+[id="{version}-plugins-{type}s-{plugin}-ignorable_codes"]
+===== `ignorable_codes`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you would like to consider some non-2xx codes to be successes,
+enumerate them here. Responses returning these codes will be considered successes.
+
+[id="{version}-plugins-{type}s-{plugin}-keepalive"]
+===== `keepalive`
+
+ * Value type is <>
+ * Default value is `true`
+
+Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least
+one when this is enabled, to fix interactions with broken keepalive implementations.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you need to use a custom keystore (`.jks`), specify that here. This does not work with .pem keys!
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Specify the keystore password here.
+Note: most .jks files created with keytool require a password!
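+
+To illustrate the structured `proxy` syntax documented below, here is a hedged sketch
+using the hash form; the host and credentials are hypothetical:
+
+[source,ruby]
+    output {
+      http {
+        url => "https://collector.example.org"  # hypothetical endpoint
+        http_method => "post"
+        proxy => {host => "proxy.example.org"
+                  port => 8080
+                  scheme => 'http'
+                  user => 'proxyuser'           # hypothetical credentials
+                  password => 'proxypass'}
+      }
+    }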
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_type"]
+===== `keystore_type`
+
+ * Value type is <>
+ * Default value is `"JKS"`
+
+Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS`.
+
+[id="{version}-plugins-{type}s-{plugin}-mapping"]
+===== `mapping`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+This lets you choose the structure and parts of the event that are sent.
+
+For example:
+[source,ruby]
+    mapping => {"foo" => "%{host}"
+                "bar" => "%{type}"}
+
+[id="{version}-plugins-{type}s-{plugin}-message"]
+===== `message`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+When `format => "message"`, the request body is the result of formatting the event
+according to this setting.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is <>
+ * Default value is `50`
+
+Max number of concurrent connections. Defaults to `50`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is <>
+ * Default value is `25`
+
+Max number of concurrent connections to a single host. Defaults to `25`.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set this if you'd like to use an HTTP proxy. This setting supports multiple configuration syntaxes:
+
+1. Proxy host in form: `http://proxy.org:1234`
+2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}`
+3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}`
+
+[id="{version}-plugins-{type}s-{plugin}-request_timeout"]
+===== `request_timeout`
+
+ * Value type is <>
+ * Default value is `60`
+
+Timeout (in seconds) for the entire request.
+(The underlying HTTP client is https://github.com/cheald/manticore[Manticore]; for an
+example of its usage see https://github.com/logstash-plugins/logstash-input-http_poller.)
+
+[id="{version}-plugins-{type}s-{plugin}-retry_failed"]
+===== `retry_failed`
+
+ * Value type is <>
+ * Default value is `true`
+
+Set this to `false` if you don't want this output to retry failed requests.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"]
+===== `retry_non_idempotent`
+
+ * Value type is <>
+ * Default value is `false`
+
+If `automatic_retries` is enabled, this will cause non-idempotent HTTP verbs (such as POST) to also be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-retryable_codes"]
+===== `retryable_codes`
+
+ * Value type is <>
+ * Default value is `[429, 500, 502, 503, 504]`
+
+If one of these response codes is encountered, this plugin will retry the request.
+
+[id="{version}-plugins-{type}s-{plugin}-socket_timeout"]
+===== `socket_timeout`
+
+ * Value type is <>
+ * Default value is `10`
+
+Timeout (in seconds) to wait for data on the socket. Default is `10` seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you need to use a custom truststore (`.jks`), specify that here. This does not work with .pem certs!
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Specify the truststore password here.
+Note: most .jks files created with keytool require a password!
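+
+A sketch of connection-pool and timeout tuning using the settings above; the numbers are
+illustrative assumptions, not recommendations:
+
+[source,ruby]
+    output {
+      http {
+        url => "https://logs.example.com/ingest"  # hypothetical endpoint
+        http_method => "post"
+        connect_timeout => 5                      # seconds
+        socket_timeout => 10                      # seconds
+        request_timeout => 30                     # seconds, for the entire request
+        pool_max => 100
+        pool_max_per_route => 50
+        validate_after_inactivity => 100          # milliseconds
+      }
+    }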
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
+===== `truststore_type`
+
+ * Value type is <>
+ * Default value is `"JKS"`
+
+Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`.
+
+[id="{version}-plugins-{type}s-{plugin}-url"]
+===== `url`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The URL to send events to.
+
+[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
+===== `validate_after_inactivity`
+
+ * Value type is <>
+ * Default value is `200`
+
+How long (in milliseconds) a kept-alive connection may sit idle before it is checked for
+staleness prior to being used for another request.
+You may want to set this lower, possibly to `0`, if you get connection errors regularly.
+Quoting the Apache Commons docs (this client is based on Apache Commons):
+'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.'
+See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/http-v5.1.0.asciidoc b/docs/versioned-plugins/outputs/http-v5.1.0.asciidoc
new file mode 100644
index 000000000..f21efdb39
--- /dev/null
+++ b/docs/versioned-plugins/outputs/http-v5.1.0.asciidoc
@@ -0,0 +1,379 @@
+:plugin: http
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.1.0
+:release_date: 2017-08-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-http/blob/v5.1.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Http output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output lets you send events to a generic HTTP(S) endpoint.
+
+This output will execute up to `pool_max` requests in parallel.
+Consider this when tuning this plugin for performance.
+
+Additionally, note that when parallel execution is used, strict ordering of events is not
+guaranteed!
+
+Beware, this gem does not yet support codecs. Please use the `format` option for now.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Http Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-content_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-format>> |<>, one of `["json", "form", "message"]`|No
+| <<{version}-plugins-{type}s-{plugin}-headers>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-http_method>> |<>, one of `["put", "post", "patch", "delete", "get", "head"]`|Yes
+| <<{version}-plugins-{type}s-{plugin}-ignorable_codes>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-mapping>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_failed>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retryable_codes>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-automatic_retries"]
+===== `automatic_retries`
+
+ * Value type is <>
+ * Default value is `1`
+
+How many times the client should retry a failing URL. We highly recommend NOT setting this value
+to zero if keepalive is enabled; some servers incorrectly end keepalives early, requiring a retry.
+Note: unless `retry_non_idempotent` is set, only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you need to use a custom X.509 CA (.pem certs), specify the path to it here.
+
+[id="{version}-plugins-{type}s-{plugin}-client_cert"]
+===== `client_cert`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you'd like to use a client certificate (note: most people don't want this), set the path to the X.509 cert here.
+
+[id="{version}-plugins-{type}s-{plugin}-client_key"]
+===== `client_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you're using a client certificate, specify the path to the encryption key here.
+
+[id="{version}-plugins-{type}s-{plugin}-connect_timeout"]
+===== `connect_timeout`
+
+ * Value type is <>
+ * Default value is `10`
+
+Timeout (in seconds) to wait for a connection to be established. Default is `10` seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-content_type"]
+===== `content_type`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Content type for the HTTP body.
+
+If not specified, this defaults to the following:
+
+* if format is "json", "application/json"
+* if format is "form", "application/x-www-form-urlencoded"
+
+[id="{version}-plugins-{type}s-{plugin}-cookies"]
+===== `cookies`
+
+ * Value type is <>
+ * Default value is `true`
+
+Enable cookie support. With this enabled, the client will persist cookies
+across requests as a normal web browser would. Enabled by default.
+
+[id="{version}-plugins-{type}s-{plugin}-follow_redirects"]
+===== `follow_redirects`
+
+ * Value type is <>
+ * Default value is `true`
+
+Should redirects be followed? Defaults to `true`.
+
+[id="{version}-plugins-{type}s-{plugin}-format"]
+===== `format`
+
+ * Value can be any of: `json`, `form`, `message`
+ * Default value is `"json"`
+
+Set the format of the HTTP body.
+
+If `form`, then the body will be the mapping (or whole event) converted
+into a query parameter string, e.g. `foo=bar&baz=fizz...`
+
+If `message`, then the body will be the result of formatting the event according to `message`.
+
+Otherwise, the event is sent as JSON.
+
+[id="{version}-plugins-{type}s-{plugin}-headers"]
+===== `headers`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Custom headers to use. The format is `headers => ["X-My-Header", "%{host}"]`.
+
+[id="{version}-plugins-{type}s-{plugin}-http_compression"]
+===== `http_compression`
+
+ * Value type is <>
+ * Default value is `false`
+
+Enable request compression support. With this enabled, the plugin will compress
+HTTP requests using gzip.
+
+[id="{version}-plugins-{type}s-{plugin}-http_method"]
+===== `http_method`
+
+ * This is a required setting.
+ * Value can be any of: `put`, `post`, `patch`, `delete`, `get`, `head`
+ * There is no default value for this setting.
+
+The HTTP verb. One of `put`, `post`, `patch`, `delete`, `get`, `head`.
+
+[id="{version}-plugins-{type}s-{plugin}-ignorable_codes"]
+===== `ignorable_codes`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you would like to consider some non-2xx codes to be successes,
+enumerate them here. Responses returning these codes will be considered successes.
+
+[id="{version}-plugins-{type}s-{plugin}-keepalive"]
+===== `keepalive`
+
+ * Value type is <>
+ * Default value is `true`
+
+Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least
+one when this is enabled, to fix interactions with broken keepalive implementations.
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you need to use a custom keystore (`.jks`), specify that here. This does not work with .pem keys!
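+
+Since `http_compression` is documented above in this version, here is a hedged sketch
+combining gzip request compression with a custom header; the endpoint is a hypothetical
+placeholder:
+
+[source,ruby]
+    output {
+      http {
+        url => "https://ingest.example.com/bulk"  # hypothetical endpoint
+        http_method => "post"
+        http_compression => true                  # gzip the request body
+        headers => ["X-My-Header", "%{host}"]     # format shown under `headers`
+      }
+    }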
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Specify the keystore password here.
+Note: most .jks files created with keytool require a password!
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_type"]
+===== `keystore_type`
+
+ * Value type is <>
+ * Default value is `"JKS"`
+
+Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS`.
+
+[id="{version}-plugins-{type}s-{plugin}-mapping"]
+===== `mapping`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+This lets you choose the structure and parts of the event that are sent.
+
+For example:
+[source,ruby]
+    mapping => {"foo" => "%{host}"
+                "bar" => "%{type}"}
+
+[id="{version}-plugins-{type}s-{plugin}-message"]
+===== `message`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+When `format => "message"`, the request body is the result of formatting the event
+according to this setting.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+ * Value type is <>
+ * Default value is `50`
+
+Max number of concurrent connections. Defaults to `50`.
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
+===== `pool_max_per_route`
+
+ * Value type is <>
+ * Default value is `25`
+
+Max number of concurrent connections to a single host. Defaults to `25`.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set this if you'd like to use an HTTP proxy. This setting supports multiple configuration syntaxes:
+
+1. Proxy host in form: `http://proxy.org:1234`
+2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}`
+3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}`
+
+[id="{version}-plugins-{type}s-{plugin}-request_timeout"]
+===== `request_timeout`
+
+ * Value type is <>
+ * Default value is `60`
+
+Timeout (in seconds) for the entire request.
+(The underlying HTTP client is https://github.com/cheald/manticore[Manticore]; for an
+example of its usage see https://github.com/logstash-plugins/logstash-input-http_poller.)
+
+[id="{version}-plugins-{type}s-{plugin}-retry_failed"]
+===== `retry_failed`
+
+ * Value type is <>
+ * Default value is `true`
+
+Set this to `false` if you don't want this output to retry failed requests.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"]
+===== `retry_non_idempotent`
+
+ * Value type is <>
+ * Default value is `false`
+
+If `automatic_retries` is enabled, this will cause non-idempotent HTTP verbs (such as POST) to also be retried.
+
+[id="{version}-plugins-{type}s-{plugin}-retryable_codes"]
+===== `retryable_codes`
+
+ * Value type is <>
+ * Default value is `[429, 500, 502, 503, 504]`
+
+If one of these response codes is encountered, this plugin will retry the request.
+
+[id="{version}-plugins-{type}s-{plugin}-socket_timeout"]
+===== `socket_timeout`
+
+ * Value type is <>
+ * Default value is `10`
+
+Timeout (in seconds) to wait for data on the socket. Default is `10` seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you need to use a custom truststore (`.jks`), specify that here. This does not work with .pem certs!
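+
+A sketch of response-code handling with the settings above; treating `404` as a success
+is an illustrative assumption, not a recommendation:
+
+[source,ruby]
+    output {
+      http {
+        url => "https://collector.example.org"  # hypothetical endpoint
+        http_method => "post"
+        ignorable_codes => [404]                # consider 404 a success
+        retryable_codes => [429, 500, 502, 503, 504]
+        retry_failed => true
+      }
+    }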
+ +[id="{version}-plugins-{type}s-{plugin}-truststore_password"] +===== `truststore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the truststore password here. +Note, most .jks files created with keytool require a password! + +[id="{version}-plugins-{type}s-{plugin}-truststore_type"] +===== `truststore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS` + +[id="{version}-plugins-{type}s-{plugin}-url"] +===== `url` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +URL to use + +[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] +===== `validate_after_inactivity` + + * Value type is <> + * Default value is `200` + +How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. +You may want to set this lower, possibly to 0 if you get connection errors regularly +Quoting the Apache commons docs (this client is based Apache Commmons): +'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.' +See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info] + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/http-v5.1.1.asciidoc b/docs/versioned-plugins/outputs/http-v5.1.1.asciidoc new file mode 100644 index 000000000..1ea2e1a5f --- /dev/null +++ b/docs/versioned-plugins/outputs/http-v5.1.1.asciidoc @@ -0,0 +1,379 @@ +:plugin: http +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v5.1.1 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-output-http/blob/v5.1.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Http output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output lets you send events to a generic HTTP(S) endpoint. + +This output will execute up to 'pool_max' requests in parallel for performance. +Consider this when tuning this plugin for performance. + +Additionally, note that when parallel execution is used strict ordering of events is not +guaranteed! + +Beware, this gem does not yet support codecs. Please use the 'format' option for now. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Http Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-content_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-format>> |<>, one of `["json", "form", "message"]`|No +| <<{version}-plugins-{type}s-{plugin}-headers>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-http_method>> |<>, one of `["put", "post", "patch", "delete", "get", "head"]`|Yes +| <<{version}-plugins-{type}s-{plugin}-ignorable_codes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-mapping>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No +| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_failed>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retryable_codes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] +===== `automatic_retries` + + * Value type is <> + * Default value is `1` + +How many times should the client retry a failing URL. We highly recommend NOT setting this value +to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! +Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. + +[id="{version}-plugins-{type}s-{plugin}-cacert"] +===== `cacert` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom X.509 CA (.pem certs) specify the path to that here + +[id="{version}-plugins-{type}s-{plugin}-client_cert"] +===== `client_cert` + + * Value type is <> + * There is no default value for this setting. 
+ +If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here + +[id="{version}-plugins-{type}s-{plugin}-client_key"] +===== `client_key` + + * Value type is <> + * There is no default value for this setting. + +If you're using a client certificate specify the path to the encryption key here + +[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] +===== `connect_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for a connection to be established. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-content_type"] +===== `content_type` + + * Value type is <> + * There is no default value for this setting. + +Content type + +If not specified, this defaults to the following: + +* if format is "json", "application/json" +* if format is "form", "application/x-www-form-urlencoded" + +[id="{version}-plugins-{type}s-{plugin}-cookies"] +===== `cookies` + + * Value type is <> + * Default value is `true` + +Enable cookie support. With this enabled the client will persist cookies +across requests as a normal web browser would. Enabled by default + +[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] +===== `follow_redirects` + + * Value type is <> + * Default value is `true` + +Should redirects be followed? Defaults to `true` + +[id="{version}-plugins-{type}s-{plugin}-format"] +===== `format` + + * Value can be any of: `json`, `form`, `message` + * Default value is `"json"` + +Set the format of the http body. + +If form, then the body will be the mapping (or whole event) converted +into a query parameter string, e.g. `foo=bar&baz=fizz...` + +If message, then the body will be the result of formatting the event according to message + +Otherwise, the event is sent as json. + +[id="{version}-plugins-{type}s-{plugin}-headers"] +===== `headers` + + * Value type is <> + * There is no default value for this setting. + +Custom headers to use +format is `headers => ["X-My-Header", "%{host}"]` + +[id="{version}-plugins-{type}s-{plugin}-http_compression"] +===== `http_compression` + + * Value type is <> + * Default value is `false` + +Enable request compression support. With this enabled the plugin will compress +http requests using gzip. + +[id="{version}-plugins-{type}s-{plugin}-http_method"] +===== `http_method` + + * This is a required setting. + * Value can be any of: `put`, `post`, `patch`, `delete`, `get`, `head` + * There is no default value for this setting. + +The HTTP Verb. One of "put", "post", "patch", "delete", "get", "head" + +[id="{version}-plugins-{type}s-{plugin}-ignorable_codes"] +===== `ignorable_codes` + + * Value type is <> + * There is no default value for this setting. + +If you would like to consider some non-2xx codes to be successes +enumerate them here. Responses returning these codes will be considered successes + +[id="{version}-plugins-{type}s-{plugin}-keepalive"] +===== `keepalive` + + * Value type is <> + * Default value is `true` + +Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least +one with this to fix interactions with broken keepalive implementations. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! 
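+ +Putting a few of the options above together, a minimal sketch that posts each event as a JSON document with a custom header. The endpoint is a placeholder, and the required `url` setting is documented later in this section: + +[source,ruby] +    output { +      http { +        url => "https://example.com/ingest" +        http_method => "post" +        format => "json" +        headers => ["X-My-Header", "%{host}"] +      } +    } +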
+ +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the keystore password here. +Note, most .jks files created with keytool require a password! + +[id="{version}-plugins-{type}s-{plugin}-keystore_type"] +===== `keystore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS` + +[id="{version}-plugins-{type}s-{plugin}-mapping"] +===== `mapping` + + * Value type is <> + * There is no default value for this setting. + +This lets you choose the structure and parts of the event that are sent. + + +For example: +[source,ruby] + mapping => {"foo" => "%{host}" + "bar" => "%{type}"} + +[id="{version}-plugins-{type}s-{plugin}-message"] +===== `message` + + * Value type is <> + * There is no default value for this setting. + + + +[id="{version}-plugins-{type}s-{plugin}-pool_max"] +===== `pool_max` + + * Value type is <> + * Default value is `50` + +Max number of concurrent connections. Defaults to `50` + +[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] +===== `pool_max_per_route` + + * Value type is <> + * Default value is `25` + +Max number of concurrent connections to a single host. Defaults to `25` + +[id="{version}-plugins-{type}s-{plugin}-proxy"] +===== `proxy` + + * Value type is <> + * There is no default value for this setting. + +If you'd like to use an HTTP proxy . This supports multiple configuration syntaxes: + +1. Proxy host in form: `http://proxy.org:1234` +2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}` +3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}` + +[id="{version}-plugins-{type}s-{plugin}-request_timeout"] +===== `request_timeout` + + * Value type is <> + * Default value is `60` + +This module makes it easy to add a very fully configured HTTP client to logstash +based on [Manticore](https://github.com/cheald/manticore). +For an example of its usage see https://github.com/logstash-plugins/logstash-input-http_poller +Timeout (in seconds) for the entire request + +[id="{version}-plugins-{type}s-{plugin}-retry_failed"] +===== `retry_failed` + + * Value type is <> + * Default value is `true` + +Set this to false if you don't want this output to retry failed requests + +[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"] +===== `retry_non_idempotent` + + * Value type is <> + * Default value is `false` + +If `automatic_retries` is enabled this will cause non-idempotent HTTP verbs (such as POST) to be retried. + +[id="{version}-plugins-{type}s-{plugin}-retryable_codes"] +===== `retryable_codes` + + * Value type is <> + * Default value is `[429, 500, 502, 503, 504]` + +If encountered as response codes this plugin will retry these requests + +[id="{version}-plugins-{type}s-{plugin}-socket_timeout"] +===== `socket_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for data on the socket. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-truststore"] +===== `truststore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs! 
+ +[id="{version}-plugins-{type}s-{plugin}-truststore_password"] +===== `truststore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the truststore password here. +Note, most .jks files created with keytool require a password! + +[id="{version}-plugins-{type}s-{plugin}-truststore_type"] +===== `truststore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS` + +[id="{version}-plugins-{type}s-{plugin}-url"] +===== `url` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +URL to use + +[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] +===== `validate_after_inactivity` + + * Value type is <> + * Default value is `200` + +How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. +You may want to set this lower, possibly to 0 if you get connection errors regularly +Quoting the Apache commons docs (this client is based Apache Commmons): +'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.' +See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info] + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/http-v5.1.2.asciidoc b/docs/versioned-plugins/outputs/http-v5.1.2.asciidoc new file mode 100644 index 000000000..2d64b3bd2 --- /dev/null +++ b/docs/versioned-plugins/outputs/http-v5.1.2.asciidoc @@ -0,0 +1,379 @@ +:plugin: http +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v5.1.2 +:release_date: 2018-01-09 +:changelog_url: https://github.com/logstash-plugins/logstash-output-http/blob/v5.1.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Http output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output lets you send events to a generic HTTP(S) endpoint. + +This output will execute up to 'pool_max' requests in parallel for performance. +Consider this when tuning this plugin for performance. + +Additionally, note that when parallel execution is used strict ordering of events is not +guaranteed! + +Beware, this gem does not yet support codecs. Please use the 'format' option for now. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Http Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-content_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-format>> |<>, one of `["json", "form", "message"]`|No +| <<{version}-plugins-{type}s-{plugin}-headers>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-http_method>> |<>, one of `["put", "post", "patch", "delete", "get", "head"]`|Yes +| <<{version}-plugins-{type}s-{plugin}-ignorable_codes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-mapping>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No +| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_failed>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retryable_codes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] +===== `automatic_retries` + + * Value type is <> + * Default value is `1` + +How many times should the client retry a failing URL. We highly recommend NOT setting this value +to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! +Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. + +[id="{version}-plugins-{type}s-{plugin}-cacert"] +===== `cacert` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom X.509 CA (.pem certs) specify the path to that here + +[id="{version}-plugins-{type}s-{plugin}-client_cert"] +===== `client_cert` + + * Value type is <> + * There is no default value for this setting. 
+ +If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here + +[id="{version}-plugins-{type}s-{plugin}-client_key"] +===== `client_key` + + * Value type is <> + * There is no default value for this setting. + +If you're using a client certificate specify the path to the encryption key here + +[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] +===== `connect_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for a connection to be established. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-content_type"] +===== `content_type` + + * Value type is <> + * There is no default value for this setting. + +Content type + +If not specified, this defaults to the following: + +* if format is "json", "application/json" +* if format is "form", "application/x-www-form-urlencoded" + +[id="{version}-plugins-{type}s-{plugin}-cookies"] +===== `cookies` + + * Value type is <> + * Default value is `true` + +Enable cookie support. With this enabled the client will persist cookies +across requests as a normal web browser would. Enabled by default + +[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] +===== `follow_redirects` + + * Value type is <> + * Default value is `true` + +Should redirects be followed? Defaults to `true` + +[id="{version}-plugins-{type}s-{plugin}-format"] +===== `format` + + * Value can be any of: `json`, `form`, `message` + * Default value is `"json"` + +Set the format of the http body. + +If form, then the body will be the mapping (or whole event) converted +into a query parameter string, e.g. `foo=bar&baz=fizz...` + +If message, then the body will be the result of formatting the event according to message + +Otherwise, the event is sent as json. + +[id="{version}-plugins-{type}s-{plugin}-headers"] +===== `headers` + + * Value type is <> + * There is no default value for this setting. + +Custom headers to use +format is `headers => ["X-My-Header", "%{host}"]` + +[id="{version}-plugins-{type}s-{plugin}-http_compression"] +===== `http_compression` + + * Value type is <> + * Default value is `false` + +Enable request compression support. With this enabled the plugin will compress +http requests using gzip. + +[id="{version}-plugins-{type}s-{plugin}-http_method"] +===== `http_method` + + * This is a required setting. + * Value can be any of: `put`, `post`, `patch`, `delete`, `get`, `head` + * There is no default value for this setting. + +The HTTP Verb. One of "put", "post", "patch", "delete", "get", "head" + +[id="{version}-plugins-{type}s-{plugin}-ignorable_codes"] +===== `ignorable_codes` + + * Value type is <> + * There is no default value for this setting. + +If you would like to consider some non-2xx codes to be successes +enumerate them here. Responses returning these codes will be considered successes + +[id="{version}-plugins-{type}s-{plugin}-keepalive"] +===== `keepalive` + + * Value type is <> + * Default value is `true` + +Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least +one with this to fix interactions with broken keepalive implementations. + +[id="{version}-plugins-{type}s-{plugin}-keystore"] +===== `keystore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! 
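+ +Following the keepalive recommendation above, a sketch that keeps persistent connections enabled while allowing one automatic retry (the endpoint is a placeholder): + +[source,ruby] +    output { +      http { +        url => "https://example.com/ingest" +        http_method => "post" +        keepalive => true +        automatic_retries => 1 +      } +    } +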
+ +[id="{version}-plugins-{type}s-{plugin}-keystore_password"] +===== `keystore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the keystore password here. +Note, most .jks files created with keytool require a password! + +[id="{version}-plugins-{type}s-{plugin}-keystore_type"] +===== `keystore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS` + +[id="{version}-plugins-{type}s-{plugin}-mapping"] +===== `mapping` + + * Value type is <> + * There is no default value for this setting. + +This lets you choose the structure and parts of the event that are sent. + + +For example: +[source,ruby] + mapping => {"foo" => "%{host}" + "bar" => "%{type}"} + +[id="{version}-plugins-{type}s-{plugin}-message"] +===== `message` + + * Value type is <> + * There is no default value for this setting. + + + +[id="{version}-plugins-{type}s-{plugin}-pool_max"] +===== `pool_max` + + * Value type is <> + * Default value is `50` + +Max number of concurrent connections. Defaults to `50` + +[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] +===== `pool_max_per_route` + + * Value type is <> + * Default value is `25` + +Max number of concurrent connections to a single host. Defaults to `25` + +[id="{version}-plugins-{type}s-{plugin}-proxy"] +===== `proxy` + + * Value type is <> + * There is no default value for this setting. + +If you'd like to use an HTTP proxy . This supports multiple configuration syntaxes: + +1. Proxy host in form: `http://proxy.org:1234` +2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}` +3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}` + +[id="{version}-plugins-{type}s-{plugin}-request_timeout"] +===== `request_timeout` + + * Value type is <> + * Default value is `60` + +This module makes it easy to add a very fully configured HTTP client to logstash +based on [Manticore](https://github.com/cheald/manticore). +For an example of its usage see https://github.com/logstash-plugins/logstash-input-http_poller +Timeout (in seconds) for the entire request + +[id="{version}-plugins-{type}s-{plugin}-retry_failed"] +===== `retry_failed` + + * Value type is <> + * Default value is `true` + +Set this to false if you don't want this output to retry failed requests + +[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"] +===== `retry_non_idempotent` + + * Value type is <> + * Default value is `false` + +If `automatic_retries` is enabled this will cause non-idempotent HTTP verbs (such as POST) to be retried. + +[id="{version}-plugins-{type}s-{plugin}-retryable_codes"] +===== `retryable_codes` + + * Value type is <> + * Default value is `[429, 500, 502, 503, 504]` + +If encountered as response codes this plugin will retry these requests + +[id="{version}-plugins-{type}s-{plugin}-socket_timeout"] +===== `socket_timeout` + + * Value type is <> + * Default value is `10` + +Timeout (in seconds) to wait for data on the socket. Default is `10s` + +[id="{version}-plugins-{type}s-{plugin}-truststore"] +===== `truststore` + + * Value type is <> + * There is no default value for this setting. + +If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs! 
+ +[id="{version}-plugins-{type}s-{plugin}-truststore_password"] +===== `truststore_password` + + * Value type is <> + * There is no default value for this setting. + +Specify the truststore password here. +Note, most .jks files created with keytool require a password! + +[id="{version}-plugins-{type}s-{plugin}-truststore_type"] +===== `truststore_type` + + * Value type is <> + * Default value is `"JKS"` + +Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS` + +[id="{version}-plugins-{type}s-{plugin}-url"] +===== `url` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +URL to use + +[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] +===== `validate_after_inactivity` + + * Value type is <> + * Default value is `200` + +How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. +You may want to set this lower, possibly to 0 if you get connection errors regularly +Quoting the Apache commons docs (this client is based Apache Commmons): +'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.' +See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info] + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/icinga-index.asciidoc b/docs/versioned-plugins/outputs/icinga-index.asciidoc new file mode 100644 index 000000000..55ee44d43 --- /dev/null +++ b/docs/versioned-plugins/outputs/icinga-index.asciidoc @@ -0,0 +1,10 @@ +:plugin: icinga +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +|======================================================================= + + diff --git a/docs/versioned-plugins/outputs/influxdb-index.asciidoc b/docs/versioned-plugins/outputs/influxdb-index.asciidoc new file mode 100644 index 000000000..152a1a7a9 --- /dev/null +++ b/docs/versioned-plugins/outputs/influxdb-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: influxdb +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-16 +| <> | 2017-06-23 +|======================================================================= + +include::influxdb-v5.0.3.asciidoc[] +include::influxdb-v5.0.2.asciidoc[] +include::influxdb-v5.0.1.asciidoc[] + diff --git a/docs/versioned-plugins/outputs/influxdb-v5.0.1.asciidoc b/docs/versioned-plugins/outputs/influxdb-v5.0.1.asciidoc new file mode 100644 index 000000000..c758109af --- /dev/null +++ b/docs/versioned-plugins/outputs/influxdb-v5.0.1.asciidoc @@ -0,0 +1,270 @@ +:plugin: influxdb +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// +:version: v5.0.1 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-output-influxdb/blob/v5.0.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Influxdb output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output lets you send metrics to InfluxDB (>= 0.9.0-rc31). + +The configuration here attempts to be as friendly as possible +and to minimize the need for multiple definitions to write to +multiple measurements, while still being efficient. + +The InfluxDB API lets you do some semblance of bulk operations +per HTTP call, but each call is database-specific. + +You can learn more at the http://influxdb.com[InfluxDB homepage]. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Influxdb Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-allow_time_override>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-coerce_values>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-data_points>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclude_fields>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-flush_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-idle_flush_time>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-initial_delay>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-measurement>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retention_policy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-send_as_tags>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-time_precision>> |<>, one of `["n", "u", "ms", "s", "m", "h"]`|No +| <<{version}-plugins-{type}s-{plugin}-use_event_fields_for_data_points>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-allow_time_override"] +===== `allow_time_override` + + * Value type is <> + * Default value is `false` + +Allow the override of the `time` column in the event? + +By default, any column with a name of `time` will be ignored and the time will +be determined by the value of `@timestamp`.
+ +Setting this to `true` allows you to explicitly set the `time` column yourself. + +Note: **`time` must be an epoch value in either seconds, milliseconds or microseconds** + +[id="{version}-plugins-{type}s-{plugin}-coerce_values"] +===== `coerce_values` + + * Value type is <> + * Default value is `{}` + +Allow value coercion. + +This will attempt to convert data point values to the appropriate type before posting; +otherwise sprintf-filtered numeric values could get sent as strings. +The format is `{'column_name' => 'datatype'}`. + +Currently supported datatypes are `integer` and `float`. + + +[id="{version}-plugins-{type}s-{plugin}-data_points"] +===== `data_points` + + * This is a required setting. + * Value type is <> + * Default value is `{}` + +Hash of key/value pairs representing data points to send to the named database. +Example: `{'column1' => 'value1', 'column2' => 'value2'}` + +Events for the same measurement will be batched together where possible. +Both keys and values support sprintf formatting. + +[id="{version}-plugins-{type}s-{plugin}-db"] +===== `db` + + * Value type is <> + * Default value is `"statistics"` + +The database to write to - supports sprintf formatting + +[id="{version}-plugins-{type}s-{plugin}-exclude_fields"] +===== `exclude_fields` + + * Value type is <> + * Default value is `["@timestamp", "@version", "sequence", "message", "type"]` + +An array containing the names of fields from the event to exclude from the +data points + +Events, in general, contain keys "@version" and "@timestamp". Other plugins +may add others that you'll want to exclude (such as "command" from the +exec plugin). + +This only applies when `use_event_fields_for_data_points` is true. + +[id="{version}-plugins-{type}s-{plugin}-flush_size"] +===== `flush_size` + + * Value type is <> + * Default value is `100` + +This setting controls how many events will be buffered before sending a batch +of events. Note that these are only batched for the same measurement. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The hostname or IP address to reach your InfluxDB instance. + +[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"] +===== `idle_flush_time` + + * Value type is <> + * Default value is `1` + +The amount of time since last flush before a flush is forced. + +This setting helps ensure slow event rates don't get stuck in Logstash. +For example, if your `flush_size` is 100, and you have received 10 events, +and it has been more than `idle_flush_time` seconds since the last flush, +Logstash will flush those 10 events automatically. + +This helps keep both fast and slow log streams moving along in +near-real-time. + +[id="{version}-plugins-{type}s-{plugin}-initial_delay"] +===== `initial_delay` + + * Value type is <> + * Default value is `1` + +The amount of time in seconds to delay the initial retry on connection failure. + +The delay will increase exponentially for each retry attempt (up to `max_retries`). + +[id="{version}-plugins-{type}s-{plugin}-max_retries"] +===== `max_retries` + + * Value type is <> + * Default value is `3` + +The number of times to retry recoverable errors before dropping the events. + +A value of -1 will cause the plugin to retry indefinitely. +A value of 0 will cause the plugin to never retry. +Otherwise it will retry up to the specified number of times.
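+ +Tying the settings above together, a sketch that writes two event fields as data points and coerces one of them to an integer so it is not sent as a string (the host and field names are placeholders): + +[source,ruby] +    output { +      influxdb { +        host => "influxdb.example.com" +        db => "statistics" +        data_points => {"response_ms" => "%{response_ms}" +                       "status" => "%{status}"} +        coerce_values => {"response_ms" => "integer"} +      } +    } +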
+ + +[id="{version}-plugins-{type}s-{plugin}-measurement"] +===== `measurement` + + * Value type is <> + * Default value is `"logstash"` + +Measurement name - supports sprintf formatting + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * Default value is `nil` + +The password for the user who has access to the named database + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `8086` + +The port for InfluxDB + +[id="{version}-plugins-{type}s-{plugin}-retention_policy"] +===== `retention_policy` + + * Value type is <> + * Default value is `"autogen"` + +The retention policy to use + +[id="{version}-plugins-{type}s-{plugin}-send_as_tags"] +===== `send_as_tags` + + * Value type is <> + * Default value is `["host"]` + +An array containing the names of fields to send to Influxdb as tags instead +of fields. Influxdb 0.9 convention is that values that do not change every +request should be considered metadata and given as tags. + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * Default value is `false` + +Enable SSL/TLS secured communication to InfluxDB + +[id="{version}-plugins-{type}s-{plugin}-time_precision"] +===== `time_precision` + + * Value can be any of: `n`, `u`, `ms`, `s`, `m`, `h` + * Default value is `"ms"` + +Set the level of precision of `time`. +Only useful when overriding the time value. + +[id="{version}-plugins-{type}s-{plugin}-use_event_fields_for_data_points"] +===== `use_event_fields_for_data_points` + + * Value type is <> + * Default value is `false` + +Automatically use fields from the event as the data points sent to Influxdb + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * Default value is `nil` + +The user who has access to the named database + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/influxdb-v5.0.2.asciidoc b/docs/versioned-plugins/outputs/influxdb-v5.0.2.asciidoc new file mode 100644 index 000000000..734b8f0b0 --- /dev/null +++ b/docs/versioned-plugins/outputs/influxdb-v5.0.2.asciidoc @@ -0,0 +1,270 @@ +:plugin: influxdb +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v5.0.2 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-influxdb/blob/v5.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT!
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Influxdb output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output lets you send metrics to InfluxDB (>= 0.9.0-rc31). + +The configuration here attempts to be as friendly as possible +and to minimize the need for multiple definitions to write to +multiple measurements, while still being efficient. + +The InfluxDB API lets you do some semblance of bulk operations +per HTTP call, but each call is database-specific. + +You can learn more at the http://influxdb.com[InfluxDB homepage]. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Influxdb Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-allow_time_override>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-coerce_values>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-data_points>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclude_fields>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-flush_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-idle_flush_time>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-initial_delay>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-measurement>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retention_policy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-send_as_tags>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-time_precision>> |<>, one of `["n", "u", "ms", "s", "m", "h"]`|No +| <<{version}-plugins-{type}s-{plugin}-use_event_fields_for_data_points>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-allow_time_override"] +===== `allow_time_override` + + * Value type is <> + * Default value is `false` + +Allow the override of the `time` column in the event? + +By default, any column with a name of `time` will be ignored and the time will +be determined by the value of `@timestamp`. + +Setting this to `true` allows you to explicitly set the `time` column yourself. + +Note: **`time` must be an epoch value in either seconds, milliseconds or microseconds** + +[id="{version}-plugins-{type}s-{plugin}-coerce_values"] +===== `coerce_values` + + * Value type is <> + * Default value is `{}` + +Allow value coercion. + +This will attempt to convert data point values to the appropriate type before posting; +otherwise sprintf-filtered numeric values could get sent as strings. +The format is `{'column_name' => 'datatype'}`. + +Currently supported datatypes are `integer` and `float`. + + +[id="{version}-plugins-{type}s-{plugin}-data_points"] +===== `data_points` + + * This is a required setting.
+ * Value type is <> + * Default value is `{}` + +Hash of key/value pairs representing data points to send to the named database. +Example: `{'column1' => 'value1', 'column2' => 'value2'}` + +Events for the same measurement will be batched together where possible. +Both keys and values support sprintf formatting. + +[id="{version}-plugins-{type}s-{plugin}-db"] +===== `db` + + * Value type is <> + * Default value is `"statistics"` + +The database to write to - supports sprintf formatting + +[id="{version}-plugins-{type}s-{plugin}-exclude_fields"] +===== `exclude_fields` + + * Value type is <> + * Default value is `["@timestamp", "@version", "sequence", "message", "type"]` + +An array containing the names of fields from the event to exclude from the +data points + +Events, in general, contain keys "@version" and "@timestamp". Other plugins +may add others that you'll want to exclude (such as "command" from the +exec plugin). + +This only applies when `use_event_fields_for_data_points` is true. + +[id="{version}-plugins-{type}s-{plugin}-flush_size"] +===== `flush_size` + + * Value type is <> + * Default value is `100` + +This setting controls how many events will be buffered before sending a batch +of events. Note that these are only batched for the same measurement. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The hostname or IP address to reach your InfluxDB instance. + +[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"] +===== `idle_flush_time` + + * Value type is <> + * Default value is `1` + +The amount of time since last flush before a flush is forced. + +This setting helps ensure slow event rates don't get stuck in Logstash. +For example, if your `flush_size` is 100, and you have received 10 events, +and it has been more than `idle_flush_time` seconds since the last flush, +Logstash will flush those 10 events automatically. + +This helps keep both fast and slow log streams moving along in +near-real-time. + +[id="{version}-plugins-{type}s-{plugin}-initial_delay"] +===== `initial_delay` + + * Value type is <> + * Default value is `1` + +The amount of time in seconds to delay the initial retry on connection failure. + +The delay will increase exponentially for each retry attempt (up to `max_retries`). + +[id="{version}-plugins-{type}s-{plugin}-max_retries"] +===== `max_retries` + + * Value type is <> + * Default value is `3` + +The number of times to retry recoverable errors before dropping the events. + +A value of -1 will cause the plugin to retry indefinitely. +A value of 0 will cause the plugin to never retry. +Otherwise it will retry up to the specified number of times.
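+ +For example, a sketch that lets the event supply its own epoch `time` column, at the millisecond precision set by the `time_precision` option documented later in this section (the host and field names are placeholders, and the `time` column is assumed to be carried in `data_points`): + +[source,ruby] +    output { +      influxdb { +        host => "influxdb.example.com" +        data_points => {"value" => "%{value}" +                       "time" => "%{time}"} +        allow_time_override => true +        time_precision => "ms" +      } +    } +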
+ + +[id="{version}-plugins-{type}s-{plugin}-measurement"] +===== `measurement` + + * Value type is <> + * Default value is `"logstash"` + +Measurement name - supports sprintf formatting + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * Default value is `nil` + +The password for the user who has access to the named database + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `8086` + +The port for InfluxDB + +[id="{version}-plugins-{type}s-{plugin}-retention_policy"] +===== `retention_policy` + + * Value type is <> + * Default value is `"autogen"` + +The retention policy to use + +[id="{version}-plugins-{type}s-{plugin}-send_as_tags"] +===== `send_as_tags` + + * Value type is <> + * Default value is `["host"]` + +An array containing the names of fields to send to Influxdb as tags instead +of fields. Influxdb 0.9 convention is that values that do not change every +request should be considered metadata and given as tags. + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * Default value is `false` + +Enable SSL/TLS secured communication to InfluxDB + +[id="{version}-plugins-{type}s-{plugin}-time_precision"] +===== `time_precision` + + * Value can be any of: `n`, `u`, `ms`, `s`, `m`, `h` + * Default value is `"ms"` + +Set the level of precision of `time`. +Only useful when overriding the time value. + +[id="{version}-plugins-{type}s-{plugin}-use_event_fields_for_data_points"] +===== `use_event_fields_for_data_points` + + * Value type is <> + * Default value is `false` + +Automatically use fields from the event as the data points sent to Influxdb + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * Default value is `nil` + +The user who has access to the named database + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/influxdb-v5.0.3.asciidoc b/docs/versioned-plugins/outputs/influxdb-v5.0.3.asciidoc new file mode 100644 index 000000000..1a06fe59f --- /dev/null +++ b/docs/versioned-plugins/outputs/influxdb-v5.0.3.asciidoc @@ -0,0 +1,270 @@ +:plugin: influxdb +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v5.0.3 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-output-influxdb/blob/v5.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT!
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Influxdb output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output lets you send metrics to InfluxDB (>= 0.9.0-rc31). + +The configuration here attempts to be as friendly as possible +and to minimize the need for multiple definitions to write to +multiple measurements, while still being efficient. + +The InfluxDB API lets you do some semblance of bulk operations +per HTTP call, but each call is database-specific. + +You can learn more at the http://influxdb.com[InfluxDB homepage]. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Influxdb Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-allow_time_override>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-coerce_values>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-data_points>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclude_fields>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-flush_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-idle_flush_time>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-initial_delay>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-measurement>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retention_policy>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-send_as_tags>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-time_precision>> |<>, one of `["n", "u", "ms", "s", "m", "h"]`|No +| <<{version}-plugins-{type}s-{plugin}-use_event_fields_for_data_points>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-allow_time_override"] +===== `allow_time_override` + + * Value type is <> + * Default value is `false` + +Allow the override of the `time` column in the event? + +By default, any column with a name of `time` will be ignored and the time will +be determined by the value of `@timestamp`. + +Setting this to `true` allows you to explicitly set the `time` column yourself. + +Note: **`time` must be an epoch value in either seconds, milliseconds or microseconds** + +[id="{version}-plugins-{type}s-{plugin}-coerce_values"] +===== `coerce_values` + + * Value type is <> + * Default value is `{}` + +Allow value coercion. + +This will attempt to convert data point values to the appropriate type before posting; +otherwise sprintf-filtered numeric values could get sent as strings. +The format is `{'column_name' => 'datatype'}`. + +Currently supported datatypes are `integer` and `float`. + + +[id="{version}-plugins-{type}s-{plugin}-data_points"] +===== `data_points` + + * This is a required setting.
+ * Value type is <> + * Default value is `{}` + +Hash of key/value pairs representing data points to send to the named database. +Example: `{'column1' => 'value1', 'column2' => 'value2'}` + +Events for the same measurement will be batched together where possible. +Both keys and values support sprintf formatting. + +[id="{version}-plugins-{type}s-{plugin}-db"] +===== `db` + + * Value type is <> + * Default value is `"statistics"` + +The database to write to - supports sprintf formatting + +[id="{version}-plugins-{type}s-{plugin}-exclude_fields"] +===== `exclude_fields` + + * Value type is <> + * Default value is `["@timestamp", "@version", "sequence", "message", "type"]` + +An array containing the names of fields from the event to exclude from the +data points + +Events, in general, contain keys "@version" and "@timestamp". Other plugins +may add others that you'll want to exclude (such as "command" from the +exec plugin). + +This only applies when `use_event_fields_for_data_points` is true. + +[id="{version}-plugins-{type}s-{plugin}-flush_size"] +===== `flush_size` + + * Value type is <> + * Default value is `100` + +This setting controls how many events will be buffered before sending a batch +of events. Note that these are only batched for the same measurement. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The hostname or IP address to reach your InfluxDB instance. + +[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"] +===== `idle_flush_time` + + * Value type is <> + * Default value is `1` + +The amount of time since last flush before a flush is forced. + +This setting helps ensure slow event rates don't get stuck in Logstash. +For example, if your `flush_size` is 100, and you have received 10 events, +and it has been more than `idle_flush_time` seconds since the last flush, +Logstash will flush those 10 events automatically. + +This helps keep both fast and slow log streams moving along in +near-real-time. + +[id="{version}-plugins-{type}s-{plugin}-initial_delay"] +===== `initial_delay` + + * Value type is <> + * Default value is `1` + +The amount of time in seconds to delay the initial retry on connection failure. + +The delay will increase exponentially for each retry attempt (up to `max_retries`). + +[id="{version}-plugins-{type}s-{plugin}-max_retries"] +===== `max_retries` + + * Value type is <> + * Default value is `3` + +The number of times to retry recoverable errors before dropping the events. + +A value of -1 will cause the plugin to retry indefinitely. +A value of 0 will cause the plugin to never retry. +Otherwise it will retry up to the specified number of times.
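+ +As an alternative to listing every column by hand, a sketch that builds data points from the event itself, using the `exclude_fields`, `use_event_fields_for_data_points`, and `send_as_tags` settings documented in this section (the host is a placeholder, and `data_points` is left empty here since it is still a required setting): + +[source,ruby] +    output { +      influxdb { +        host => "influxdb.example.com" +        data_points => {} +        use_event_fields_for_data_points => true +        exclude_fields => ["@timestamp", "@version", "sequence", "message", "type"] +        send_as_tags => ["host"] +      } +    } +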
+ + +[id="{version}-plugins-{type}s-{plugin}-measurement"] +===== `measurement` + + * Value type is <> + * Default value is `"logstash"` + +Measurement name - supports sprintf formatting + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * Default value is `nil` + +The password for the user who has access to the named database + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `8086` + +The port for InfluxDB + +[id="{version}-plugins-{type}s-{plugin}-retention_policy"] +===== `retention_policy` + + * Value type is <> + * Default value is `"autogen"` + +The retention policy to use + +[id="{version}-plugins-{type}s-{plugin}-send_as_tags"] +===== `send_as_tags` + + * Value type is <> + * Default value is `["host"]` + +An array containing the names of fields to send to Influxdb as tags instead +of fields. Influxdb 0.9 convention is that values that do not change every +request should be considered metadata and given as tags. + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * Default value is `false` + +Enable SSL/TLS secured communication to InfluxDB + +[id="{version}-plugins-{type}s-{plugin}-time_precision"] +===== `time_precision` + + * Value can be any of: `n`, `u`, `ms`, `s`, `m`, `h` + * Default value is `"ms"` + +Set the level of precision of `time`. +Only useful when overriding the time value. + +[id="{version}-plugins-{type}s-{plugin}-use_event_fields_for_data_points"] +===== `use_event_fields_for_data_points` + + * Value type is <> + * Default value is `false` + +Automatically use fields from the event as the data points sent to Influxdb + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * Default value is `nil` + +The user who has access to the named database + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/irc-index.asciidoc b/docs/versioned-plugins/outputs/irc-index.asciidoc new file mode 100644 index 000000000..813d0eed3 --- /dev/null +++ b/docs/versioned-plugins/outputs/irc-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: irc +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-16 +| <> | 2017-06-23 +|======================================================================= + +include::irc-v3.0.5.asciidoc[] +include::irc-v3.0.4.asciidoc[] +include::irc-v3.0.3.asciidoc[] + diff --git a/docs/versioned-plugins/outputs/irc-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/irc-v3.0.3.asciidoc new file mode 100644 index 000000000..18890070c --- /dev/null +++ b/docs/versioned-plugins/outputs/irc-v3.0.3.asciidoc @@ -0,0 +1,157 @@ +:plugin: irc +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-output-irc/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Irc output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Write events to IRC
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Irc Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-channels>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-messages_per_second>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-nick>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-post_string>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pre_string>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-real>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-secure>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-channels"]
+===== `channels`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Channels to broadcast to.
+
+These should be full channel names including the '#' symbol, such as
+"#logstash".
+
+[id="{version}-plugins-{type}s-{plugin}-format"]
+===== `format`
+
+ * Value type is <>
+ * Default value is `"%{message}"`
+
+Message format to send, event tokens are usable here
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Address of the host to connect to
+
+[id="{version}-plugins-{type}s-{plugin}-messages_per_second"]
+===== `messages_per_second`
+
+ * Value type is <>
+ * Default value is `0.5`
+
+Limit the rate of messages sent to IRC in messages per second.
+
+[id="{version}-plugins-{type}s-{plugin}-nick"]
+===== `nick`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+IRC Nickname
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+IRC server password
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <>
+ * Default value is `6667`
+
+Port on host to connect to.
+
+[id="{version}-plugins-{type}s-{plugin}-post_string"]
+===== `post_string`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Static string after event
+
+[id="{version}-plugins-{type}s-{plugin}-pre_string"]
+===== `pre_string`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Static string before event
+
+[id="{version}-plugins-{type}s-{plugin}-real"]
+===== `real`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+IRC Real name
+
+[id="{version}-plugins-{type}s-{plugin}-secure"]
+===== `secure`
+
+ * Value type is <>
+ * Default value is `false`
+
+Set this to true to enable SSL.
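+
+For reference, a minimal sketch of an output block using the settings above
+(the server address is a placeholder, not a default):
+
+[source,ruby]
+-----------------------------------------------------------------------------
+output {
+  irc {
+    host     => "irc.example.com"       # placeholder IRC server
+    port     => 6667
+    channels => ["#logstash"]           # full channel names, including '#'
+    nick     => "logstash"
+    format   => "%{host}: %{message}"   # event tokens are usable here
+  }
+}
+-----------------------------------------------------------------------------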
+ +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * Default value is `"logstash"` + +IRC Username + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/irc-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/irc-v3.0.4.asciidoc new file mode 100644 index 000000000..bc527f22a --- /dev/null +++ b/docs/versioned-plugins/outputs/irc-v3.0.4.asciidoc @@ -0,0 +1,157 @@ +:plugin: irc +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-irc/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Irc output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Write events to IRC + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Irc Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-channels>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-messages_per_second>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-nick>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-post_string>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pre_string>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-real>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-secure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-channels"] +===== `channels` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Channels to broadcast to. + +These should be full channel names including the '#' symbol, such as +"#logstash". + +[id="{version}-plugins-{type}s-{plugin}-format"] +===== `format` + + * Value type is <> + * Default value is `"%{message}"` + +Message format to send, event tokens are usable here + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Address of the host to connect to + +[id="{version}-plugins-{type}s-{plugin}-messages_per_second"] +===== `messages_per_second` + + * Value type is <> + * Default value is `0.5` + +Limit the rate of messages sent to IRC in messages per second. 
+ +[id="{version}-plugins-{type}s-{plugin}-nick"] +===== `nick` + + * Value type is <> + * Default value is `"logstash"` + +IRC Nickname + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +IRC server password + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `6667` + +Port on host to connect to. + +[id="{version}-plugins-{type}s-{plugin}-post_string"] +===== `post_string` + + * Value type is <> + * There is no default value for this setting. + +Static string after event + +[id="{version}-plugins-{type}s-{plugin}-pre_string"] +===== `pre_string` + + * Value type is <> + * There is no default value for this setting. + +Static string before event + +[id="{version}-plugins-{type}s-{plugin}-real"] +===== `real` + + * Value type is <> + * Default value is `"logstash"` + +IRC Real name + +[id="{version}-plugins-{type}s-{plugin}-secure"] +===== `secure` + + * Value type is <> + * Default value is `false` + +Set this to true to enable SSL. + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * Default value is `"logstash"` + +IRC Username + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/irc-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/irc-v3.0.5.asciidoc new file mode 100644 index 000000000..1b27da973 --- /dev/null +++ b/docs/versioned-plugins/outputs/irc-v3.0.5.asciidoc @@ -0,0 +1,157 @@ +:plugin: irc +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-output-irc/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Irc output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Write events to IRC + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Irc Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-channels>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-messages_per_second>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-nick>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-post_string>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-pre_string>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-real>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-secure>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. 
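+
+As a sketch of how the framing settings described below combine (the server
+address and channel are placeholders, not defaults), the following block
+would throttle outgoing messages and wrap each event in static text:
+
+[source,ruby]
+-----------------------------------------------------------------------------
+output {
+  irc {
+    host                => "irc.example.com"   # placeholder IRC server
+    channels            => ["#alerts"]         # placeholder channel
+    pre_string          => "ALERT:"            # static string sent before each event
+    post_string         => "-- end --"         # static string sent after each event
+    messages_per_second => 0.5                 # rate limit on outgoing messages
+  }
+}
+-----------------------------------------------------------------------------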
+ +  + +[id="{version}-plugins-{type}s-{plugin}-channels"] +===== `channels` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Channels to broadcast to. + +These should be full channel names including the '#' symbol, such as +"#logstash". + +[id="{version}-plugins-{type}s-{plugin}-format"] +===== `format` + + * Value type is <> + * Default value is `"%{message}"` + +Message format to send, event tokens are usable here + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Address of the host to connect to + +[id="{version}-plugins-{type}s-{plugin}-messages_per_second"] +===== `messages_per_second` + + * Value type is <> + * Default value is `0.5` + +Limit the rate of messages sent to IRC in messages per second. + +[id="{version}-plugins-{type}s-{plugin}-nick"] +===== `nick` + + * Value type is <> + * Default value is `"logstash"` + +IRC Nickname + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +IRC server password + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `6667` + +Port on host to connect to. + +[id="{version}-plugins-{type}s-{plugin}-post_string"] +===== `post_string` + + * Value type is <> + * There is no default value for this setting. + +Static string after event + +[id="{version}-plugins-{type}s-{plugin}-pre_string"] +===== `pre_string` + + * Value type is <> + * There is no default value for this setting. + +Static string before event + +[id="{version}-plugins-{type}s-{plugin}-real"] +===== `real` + + * Value type is <> + * Default value is `"logstash"` + +IRC Real name + +[id="{version}-plugins-{type}s-{plugin}-secure"] +===== `secure` + + * Value type is <> + * Default value is `false` + +Set this to true to enable SSL. + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * Default value is `"logstash"` + +IRC Username + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/jira-index.asciidoc b/docs/versioned-plugins/outputs/jira-index.asciidoc new file mode 100644 index 000000000..f14aedef8 --- /dev/null +++ b/docs/versioned-plugins/outputs/jira-index.asciidoc @@ -0,0 +1,14 @@ +:plugin: jira +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-08-16 +| <> | 2017-06-23 +|======================================================================= + +include::jira-v3.0.3.asciidoc[] +include::jira-v3.0.2.asciidoc[] + diff --git a/docs/versioned-plugins/outputs/jira-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/jira-v3.0.2.asciidoc new file mode 100644 index 000000000..67c0e63c6 --- /dev/null +++ b/docs/versioned-plugins/outputs/jira-v3.0.2.asciidoc @@ -0,0 +1,195 @@ +:plugin: jira +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+:version: v3.0.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-jira/blob/v3.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Jira output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output allows you to use Logstash to parse and structure
+your logs and ship structured event data to JIRA.
+
+Structured event data will be added to the JIRA issue as the 'Description' field value.
+
+Example JSON-encoded event:
+
+[source,yaml]
+-----------------------------------------------------------------------------
+    {
+        "message": "Hello JIRA!",
+        "@version": "1",
+        "@timestamp": "2015-06-04T10:23:30.279Z",
+        "type": "syslog",
+        "host": "192.168.1.42",
+        "syslog_pri": "11",
+        "syslog_timestamp": "Jun 4 14:23:30",
+        "syslog_host": "myhost",
+        "program": "root",
+        "syslog_severity_code": 3,
+        "syslog_facility_code": 1,
+        "syslog_facility": "user-level",
+        "syslog_severity": "error"
+    }
+-----------------------------------------------------------------------------
+
+Example JIRA issue created from the event above:
+
+[source,shell]
+-----------------------------------------------------------------------------
+    Type: Task
+    Priority: 2 - Major
+    Status: TO DO
+    Resolution: Unresolved
+    Summary: [logstash] Hello JIRA!
+    Description:
+    ---
+    message: Hello JIRA!
+    '@version': '1'
+    '@timestamp': 2015-06-04 10:23:30.279000000 Z
+    type: syslog
+    host: 192.168.1.42
+    syslog_pri: '11'
+    syslog_timestamp: Jun 4 14:23:30
+    syslog_host: myhost
+    program: root
+    syslog_severity_code: 3
+    syslog_facility_code: 1
+    syslog_facility: user-level
+    syslog_severity: error
+-----------------------------------------------------------------------------
+
+To use this output you'll need to ensure that your JIRA instance allows REST calls.
+
+This output uses `jiralicious` (by Martin Cleaver, Blended Perspectives) as
+the bridge to JIRA, with a lot of help from 'electrical' in #logstash.
+
+Origin
+and
+via .
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Jira Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-assignee>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-issuetypeid>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-priority>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-projectid>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-reporter>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-summary>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-username>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-assignee"]
+===== `assignee`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JIRA Assignee
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The hostname to send logs to. This should target your JIRA server
+and has to have the REST interface enabled.
+
+[id="{version}-plugins-{type}s-{plugin}-issuetypeid"]
+===== `issuetypeid`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+JIRA Issuetype number
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-priority"]
+===== `priority`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+JIRA Priority
+
+[id="{version}-plugins-{type}s-{plugin}-projectid"]
+===== `projectid`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Note that jiralicious has no proxy support.
+JIRA Project number
+
+[id="{version}-plugins-{type}s-{plugin}-reporter"]
+===== `reporter`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JIRA Reporter
+
+[id="{version}-plugins-{type}s-{plugin}-summary"]
+===== `summary`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+JIRA Summary
+
+Truncated and appended with '...' if longer than 255 characters.
+
+[id="{version}-plugins-{type}s-{plugin}-username"]
+===== `username`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/jira-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/jira-v3.0.3.asciidoc
new file mode 100644
index 000000000..e32e496b7
--- /dev/null
+++ b/docs/versioned-plugins/outputs/jira-v3.0.3.asciidoc
@@ -0,0 +1,195 @@
+:plugin: jira
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-output-jira/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Jira output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output allows you to use Logstash to parse and structure
+your logs and ship structured event data to JIRA.
+
+Structured event data will be added to the JIRA issue as the 'Description' field value.
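+
+As a minimal sketch, an output block with the required settings might look
+like this (all values are placeholders, not defaults):
+
+[source,ruby]
+-----------------------------------------------------------------------------
+output {
+  jira {
+    host        => "jira.example.com"       # placeholder JIRA server
+    username    => "logstash"               # placeholder credentials
+    password    => "changeme"
+    projectid   => "10000"                  # placeholder project id
+    issuetypeid => "3"                      # placeholder issue type id
+    priority    => "2"                      # placeholder priority
+    summary     => "[logstash] %{message}"
+  }
+}
+-----------------------------------------------------------------------------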
+
+Example JSON-encoded event:
+
+[source,yaml]
+-----------------------------------------------------------------------------
+    {
+        "message": "Hello JIRA!",
+        "@version": "1",
+        "@timestamp": "2015-06-04T10:23:30.279Z",
+        "type": "syslog",
+        "host": "192.168.1.42",
+        "syslog_pri": "11",
+        "syslog_timestamp": "Jun 4 14:23:30",
+        "syslog_host": "myhost",
+        "program": "root",
+        "syslog_severity_code": 3,
+        "syslog_facility_code": 1,
+        "syslog_facility": "user-level",
+        "syslog_severity": "error"
+    }
+-----------------------------------------------------------------------------
+
+Example JIRA issue created from the event above:
+
+[source,shell]
+-----------------------------------------------------------------------------
+    Type: Task
+    Priority: 2 - Major
+    Status: TO DO
+    Resolution: Unresolved
+    Summary: [logstash] Hello JIRA!
+    Description:
+    ---
+    message: Hello JIRA!
+    '@version': '1'
+    '@timestamp': 2015-06-04 10:23:30.279000000 Z
+    type: syslog
+    host: 192.168.1.42
+    syslog_pri: '11'
+    syslog_timestamp: Jun 4 14:23:30
+    syslog_host: myhost
+    program: root
+    syslog_severity_code: 3
+    syslog_facility_code: 1
+    syslog_facility: user-level
+    syslog_severity: error
+-----------------------------------------------------------------------------
+
+To use this output you'll need to ensure that your JIRA instance allows REST calls.
+
+This output uses `jiralicious` (by Martin Cleaver, Blended Perspectives) as
+the bridge to JIRA, with a lot of help from 'electrical' in #logstash.
+
+Origin
+and
+via .
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Jira Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-assignee>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-issuetypeid>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-priority>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-projectid>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-reporter>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-summary>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-username>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-assignee"]
+===== `assignee`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JIRA Assignee
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The hostname to send logs to. This should target your JIRA server
+and has to have the REST interface enabled.
+
+[id="{version}-plugins-{type}s-{plugin}-issuetypeid"]
+===== `issuetypeid`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+JIRA Issuetype number
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-priority"]
+===== `priority`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+JIRA Priority
+
+[id="{version}-plugins-{type}s-{plugin}-projectid"]
+===== `projectid`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Note that jiralicious has no proxy support.
+JIRA Project number
+
+[id="{version}-plugins-{type}s-{plugin}-reporter"]
+===== `reporter`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+JIRA Reporter
+
+[id="{version}-plugins-{type}s-{plugin}-summary"]
+===== `summary`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+JIRA Summary
+
+Truncated and appended with '...' if longer than 255 characters.
+
+[id="{version}-plugins-{type}s-{plugin}-username"]
+===== `username`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/jms-index.asciidoc b/docs/versioned-plugins/outputs/jms-index.asciidoc
new file mode 100644
index 000000000..4c24c6414
--- /dev/null
+++ b/docs/versioned-plugins/outputs/jms-index.asciidoc
@@ -0,0 +1,12 @@
+:plugin: jms
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-06-23
+|=======================================================================
+
+include::jms-v3.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/jms-v3.0.1.asciidoc b/docs/versioned-plugins/outputs/jms-v3.0.1.asciidoc
new file mode 100644
index 000000000..444a5af7e
--- /dev/null
+++ b/docs/versioned-plugins/outputs/jms-v3.0.1.asciidoc
@@ -0,0 +1,175 @@
+:plugin: jms
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-jms/blob/v3.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Jms output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Write events to a JMS Broker. Supports both JMS Queues and Topics.
+
+For more information about JMS, see
+For more information about the Ruby Gem used, see
+Here is a config example:
+    jms {
+        include_header => false
+        include_properties => false
+        include_body => true
+        use_jms_timestamp => false
+        queue_name => "myqueue"
+        yaml_file => "~/jms.yml"
+        yaml_section => "mybroker"
+    }
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Jms Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-broker_url>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-delivery_mode>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-factory>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-jndi_context>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-jndi_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pub_sub>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-require_jars>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-username>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-yaml_file>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-yaml_section>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-broker_url"]
+===== `broker_url`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+URL to use when connecting to the JMS provider
+
+[id="{version}-plugins-{type}s-{plugin}-delivery_mode"]
+===== `delivery_mode`
+
+ * Value type is <>
+ * Default value is `nil`
+
+Name of delivery mode to use.
+Options are "persistent" and "non_persistent"; if not defined, nothing will be passed.
+
+[id="{version}-plugins-{type}s-{plugin}-destination"]
+===== `destination`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Name of the destination queue or topic to use.
+Mandatory
+
+[id="{version}-plugins-{type}s-{plugin}-factory"]
+===== `factory`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Name of JMS Provider Factory class
+
+[id="{version}-plugins-{type}s-{plugin}-jndi_context"]
+===== `jndi_context`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Mandatory if JNDI lookup is being used;
+contains details on how to connect to the JNDI server
+
+[id="{version}-plugins-{type}s-{plugin}-jndi_name"]
+===== `jndi_name`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Name of JNDI entry at which the Factory can be found
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Password to use when connecting to the JMS provider
+
+[id="{version}-plugins-{type}s-{plugin}-pub_sub"]
+===== `pub_sub`
+
+ * Value type is <>
+ * Default value is `false`
+
+Whether pub-sub (topic) style should be used or not.
+Mandatory
+
+[id="{version}-plugins-{type}s-{plugin}-require_jars"]
+===== `require_jars`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If you do not use a YAML configuration, use either `factory` or `jndi_name`.
+An optional array of JAR file names to load for the specified
+JMS provider. By using this option it is not necessary
+to put all the JMS provider-specific JAR files into the
+Java CLASSPATH prior to starting Logstash.
+
+[id="{version}-plugins-{type}s-{plugin}-username"]
+===== `username`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Username to connect to JMS provider with
+
+[id="{version}-plugins-{type}s-{plugin}-yaml_file"]
+===== `yaml_file`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+YAML config file
+
+[id="{version}-plugins-{type}s-{plugin}-yaml_section"]
+===== `yaml_section`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+YAML config file section name.
+For some known examples, see https://github.com/reidmorrison/jruby-jms/blob/master/examples/jms.yml[Example jms.yml].
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/juggernaut-index.asciidoc b/docs/versioned-plugins/outputs/juggernaut-index.asciidoc
new file mode 100644
index 000000000..ff9fdb2b9
--- /dev/null
+++ b/docs/versioned-plugins/outputs/juggernaut-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: juggernaut
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::juggernaut-v3.0.5.asciidoc[]
+include::juggernaut-v3.0.4.asciidoc[]
+include::juggernaut-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/juggernaut-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/juggernaut-v3.0.3.asciidoc
new file mode 100644
index 000000000..844c8944e
--- /dev/null
+++ b/docs/versioned-plugins/outputs/juggernaut-v3.0.3.asciidoc
@@ -0,0 +1,115 @@
+:plugin: juggernaut
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-juggernaut/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Juggernaut output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Push messages to the juggernaut websockets server:
+
+* https://github.com/maccman/juggernaut
+
+Wraps Websockets and supports other methods (including xhr longpolling) This
+is basically, just an extension of the redis output (Juggernaut pulls
+messages from redis). But it pushes messages to a particular channel and
+formats the messages in the way juggernaut expects.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Juggernaut Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-channels>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-message_format>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
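+
+For reference, a minimal sketch using the settings described below (the Redis
+host is a placeholder, not a default):
+
+[source,ruby]
+-----------------------------------------------------------------------------
+output {
+  juggernaut {
+    host     => "redis.example.com"   # placeholder Redis host
+    port     => 6379
+    db       => 0
+    channels => ["logstash-%{type}"]  # dynamic channel names are valid
+  }
+}
+-----------------------------------------------------------------------------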
+ +  + +[id="{version}-plugins-{type}s-{plugin}-channels"] +===== `channels` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +List of channels to which to publish. Dynamic names are +valid here, for example `logstash-%{type}`. + +[id="{version}-plugins-{type}s-{plugin}-db"] +===== `db` + + * Value type is <> + * Default value is `0` + +The redis database number. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"127.0.0.1"` + +The hostname of the redis server to which juggernaut is listening. + +[id="{version}-plugins-{type}s-{plugin}-message_format"] +===== `message_format` + + * Value type is <> + * There is no default value for this setting. + +How should the message be formatted before pushing to the websocket. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to authenticate with. There is no authentication by default. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `6379` + +The port to connect on. + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `5` + +Redis initial connection timeout in seconds. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/juggernaut-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/juggernaut-v3.0.4.asciidoc new file mode 100644 index 000000000..471d40797 --- /dev/null +++ b/docs/versioned-plugins/outputs/juggernaut-v3.0.4.asciidoc @@ -0,0 +1,115 @@ +:plugin: juggernaut +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-juggernaut/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Juggernaut output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Push messages to the juggernaut websockets server: + +* https://github.com/maccman/juggernaut + +Wraps Websockets and supports other methods (including xhr longpolling) This +is basically, just an extension of the redis output (Juggernaut pulls +messages from redis). But it pushes messages to a particular channel and +formats the messages in the way juggernaut expects. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Juggernaut Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-channels>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-message_format>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-channels"] +===== `channels` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +List of channels to which to publish. Dynamic names are +valid here, for example `logstash-%{type}`. + +[id="{version}-plugins-{type}s-{plugin}-db"] +===== `db` + + * Value type is <> + * Default value is `0` + +The redis database number. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"127.0.0.1"` + +The hostname of the redis server to which juggernaut is listening. + +[id="{version}-plugins-{type}s-{plugin}-message_format"] +===== `message_format` + + * Value type is <> + * There is no default value for this setting. + +How should the message be formatted before pushing to the websocket. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to authenticate with. There is no authentication by default. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `6379` + +The port to connect on. + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `5` + +Redis initial connection timeout in seconds. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/juggernaut-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/juggernaut-v3.0.5.asciidoc new file mode 100644 index 000000000..eb81df870 --- /dev/null +++ b/docs/versioned-plugins/outputs/juggernaut-v3.0.5.asciidoc @@ -0,0 +1,115 @@ +:plugin: juggernaut +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-output-juggernaut/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Juggernaut output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Push messages to the juggernaut websockets server: + +* https://github.com/maccman/juggernaut + +Wraps Websockets and supports other methods (including xhr longpolling) This +is basically, just an extension of the redis output (Juggernaut pulls +messages from redis). But it pushes messages to a particular channel and +formats the messages in the way juggernaut expects. 
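+
+As a sketch under the same assumptions, an event can be reshaped before it is
+pushed to the websocket channel (the host, channel, and format values are
+placeholders, not defaults):
+
+[source,ruby]
+-----------------------------------------------------------------------------
+output {
+  juggernaut {
+    host           => "redis.example.com"   # placeholder Redis host
+    channels       => ["notifications"]     # placeholder channel
+    message_format => "%{host}: %{message}" # formats the pushed message
+  }
+}
+-----------------------------------------------------------------------------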
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Juggernaut Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-channels>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-message_format>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-channels"] +===== `channels` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +List of channels to which to publish. Dynamic names are +valid here, for example `logstash-%{type}`. + +[id="{version}-plugins-{type}s-{plugin}-db"] +===== `db` + + * Value type is <> + * Default value is `0` + +The redis database number. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"127.0.0.1"` + +The hostname of the redis server to which juggernaut is listening. + +[id="{version}-plugins-{type}s-{plugin}-message_format"] +===== `message_format` + + * Value type is <> + * There is no default value for this setting. + +How should the message be formatted before pushing to the websocket. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to authenticate with. There is no authentication by default. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `6379` + +The port to connect on. + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `5` + +Redis initial connection timeout in seconds. 
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/kafka-index.asciidoc b/docs/versioned-plugins/outputs/kafka-index.asciidoc
new file mode 100644
index 000000000..774f3ab87
--- /dev/null
+++ b/docs/versioned-plugins/outputs/kafka-index.asciidoc
@@ -0,0 +1,26 @@
+:plugin: kafka
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2018-01-05
+| <> | 2017-10-25
+| <> | 2017-10-09
+| <> | 2017-08-16
+| <> | 2017-08-01
+| <> | 2017-07-11
+| <> | 2017-06-23
+| <> | 2017-05-11
+|=======================================================================
+
+include::kafka-v7.0.6.asciidoc[]
+include::kafka-v7.0.4.asciidoc[]
+include::kafka-v7.0.3.asciidoc[]
+include::kafka-v7.0.1.asciidoc[]
+include::kafka-v7.0.0.asciidoc[]
+include::kafka-v6.2.2.asciidoc[]
+include::kafka-v6.2.1.asciidoc[]
+include::kafka-v6.2.0.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/kafka-v6.2.0.asciidoc b/docs/versioned-plugins/outputs/kafka-v6.2.0.asciidoc
new file mode 100644
index 000000000..9f5e0fe01
--- /dev/null
+++ b/docs/versioned-plugins/outputs/kafka-v6.2.0.asciidoc
@@ -0,0 +1,449 @@
+:plugin: kafka
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v6.2.0
+:release_date: 2017-05-11
+:changelog_url: https://github.com/logstash-plugins/logstash-output-kafka/blob/v6.2.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Kafka output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Write events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on
+the broker.
+
+Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination
+of Logstash and the Kafka output plugin:
+
+[options="header"]
+|==========================================================
+|Kafka Client Version |Logstash Version |Plugin Version |Why?
+|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular
+|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`)
+|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`)
+|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker
+|0.10.1.x |2.4.x - 5.x.x | 6.x.x |
+|==========================================================
+
+NOTE: We recommend that you use matching Kafka client and broker versions. During upgrades, you should
+upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker
+is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around.
+
+This output supports connecting to Kafka over:
+
+* SSL (requires plugin version 3.0.0 or later)
+* Kerberos SASL (requires plugin version 5.1.0 or later)
+
+By default security is disabled but can be turned on as needed.
+
+The only required configuration is the topic_id. The default codec is plain,
+so events will be persisted on the broker in plain format.
Logstash will encode your messages with not +only the message but also with a timestamp and hostname. If you do not want anything but your message +passing through, you should make the output configuration something like: +[source,ruby] + output { + kafka { + codec => plain { + format => "%{message}" + } + topic_id => "mytopic" + } + } +For more information see http://kafka.apache.org/documentation.html#theproducer + +Kafka producer configuration: http://kafka.apache.org/documentation.html#newproducerconfigs + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Kafka Output Configuration Options + +This plugin supports the following configuration options plus the <> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-acks>> |<>, one of `["0", "1", "all"]`|No +| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-buffer_memory>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-compression_type>> |<>, one of `["none", "gzip", "snappy", "lz4"]`|No +| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-key_serializer>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-linger_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_request_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-message_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No +| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-topic_id>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-value_serializer>> |<>|No +|======================================================================= + +Also see <> for a list of options supported by all +output plugins. 
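+
+As a second sketch beyond the plain-codec example above, a batching and
+compression oriented configuration might look like this (the broker addresses
+and topic are placeholders, not defaults):
+
+[source,ruby]
+-----------------------------------------------------------------------------
+output {
+  kafka {
+    bootstrap_servers => "kafka1:9092,kafka2:9092"   # placeholder brokers
+    topic_id          => "logstash"                  # placeholder topic
+    compression_type  => "gzip"                      # none, gzip, snappy, or lz4
+    batch_size        => 16384                       # default batch size, in bytes
+    linger_ms         => 5                           # wait up to 5 ms to fill a batch
+    retries           => 2                           # resend on transient errors
+  }
+}
+-----------------------------------------------------------------------------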
+ +  + +[id="{version}-plugins-{type}s-{plugin}-acks"] +===== `acks` + + * Value can be any of: `0`, `1`, `all` + * Default value is `"1"` + +The number of acknowledgments the producer requires the leader to have received +before considering a request complete. + +acks=0, the producer will not wait for any acknowledgment from the server at all. +acks=1, This will mean the leader will write the record to its local log but + will respond without awaiting full acknowledgement from all followers. +acks=all, This means the leader will wait for the full set of in-sync replicas to acknowledge the record. + +[id="{version}-plugins-{type}s-{plugin}-batch_size"] +===== `batch_size` + + * Value type is <> + * Default value is `16384` + +The producer will attempt to batch records together into fewer requests whenever multiple +records are being sent to the same partition. This helps performance on both the client +and the server. This configuration controls the default batch size in bytes. + +[id="{version}-plugins-{type}s-{plugin}-block_on_buffer_full"] +===== `block_on_buffer_full` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `true` + +When our memory buffer is exhausted we must either stop accepting new +records (block) or throw errors. By default this setting is true and we block, +however in some scenarios blocking is not desirable and it is better to immediately give an error. + +[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"] +===== `bootstrap_servers` + + * Value type is <> + * Default value is `"localhost:9092"` + +This is for bootstrapping and the producer will only use it for getting metadata (topics, +partitions and replicas). The socket connections for sending the actual data will be +established based on the broker information returned in the metadata. The format is +`host1:port1,host2:port2`, and the list can be a subset of brokers or a VIP pointing to a +subset of brokers. + +[id="{version}-plugins-{type}s-{plugin}-buffer_memory"] +===== `buffer_memory` + + * Value type is <> + * Default value is `33554432` + +The total bytes of memory the producer can use to buffer records waiting to be sent to the server. + +[id="{version}-plugins-{type}s-{plugin}-client_id"] +===== `client_id` + + * Value type is <> + * There is no default value for this setting. + +The id string to pass to the server when making requests. +The purpose of this is to be able to track the source of requests beyond just +ip/port by allowing a logical application name to be included with the request + +[id="{version}-plugins-{type}s-{plugin}-compression_type"] +===== `compression_type` + + * Value can be any of: `none`, `gzip`, `snappy`, `lz4` + * Default value is `"none"` + +The compression type for all data generated by the producer. +The default is none (i.e. no compression). Valid values are none, gzip, or snappy. + +[id="{version}-plugins-{type}s-{plugin}-jaas_path"] +===== `jaas_path` + + * Value type is <> + * There is no default value for this setting. + +The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization +services for Kafka. This setting provides the path to the JAAS file. 
Sample JAAS file for Kafka client: +[source,java] +---------------------------------- +KafkaClient { + com.sun.security.auth.module.Krb5LoginModule required + useTicketCache=true + renewTicket=true + serviceName="kafka"; + }; +---------------------------------- + +Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these +to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same +`jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on +different JVM instances. + +[id="{version}-plugins-{type}s-{plugin}-kerberos_config"] +===== `kerberos_config` + + * Value type is <> + * There is no default value for this setting. + +Optional path to kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html + +[id="{version}-plugins-{type}s-{plugin}-key_serializer"] +===== `key_serializer` + + * Value type is <> + * Default value is `"org.apache.kafka.common.serialization.StringSerializer"` + +Serializer class for the key of the message + +[id="{version}-plugins-{type}s-{plugin}-linger_ms"] +===== `linger_ms` + + * Value type is <> + * Default value is `0` + +The producer groups together any records that arrive in between request +transmissions into a single batched request. Normally this occurs only under +load when records arrive faster than they can be sent out. However in some circumstances +the client may want to reduce the number of requests even under moderate load. +This setting accomplishes this by adding a small amount of artificial delay—that is, +rather than immediately sending out a record the producer will wait for up to the given delay +to allow other records to be sent so that the sends can be batched together. + +[id="{version}-plugins-{type}s-{plugin}-max_request_size"] +===== `max_request_size` + + * Value type is <> + * Default value is `1048576` + +The maximum size of a request + +[id="{version}-plugins-{type}s-{plugin}-message_key"] +===== `message_key` + + * Value type is <> + * There is no default value for this setting. + +The key for the message + +[id="{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms"] +===== `metadata_fetch_timeout_ms` + + * Value type is <> + * Default value is `60000` + +the timeout setting for initial metadata request to fetch topic metadata. + +[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"] +===== `metadata_max_age_ms` + + * Value type is <> + * Default value is `300000` + +the max time in milliseconds before a metadata refresh is forced. + +[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] +===== `receive_buffer_bytes` + + * Value type is <> + * Default value is `32768` + +The size of the TCP receive buffer to use when reading data + +[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"] +===== `reconnect_backoff_ms` + + * Value type is <> + * Default value is `10` + +The amount of time to wait before attempting to reconnect to a given host when a connection fails. + +[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"] +===== `request_timeout_ms` + + * Value type is <> + * There is no default value for this setting. + +The configuration controls the maximum amount of time the client will wait +for the response of a request. 
If the response is not received before the timeout +elapses the client will resend the request if necessary or fail the request if +retries are exhausted. + +[id="{version}-plugins-{type}s-{plugin}-retries"] +===== `retries` + + * Value type is <> + * Default value is `0` + +Setting a value greater than zero will cause the client to +resend any record whose send fails with a potentially transient error. + +[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"] +===== `retry_backoff_ms` + + * Value type is <> + * Default value is `100` + +The amount of time to wait before attempting to retry a failed produce request to a given topic partition. + +[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"] +===== `sasl_kerberos_service_name` + + * Value type is <> + * There is no default value for this setting. + +The Kerberos principal name that Kafka broker runs as. +This can be defined either in Kafka's JAAS config or in Kafka's config. + +[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"] +===== `sasl_mechanism` + + * Value type is <> + * Default value is `"GSSAPI"` + +http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections. +This may be any mechanism for which a security provider is available. +GSSAPI is the default mechanism. + +[id="{version}-plugins-{type}s-{plugin}-security_protocol"] +===== `security_protocol` + + * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL` + * Default value is `"PLAINTEXT"` + +Security protocol to use, which can be either of PLAINTEXT,SSL,SASL_PLAINTEXT,SASL_SSL + +[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"] +===== `send_buffer_bytes` + + * Value type is <> + * Default value is `131072` + +The size of the TCP send buffer to use when sending data. + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `false` + +Enable SSL/TLS secured communication to Kafka broker. + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"] +===== `ssl_key_password` + + * Value type is <> + * There is no default value for this setting. + +The password of the private key in the key store file. + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"] +===== `ssl_keystore_location` + + * Value type is <> + * There is no default value for this setting. + +If client authentication is required, this setting stores the keystore path. + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"] +===== `ssl_keystore_password` + + * Value type is <> + * There is no default value for this setting. + +If client authentication is required, this setting stores the keystore password + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"] +===== `ssl_keystore_type` + + * Value type is <> + * There is no default value for this setting. + +The keystore type. + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"] +===== `ssl_truststore_location` + + * Value type is <> + * There is no default value for this setting. + +The JKS truststore path to validate the Kafka broker's certificate. + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"] +===== `ssl_truststore_password` + + * Value type is <> + * There is no default value for this setting. 
+ +The truststore password + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"] +===== `ssl_truststore_type` + + * Value type is <> + * There is no default value for this setting. + +The truststore type. + +[id="{version}-plugins-{type}s-{plugin}-timeout_ms"] +===== `timeout_ms` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `30000` + +The configuration controls the maximum amount of time the server will wait for acknowledgments +from followers to meet the acknowledgment requirements the producer has specified with the +acks configuration. If the requested number of acknowledgments are not met when the timeout +elapses an error will be returned. This timeout is measured on the server side and does not +include the network latency of the request. + +[id="{version}-plugins-{type}s-{plugin}-topic_id"] +===== `topic_id` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The topic to produce messages to + +[id="{version}-plugins-{type}s-{plugin}-value_serializer"] +===== `value_serializer` + + * Value type is <> + * Default value is `"org.apache.kafka.common.serialization.StringSerializer"` + +Serializer class for the value of the message + + + +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/kafka-v6.2.1.asciidoc b/docs/versioned-plugins/outputs/kafka-v6.2.1.asciidoc new file mode 100644 index 000000000..2dda41534 --- /dev/null +++ b/docs/versioned-plugins/outputs/kafka-v6.2.1.asciidoc @@ -0,0 +1,450 @@ +:plugin: kafka +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v6.2.1 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-output-kafka/blob/v6.2.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Kafka output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Write events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on +the broker. + +Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination +of Logstash and the Kafka output plugin: + +[options="header"] +|========================================================== +|Kafka Client Version |Logstash Version |Plugin Version |Why? +|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular +|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`) +|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`) +|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker +|0.10.1.x |2.4.x - 5.x.x | 6.x.x | +|========================================================== + +NOTE: We recommended that you use matching Kafka client and broker versions. During upgrades, you should +upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker +is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around. 
+ +This output supports connecting to Kafka over: + +* SSL (requires plugin version 3.0.0 or later) +* Kerberos SASL (requires plugin version 5.1.0 or later) + +By default security is disabled but can be turned on as needed. + +The only required configuration is the topic_id. The default codec is plain, +so events will be persisted on the broker in plain format. Logstash will encode your messages with not +only the message but also with a timestamp and hostname. If you do not want anything but your message +passing through, you should make the output configuration something like: +[source,ruby] + output { + kafka { + codec => plain { + format => "%{message}" + } + topic_id => "mytopic" + } + } +For more information see http://kafka.apache.org/documentation.html#theproducer + +Kafka producer configuration: http://kafka.apache.org/documentation.html#newproducerconfigs + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Kafka Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-acks>> |<>, one of `["0", "1", "all"]`|No +| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-buffer_memory>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-compression_type>> |<>, one of `["none", "gzip", "snappy", "lz4"]`|No +| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-key_serializer>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-linger_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_request_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-message_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No +| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-topic_id>> |<>|Yes +| 
<<{version}-plugins-{type}s-{plugin}-value_serializer>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-acks"] +===== `acks` + + * Value can be any of: `0`, `1`, `all` + * Default value is `"1"` + +The number of acknowledgments the producer requires the leader to have received +before considering a request complete. + +acks=0, the producer will not wait for any acknowledgment from the server at all. +acks=1, This will mean the leader will write the record to its local log but + will respond without awaiting full acknowledgement from all followers. +acks=all, This means the leader will wait for the full set of in-sync replicas to acknowledge the record. + +[id="{version}-plugins-{type}s-{plugin}-batch_size"] +===== `batch_size` + + * Value type is <> + * Default value is `16384` + +The producer will attempt to batch records together into fewer requests whenever multiple +records are being sent to the same partition. This helps performance on both the client +and the server. This configuration controls the default batch size in bytes. + +[id="{version}-plugins-{type}s-{plugin}-block_on_buffer_full"] +===== `block_on_buffer_full` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `true` + +When our memory buffer is exhausted we must either stop accepting new +records (block) or throw errors. By default this setting is true and we block, +however in some scenarios blocking is not desirable and it is better to immediately give an error. + +[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"] +===== `bootstrap_servers` + + * Value type is <> + * Default value is `"localhost:9092"` + +This is for bootstrapping and the producer will only use it for getting metadata (topics, +partitions and replicas). The socket connections for sending the actual data will be +established based on the broker information returned in the metadata. The format is +`host1:port1,host2:port2`, and the list can be a subset of brokers or a VIP pointing to a +subset of brokers. + +[id="{version}-plugins-{type}s-{plugin}-buffer_memory"] +===== `buffer_memory` + + * Value type is <> + * Default value is `33554432` + +The total bytes of memory the producer can use to buffer records waiting to be sent to the server. + +[id="{version}-plugins-{type}s-{plugin}-client_id"] +===== `client_id` + + * Value type is <> + * There is no default value for this setting. + +The id string to pass to the server when making requests. +The purpose of this is to be able to track the source of requests beyond just +ip/port by allowing a logical application name to be included with the request + +[id="{version}-plugins-{type}s-{plugin}-compression_type"] +===== `compression_type` + + * Value can be any of: `none`, `gzip`, `snappy`, `lz4` + * Default value is `"none"` + +The compression type for all data generated by the producer. +The default is none (i.e. no compression). Valid values are none, gzip, or snappy. + +[id="{version}-plugins-{type}s-{plugin}-jaas_path"] +===== `jaas_path` + + * Value type is <> + * There is no default value for this setting. + +The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization +services for Kafka. This setting provides the path to the JAAS file. 
Sample JAAS file for Kafka client: +[source,java] +---------------------------------- +KafkaClient { + com.sun.security.auth.module.Krb5LoginModule required + useTicketCache=true + renewTicket=true + serviceName="kafka"; + }; +---------------------------------- + +Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these +to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same +`jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on +different JVM instances. + +[id="{version}-plugins-{type}s-{plugin}-kerberos_config"] +===== `kerberos_config` + + * Value type is <> + * There is no default value for this setting. + +Optional path to kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html + +[id="{version}-plugins-{type}s-{plugin}-key_serializer"] +===== `key_serializer` + + * Value type is <> + * Default value is `"org.apache.kafka.common.serialization.StringSerializer"` + +Serializer class for the key of the message + +[id="{version}-plugins-{type}s-{plugin}-linger_ms"] +===== `linger_ms` + + * Value type is <> + * Default value is `0` + +The producer groups together any records that arrive in between request +transmissions into a single batched request. Normally this occurs only under +load when records arrive faster than they can be sent out. However in some circumstances +the client may want to reduce the number of requests even under moderate load. +This setting accomplishes this by adding a small amount of artificial delay—that is, +rather than immediately sending out a record the producer will wait for up to the given delay +to allow other records to be sent so that the sends can be batched together. + +[id="{version}-plugins-{type}s-{plugin}-max_request_size"] +===== `max_request_size` + + * Value type is <> + * Default value is `1048576` + +The maximum size of a request + +[id="{version}-plugins-{type}s-{plugin}-message_key"] +===== `message_key` + + * Value type is <> + * There is no default value for this setting. + +The key for the message + +[id="{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms"] +===== `metadata_fetch_timeout_ms` + + * Value type is <> + * Default value is `60000` + +the timeout setting for initial metadata request to fetch topic metadata. + +[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"] +===== `metadata_max_age_ms` + + * Value type is <> + * Default value is `300000` + +the max time in milliseconds before a metadata refresh is forced. + +[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] +===== `receive_buffer_bytes` + + * Value type is <> + * Default value is `32768` + +The size of the TCP receive buffer to use when reading data + +[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"] +===== `reconnect_backoff_ms` + + * Value type is <> + * Default value is `10` + +The amount of time to wait before attempting to reconnect to a given host when a connection fails. + +[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"] +===== `request_timeout_ms` + + * Value type is <> + * There is no default value for this setting. + +The configuration controls the maximum amount of time the client will wait +for the response of a request. 
If the response is not received before the timeout +elapses the client will resend the request if necessary or fail the request if +retries are exhausted. + +[id="{version}-plugins-{type}s-{plugin}-retries"] +===== `retries` + + * Value type is <> + * Default value is `0` + +Setting a value greater than zero will cause the client to +resend any record whose send fails with a potentially transient error. + +[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"] +===== `retry_backoff_ms` + + * Value type is <> + * Default value is `100` + +The amount of time to wait before attempting to retry a failed produce request to a given topic partition. + +[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"] +===== `sasl_kerberos_service_name` + + * Value type is <> + * There is no default value for this setting. + +The Kerberos principal name that Kafka broker runs as. +This can be defined either in Kafka's JAAS config or in Kafka's config. + +[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"] +===== `sasl_mechanism` + + * Value type is <> + * Default value is `"GSSAPI"` + +http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections. +This may be any mechanism for which a security provider is available. +GSSAPI is the default mechanism. + +[id="{version}-plugins-{type}s-{plugin}-security_protocol"] +===== `security_protocol` + + * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL` + * Default value is `"PLAINTEXT"` + +Security protocol to use, which can be either of PLAINTEXT,SSL,SASL_PLAINTEXT,SASL_SSL + +[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"] +===== `send_buffer_bytes` + + * Value type is <> + * Default value is `131072` + +The size of the TCP send buffer to use when sending data. + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `false` + +Enable SSL/TLS secured communication to Kafka broker. + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"] +===== `ssl_key_password` + + * Value type is <> + * There is no default value for this setting. + +The password of the private key in the key store file. + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"] +===== `ssl_keystore_location` + + * Value type is <> + * There is no default value for this setting. + +If client authentication is required, this setting stores the keystore path. + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"] +===== `ssl_keystore_password` + + * Value type is <> + * There is no default value for this setting. + +If client authentication is required, this setting stores the keystore password + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"] +===== `ssl_keystore_type` + + * Value type is <> + * There is no default value for this setting. + +The keystore type. + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"] +===== `ssl_truststore_location` + + * Value type is <> + * There is no default value for this setting. + +The JKS truststore path to validate the Kafka broker's certificate. + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"] +===== `ssl_truststore_password` + + * Value type is <> + * There is no default value for this setting. 
+ +The truststore password + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"] +===== `ssl_truststore_type` + + * Value type is <> + * There is no default value for this setting. + +The truststore type. + +[id="{version}-plugins-{type}s-{plugin}-timeout_ms"] +===== `timeout_ms` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `30000` + +The configuration controls the maximum amount of time the server will wait for acknowledgments +from followers to meet the acknowledgment requirements the producer has specified with the +acks configuration. If the requested number of acknowledgments are not met when the timeout +elapses an error will be returned. This timeout is measured on the server side and does not +include the network latency of the request. + +[id="{version}-plugins-{type}s-{plugin}-topic_id"] +===== `topic_id` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The topic to produce messages to + +[id="{version}-plugins-{type}s-{plugin}-value_serializer"] +===== `value_serializer` + + * Value type is <> + * Default value is `"org.apache.kafka.common.serialization.StringSerializer"` + +Serializer class for the value of the message + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/kafka-v6.2.2.asciidoc b/docs/versioned-plugins/outputs/kafka-v6.2.2.asciidoc new file mode 100644 index 000000000..7e1ccef09 --- /dev/null +++ b/docs/versioned-plugins/outputs/kafka-v6.2.2.asciidoc @@ -0,0 +1,451 @@ +:plugin: kafka +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v6.2.2 +:release_date: 2017-07-11 +:changelog_url: https://github.com/logstash-plugins/logstash-output-kafka/blob/v6.2.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Kafka output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Write events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on +the broker. + +Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination +of Logstash and the Kafka output plugin: + +[options="header"] +|========================================================== +|Kafka Client Version |Logstash Version |Plugin Version |Why? +|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular +|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`) +|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`) +|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker +|0.10.1.x |2.4.x - 5.x.x | 6.x.x | +|0.11.0.0 |2.4.x - 5.x.x | 6.2.2 |Not compatible with the <= 0.9 broker +|========================================================== + +NOTE: We recommended that you use matching Kafka client and broker versions. During upgrades, you should +upgrade brokers before clients because brokers target backwards compatibility. 
For example, the 0.9 broker +is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around. + +This output supports connecting to Kafka over: + +* SSL (requires plugin version 3.0.0 or later) +* Kerberos SASL (requires plugin version 5.1.0 or later) + +By default security is disabled but can be turned on as needed. + +The only required configuration is the topic_id. The default codec is plain, +so events will be persisted on the broker in plain format. Logstash will encode your messages with not +only the message but also with a timestamp and hostname. If you do not want anything but your message +passing through, you should make the output configuration something like: +[source,ruby] + output { + kafka { + codec => plain { + format => "%{message}" + } + topic_id => "mytopic" + } + } +For more information see http://kafka.apache.org/documentation.html#theproducer + +Kafka producer configuration: http://kafka.apache.org/documentation.html#newproducerconfigs + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Kafka Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-acks>> |<>, one of `["0", "1", "all"]`|No +| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-buffer_memory>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-compression_type>> |<>, one of `["none", "gzip", "snappy", "lz4"]`|No +| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-key_serializer>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-linger_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_request_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-message_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No +| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No +| 
<<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-topic_id>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-value_serializer>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-acks"] +===== `acks` + + * Value can be any of: `0`, `1`, `all` + * Default value is `"1"` + +The number of acknowledgments the producer requires the leader to have received +before considering a request complete. + +acks=0, the producer will not wait for any acknowledgment from the server at all. +acks=1, This will mean the leader will write the record to its local log but + will respond without awaiting full acknowledgement from all followers. +acks=all, This means the leader will wait for the full set of in-sync replicas to acknowledge the record. + +[id="{version}-plugins-{type}s-{plugin}-batch_size"] +===== `batch_size` + + * Value type is <> + * Default value is `16384` + +The producer will attempt to batch records together into fewer requests whenever multiple +records are being sent to the same partition. This helps performance on both the client +and the server. This configuration controls the default batch size in bytes. + +[id="{version}-plugins-{type}s-{plugin}-block_on_buffer_full"] +===== `block_on_buffer_full` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `true` + +When our memory buffer is exhausted we must either stop accepting new +records (block) or throw errors. By default this setting is true and we block, +however in some scenarios blocking is not desirable and it is better to immediately give an error. + +[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"] +===== `bootstrap_servers` + + * Value type is <> + * Default value is `"localhost:9092"` + +This is for bootstrapping and the producer will only use it for getting metadata (topics, +partitions and replicas). The socket connections for sending the actual data will be +established based on the broker information returned in the metadata. The format is +`host1:port1,host2:port2`, and the list can be a subset of brokers or a VIP pointing to a +subset of brokers. + +[id="{version}-plugins-{type}s-{plugin}-buffer_memory"] +===== `buffer_memory` + + * Value type is <> + * Default value is `33554432` + +The total bytes of memory the producer can use to buffer records waiting to be sent to the server. + +[id="{version}-plugins-{type}s-{plugin}-client_id"] +===== `client_id` + + * Value type is <> + * There is no default value for this setting. + +The id string to pass to the server when making requests. +The purpose of this is to be able to track the source of requests beyond just +ip/port by allowing a logical application name to be included with the request + +[id="{version}-plugins-{type}s-{plugin}-compression_type"] +===== `compression_type` + + * Value can be any of: `none`, `gzip`, `snappy`, `lz4` + * Default value is `"none"` + +The compression type for all data generated by the producer. +The default is none (i.e. no compression). Valid values are none, gzip, or snappy. + +[id="{version}-plugins-{type}s-{plugin}-jaas_path"] +===== `jaas_path` + + * Value type is <> + * There is no default value for this setting. 
+ +The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization +services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for Kafka client: +[source,java] +---------------------------------- +KafkaClient { + com.sun.security.auth.module.Krb5LoginModule required + useTicketCache=true + renewTicket=true + serviceName="kafka"; + }; +---------------------------------- + +Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these +to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same +`jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on +different JVM instances. + +[id="{version}-plugins-{type}s-{plugin}-kerberos_config"] +===== `kerberos_config` + + * Value type is <> + * There is no default value for this setting. + +Optional path to kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html + +[id="{version}-plugins-{type}s-{plugin}-key_serializer"] +===== `key_serializer` + + * Value type is <> + * Default value is `"org.apache.kafka.common.serialization.StringSerializer"` + +Serializer class for the key of the message + +[id="{version}-plugins-{type}s-{plugin}-linger_ms"] +===== `linger_ms` + + * Value type is <> + * Default value is `0` + +The producer groups together any records that arrive in between request +transmissions into a single batched request. Normally this occurs only under +load when records arrive faster than they can be sent out. However in some circumstances +the client may want to reduce the number of requests even under moderate load. +This setting accomplishes this by adding a small amount of artificial delay—that is, +rather than immediately sending out a record the producer will wait for up to the given delay +to allow other records to be sent so that the sends can be batched together. + +[id="{version}-plugins-{type}s-{plugin}-max_request_size"] +===== `max_request_size` + + * Value type is <> + * Default value is `1048576` + +The maximum size of a request + +[id="{version}-plugins-{type}s-{plugin}-message_key"] +===== `message_key` + + * Value type is <> + * There is no default value for this setting. + +The key for the message + +[id="{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms"] +===== `metadata_fetch_timeout_ms` + + * Value type is <> + * Default value is `60000` + +the timeout setting for initial metadata request to fetch topic metadata. + +[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"] +===== `metadata_max_age_ms` + + * Value type is <> + * Default value is `300000` + +the max time in milliseconds before a metadata refresh is forced. + +[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] +===== `receive_buffer_bytes` + + * Value type is <> + * Default value is `32768` + +The size of the TCP receive buffer to use when reading data + +[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"] +===== `reconnect_backoff_ms` + + * Value type is <> + * Default value is `10` + +The amount of time to wait before attempting to reconnect to a given host when a connection fails. + +[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"] +===== `request_timeout_ms` + + * Value type is <> + * There is no default value for this setting. 
+ +The configuration controls the maximum amount of time the client will wait +for the response of a request. If the response is not received before the timeout +elapses the client will resend the request if necessary or fail the request if +retries are exhausted. + +[id="{version}-plugins-{type}s-{plugin}-retries"] +===== `retries` + + * Value type is <> + * Default value is `0` + +Setting a value greater than zero will cause the client to +resend any record whose send fails with a potentially transient error. + +[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"] +===== `retry_backoff_ms` + + * Value type is <> + * Default value is `100` + +The amount of time to wait before attempting to retry a failed produce request to a given topic partition. + +[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"] +===== `sasl_kerberos_service_name` + + * Value type is <> + * There is no default value for this setting. + +The Kerberos principal name that Kafka broker runs as. +This can be defined either in Kafka's JAAS config or in Kafka's config. + +[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"] +===== `sasl_mechanism` + + * Value type is <> + * Default value is `"GSSAPI"` + +http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections. +This may be any mechanism for which a security provider is available. +GSSAPI is the default mechanism. + +[id="{version}-plugins-{type}s-{plugin}-security_protocol"] +===== `security_protocol` + + * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL` + * Default value is `"PLAINTEXT"` + +Security protocol to use, which can be either of PLAINTEXT,SSL,SASL_PLAINTEXT,SASL_SSL + +[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"] +===== `send_buffer_bytes` + + * Value type is <> + * Default value is `131072` + +The size of the TCP send buffer to use when sending data. + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `false` + +Enable SSL/TLS secured communication to Kafka broker. + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"] +===== `ssl_key_password` + + * Value type is <> + * There is no default value for this setting. + +The password of the private key in the key store file. + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"] +===== `ssl_keystore_location` + + * Value type is <> + * There is no default value for this setting. + +If client authentication is required, this setting stores the keystore path. + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"] +===== `ssl_keystore_password` + + * Value type is <> + * There is no default value for this setting. + +If client authentication is required, this setting stores the keystore password + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"] +===== `ssl_keystore_type` + + * Value type is <> + * There is no default value for this setting. + +The keystore type. + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"] +===== `ssl_truststore_location` + + * Value type is <> + * There is no default value for this setting. + +The JKS truststore path to validate the Kafka broker's certificate. + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"] +===== `ssl_truststore_password` + + * Value type is <> + * There is no default value for this setting. 
+ +The truststore password + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"] +===== `ssl_truststore_type` + + * Value type is <> + * There is no default value for this setting. + +The truststore type. + +[id="{version}-plugins-{type}s-{plugin}-timeout_ms"] +===== `timeout_ms` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `30000` + +The configuration controls the maximum amount of time the server will wait for acknowledgments +from followers to meet the acknowledgment requirements the producer has specified with the +acks configuration. If the requested number of acknowledgments are not met when the timeout +elapses an error will be returned. This timeout is measured on the server side and does not +include the network latency of the request. + +[id="{version}-plugins-{type}s-{plugin}-topic_id"] +===== `topic_id` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The topic to produce messages to + +[id="{version}-plugins-{type}s-{plugin}-value_serializer"] +===== `value_serializer` + + * Value type is <> + * Default value is `"org.apache.kafka.common.serialization.StringSerializer"` + +Serializer class for the value of the message + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/kafka-v7.0.0.asciidoc b/docs/versioned-plugins/outputs/kafka-v7.0.0.asciidoc new file mode 100644 index 000000000..4d8df2297 --- /dev/null +++ b/docs/versioned-plugins/outputs/kafka-v7.0.0.asciidoc @@ -0,0 +1,418 @@ +:plugin: kafka +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v7.0.0 +:release_date: 2017-08-01 +:changelog_url: https://github.com/logstash-plugins/logstash-output-kafka/blob/v7.0.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Kafka output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Write events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on +the broker. + +Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination +of Logstash and the Kafka output plugin: + +[options="header"] +|========================================================== +|Kafka Client Version |Logstash Version |Plugin Version |Why? +|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular +|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`) +|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`) +|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker +|0.10.1.x |2.4.x - 5.x.x | 6.x.x | +|0.11.0.0 |2.4.x - 5.x.x | 6.2.2 |Not compatible with the <= 0.9 broker +|========================================================== + +NOTE: We recommended that you use matching Kafka client and broker versions. During upgrades, you should +upgrade brokers before clients because brokers target backwards compatibility. 
For example, the 0.9 broker +is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around. + +This output supports connecting to Kafka over: + +* SSL (requires plugin version 3.0.0 or later) +* Kerberos SASL (requires plugin version 5.1.0 or later) + +By default security is disabled but can be turned on as needed. + +The only required configuration is the topic_id. The default codec is plain, +so events will be persisted on the broker in plain format. Logstash will encode your messages with not +only the message but also with a timestamp and hostname. If you do not want anything but your message +passing through, you should make the output configuration something like: +[source,ruby] + output { + kafka { + codec => plain { + format => "%{message}" + } + topic_id => "mytopic" + } + } +For more information see http://kafka.apache.org/documentation.html#theproducer + +Kafka producer configuration: http://kafka.apache.org/documentation.html#newproducerconfigs + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Kafka Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-acks>> |<>, one of `["0", "1", "all"]`|No +| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-buffer_memory>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-compression_type>> |<>, one of `["none", "gzip", "snappy", "lz4"]`|No +| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-key_serializer>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-linger_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_request_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-message_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No +| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No +| 
<<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-topic_id>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-value_serializer>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-acks"] +===== `acks` + + * Value can be any of: `0`, `1`, `all` + * Default value is `"1"` + +The number of acknowledgments the producer requires the leader to have received +before considering a request complete. + +acks=0, the producer will not wait for any acknowledgment from the server at all. +acks=1, This will mean the leader will write the record to its local log but + will respond without awaiting full acknowledgement from all followers. +acks=all, This means the leader will wait for the full set of in-sync replicas to acknowledge the record. + +[id="{version}-plugins-{type}s-{plugin}-batch_size"] +===== `batch_size` + + * Value type is <> + * Default value is `16384` + +The producer will attempt to batch records together into fewer requests whenever multiple +records are being sent to the same partition. This helps performance on both the client +and the server. This configuration controls the default batch size in bytes. + +[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"] +===== `bootstrap_servers` + + * Value type is <> + * Default value is `"localhost:9092"` + +This is for bootstrapping and the producer will only use it for getting metadata (topics, +partitions and replicas). The socket connections for sending the actual data will be +established based on the broker information returned in the metadata. The format is +`host1:port1,host2:port2`, and the list can be a subset of brokers or a VIP pointing to a +subset of brokers. + +[id="{version}-plugins-{type}s-{plugin}-buffer_memory"] +===== `buffer_memory` + + * Value type is <> + * Default value is `33554432` + +The total bytes of memory the producer can use to buffer records waiting to be sent to the server. + +[id="{version}-plugins-{type}s-{plugin}-client_id"] +===== `client_id` + + * Value type is <> + * There is no default value for this setting. + +The id string to pass to the server when making requests. +The purpose of this is to be able to track the source of requests beyond just +ip/port by allowing a logical application name to be included with the request + +[id="{version}-plugins-{type}s-{plugin}-compression_type"] +===== `compression_type` + + * Value can be any of: `none`, `gzip`, `snappy`, `lz4` + * Default value is `"none"` + +The compression type for all data generated by the producer. +The default is none (i.e. no compression). Valid values are none, gzip, or snappy. + +[id="{version}-plugins-{type}s-{plugin}-jaas_path"] +===== `jaas_path` + + * Value type is <> + * There is no default value for this setting. + +The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization +services for Kafka. This setting provides the path to the JAAS file. 
Sample JAAS file for Kafka client: +[source,java] +---------------------------------- +KafkaClient { + com.sun.security.auth.module.Krb5LoginModule required + useTicketCache=true + renewTicket=true + serviceName="kafka"; + }; +---------------------------------- + +Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these +to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same +`jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on +different JVM instances. + +[id="{version}-plugins-{type}s-{plugin}-kerberos_config"] +===== `kerberos_config` + + * Value type is <> + * There is no default value for this setting. + +Optional path to kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html + +[id="{version}-plugins-{type}s-{plugin}-key_serializer"] +===== `key_serializer` + + * Value type is <> + * Default value is `"org.apache.kafka.common.serialization.StringSerializer"` + +Serializer class for the key of the message + +[id="{version}-plugins-{type}s-{plugin}-linger_ms"] +===== `linger_ms` + + * Value type is <> + * Default value is `0` + +The producer groups together any records that arrive in between request +transmissions into a single batched request. Normally this occurs only under +load when records arrive faster than they can be sent out. However in some circumstances +the client may want to reduce the number of requests even under moderate load. +This setting accomplishes this by adding a small amount of artificial delay—that is, +rather than immediately sending out a record the producer will wait for up to the given delay +to allow other records to be sent so that the sends can be batched together. + +[id="{version}-plugins-{type}s-{plugin}-max_request_size"] +===== `max_request_size` + + * Value type is <> + * Default value is `1048576` + +The maximum size of a request + +[id="{version}-plugins-{type}s-{plugin}-message_key"] +===== `message_key` + + * Value type is <> + * There is no default value for this setting. + +The key for the message + +[id="{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms"] +===== `metadata_fetch_timeout_ms` + + * Value type is <> + * Default value is `60000` + +the timeout setting for initial metadata request to fetch topic metadata. + +[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"] +===== `metadata_max_age_ms` + + * Value type is <> + * Default value is `300000` + +the max time in milliseconds before a metadata refresh is forced. + +[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] +===== `receive_buffer_bytes` + + * Value type is <> + * Default value is `32768` + +The size of the TCP receive buffer to use when reading data + +[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"] +===== `reconnect_backoff_ms` + + * Value type is <> + * Default value is `10` + +The amount of time to wait before attempting to reconnect to a given host when a connection fails. + +[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"] +===== `request_timeout_ms` + + * Value type is <> + * There is no default value for this setting. + +The configuration controls the maximum amount of time the client will wait +for the response of a request. 
If the response is not received before the timeout +elapses the client will resend the request if necessary or fail the request if +retries are exhausted. + +[id="{version}-plugins-{type}s-{plugin}-retries"] +===== `retries` + + * Value type is <> + * Default value is `0` + +Setting a value greater than zero will cause the client to +resend any record whose send fails with a potentially transient error. + +[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"] +===== `retry_backoff_ms` + + * Value type is <> + * Default value is `100` + +The amount of time to wait before attempting to retry a failed produce request to a given topic partition. + +[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"] +===== `sasl_kerberos_service_name` + + * Value type is <> + * There is no default value for this setting. + +The Kerberos principal name that Kafka broker runs as. +This can be defined either in Kafka's JAAS config or in Kafka's config. + +[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"] +===== `sasl_mechanism` + + * Value type is <> + * Default value is `"GSSAPI"` + +http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections. +This may be any mechanism for which a security provider is available. +GSSAPI is the default mechanism. + +[id="{version}-plugins-{type}s-{plugin}-security_protocol"] +===== `security_protocol` + + * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL` + * Default value is `"PLAINTEXT"` + +Security protocol to use, which can be either of PLAINTEXT,SSL,SASL_PLAINTEXT,SASL_SSL + +[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"] +===== `send_buffer_bytes` + + * Value type is <> + * Default value is `131072` + +The size of the TCP send buffer to use when sending data. + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"] +===== `ssl_key_password` + + * Value type is <> + * There is no default value for this setting. + +The password of the private key in the key store file. + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"] +===== `ssl_keystore_location` + + * Value type is <> + * There is no default value for this setting. + +If client authentication is required, this setting stores the keystore path. + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"] +===== `ssl_keystore_password` + + * Value type is <> + * There is no default value for this setting. + +If client authentication is required, this setting stores the keystore password + +[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"] +===== `ssl_keystore_type` + + * Value type is <> + * There is no default value for this setting. + +The keystore type. + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"] +===== `ssl_truststore_location` + + * Value type is <> + * There is no default value for this setting. + +The JKS truststore path to validate the Kafka broker's certificate. + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"] +===== `ssl_truststore_password` + + * Value type is <> + * There is no default value for this setting. + +The truststore password + +[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"] +===== `ssl_truststore_type` + + * Value type is <> + * There is no default value for this setting. + +The truststore type. + +[id="{version}-plugins-{type}s-{plugin}-topic_id"] +===== `topic_id` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. 
+
+The topic to produce messages to.
+
+[id="{version}-plugins-{type}s-{plugin}-value_serializer"]
+===== `value_serializer`
+
+ * Value type is <>
+ * Default value is `"org.apache.kafka.common.serialization.StringSerializer"`
+
+Serializer class for the value of the message.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/kafka-v7.0.1.asciidoc b/docs/versioned-plugins/outputs/kafka-v7.0.1.asciidoc
new file mode 100644
index 000000000..3e95a9d75
--- /dev/null
+++ b/docs/versioned-plugins/outputs/kafka-v7.0.1.asciidoc
@@ -0,0 +1,418 @@
+:plugin: kafka
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v7.0.1
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-output-kafka/blob/v7.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Kafka output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Write events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on
+the broker.
+
+Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination
+of Logstash and the Kafka output plugin:
+
+[options="header"]
+|==========================================================
+|Kafka Client Version |Logstash Version |Plugin Version |Why?
+|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular
+|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`)
+|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`)
+|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker
+|0.10.1.x |2.4.x - 5.x.x | 6.x.x |
+|0.11.0.0 |2.4.x - 5.x.x | 6.2.2 |Not compatible with the <= 0.9 broker
+|==========================================================
+
+NOTE: We recommend that you use matching Kafka client and broker versions. During upgrades, you should
+upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker
+is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around.
+
+This output supports connecting to Kafka over:
+
+* SSL (requires plugin version 3.0.0 or later)
+* Kerberos SASL (requires plugin version 5.1.0 or later)
+
+By default security is disabled but can be turned on as needed.
+
+The only required configuration is the `topic_id`. The default codec is `plain`,
+so events will be persisted on the broker in plain format. Logstash will encode your messages with not
+only the message itself, but also a timestamp and the hostname.
If you do not want anything but your message +passing through, you should make the output configuration something like: +[source,ruby] + output { + kafka { + codec => plain { + format => "%{message}" + } + topic_id => "mytopic" + } + } +For more information see http://kafka.apache.org/documentation.html#theproducer + +Kafka producer configuration: http://kafka.apache.org/documentation.html#newproducerconfigs + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Kafka Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-acks>> |<>, one of `["0", "1", "all"]`|No +| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-buffer_memory>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-compression_type>> |<>, one of `["none", "gzip", "snappy", "lz4"]`|No +| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-key_serializer>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-linger_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-max_request_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-message_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retries>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No +| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-topic_id>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-value_serializer>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. 
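+
+For orientation, here is a minimal sketch that combines the required `topic_id` with a
+few of the optional settings from the table above. The broker list and topic name are
+placeholders, and the tuning values are illustrative rather than recommendations:
+
+[source,ruby]
+    output {
+      kafka {
+        bootstrap_servers => "kafka1:9092,kafka2:9092" # placeholder broker list
+        topic_id => "mytopic"                          # the only required setting
+        compression_type => "snappy"                   # optional: compress batched records
+      }
+    }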
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-acks"]
+===== `acks`
+
+ * Value can be any of: `0`, `1`, `all`
+ * Default value is `"1"`
+
+The number of acknowledgments the producer requires the leader to have received
+before considering a request complete.
+
+* `acks=0`: the producer will not wait for any acknowledgment from the server at all.
+* `acks=1`: the leader will write the record to its local log, but will respond
+without awaiting full acknowledgement from all followers.
+* `acks=all`: the leader will wait for the full set of in-sync replicas to acknowledge the record.
+
+[id="{version}-plugins-{type}s-{plugin}-batch_size"]
+===== `batch_size`
+
+ * Value type is <<number,number>>
+ * Default value is `16384`
+
+The producer will attempt to batch records together into fewer requests whenever multiple
+records are being sent to the same partition. This helps performance on both the client
+and the server. This configuration controls the default batch size in bytes.
+
+[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"]
+===== `bootstrap_servers`
+
+ * Value type is <<string,string>>
+ * Default value is `"localhost:9092"`
+
+This is for bootstrapping and the producer will only use it for getting metadata (topics,
+partitions and replicas). The socket connections for sending the actual data will be
+established based on the broker information returned in the metadata. The format is
+`host1:port1,host2:port2`, and the list can be a subset of brokers or a VIP pointing to a
+subset of brokers.
+
+[id="{version}-plugins-{type}s-{plugin}-buffer_memory"]
+===== `buffer_memory`
+
+ * Value type is <<number,number>>
+ * Default value is `33554432`
+
+The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+
+[id="{version}-plugins-{type}s-{plugin}-client_id"]
+===== `client_id`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The id string to pass to the server when making requests.
+The purpose of this is to be able to track the source of requests beyond just
+ip/port by allowing a logical application name to be included with the request.
+
+[id="{version}-plugins-{type}s-{plugin}-compression_type"]
+===== `compression_type`
+
+ * Value can be any of: `none`, `gzip`, `snappy`, `lz4`
+ * Default value is `"none"`
+
+The compression type for all data generated by the producer.
+The default is `none` (i.e. no compression). Valid values are `none`, `gzip`, `snappy`, and `lz4`.
+
+[id="{version}-plugins-{type}s-{plugin}-jaas_path"]
+===== `jaas_path`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization
+services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for Kafka client:
+[source,java]
+----------------------------------
+KafkaClient {
+  com.sun.security.auth.module.Krb5LoginModule required
+  useTicketCache=true
+  renewTicket=true
+  serviceName="kafka";
+  };
+----------------------------------
+
+Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these
+to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same
+`jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on
+different JVM instances.
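+
+As a point of reference, here is a minimal, hypothetical sketch of a
+Kerberos-enabled producer that ties these security settings together (the
+paths and service name below are placeholders, not shipped defaults):
+[source,ruby]
+    output {
+      kafka {
+        topic_id                   => "mytopic"
+        security_protocol          => "SASL_PLAINTEXT"
+        sasl_kerberos_service_name => "kafka"
+        jaas_path                  => "/etc/logstash/kafka_client_jaas.conf"  # placeholder path
+        kerberos_config            => "/etc/krb5.conf"                        # placeholder path
+      }
+    }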
+
+[id="{version}-plugins-{type}s-{plugin}-kerberos_config"]
+===== `kerberos_config`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Optional path to a Kerberos config file. It uses the krb5.conf style, as detailed at
+https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html
+
+[id="{version}-plugins-{type}s-{plugin}-key_serializer"]
+===== `key_serializer`
+
+ * Value type is <<string,string>>
+ * Default value is `"org.apache.kafka.common.serialization.StringSerializer"`
+
+Serializer class for the key of the message.
+
+[id="{version}-plugins-{type}s-{plugin}-linger_ms"]
+===== `linger_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `0`
+
+The producer groups together any records that arrive in between request
+transmissions into a single batched request. Normally this occurs only under
+load when records arrive faster than they can be sent out. However in some circumstances
+the client may want to reduce the number of requests even under moderate load.
+This setting accomplishes this by adding a small amount of artificial delay; that is,
+rather than immediately sending out a record the producer will wait for up to the given delay
+to allow other records to be sent so that the sends can be batched together.
+
+[id="{version}-plugins-{type}s-{plugin}-max_request_size"]
+===== `max_request_size`
+
+ * Value type is <<number,number>>
+ * Default value is `1048576`
+
+The maximum size of a request.
+
+[id="{version}-plugins-{type}s-{plugin}-message_key"]
+===== `message_key`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The key for the message.
+
+[id="{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms"]
+===== `metadata_fetch_timeout_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `60000`
+
+The timeout for the initial metadata request used to fetch topic metadata.
+
+[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"]
+===== `metadata_max_age_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `300000`
+
+The maximum time in milliseconds before a metadata refresh is forced.
+
+[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"]
+===== `receive_buffer_bytes`
+
+ * Value type is <<number,number>>
+ * Default value is `32768`
+
+The size of the TCP receive buffer to use when reading data.
+
+[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"]
+===== `reconnect_backoff_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `10`
+
+The amount of time to wait before attempting to reconnect to a given host when a connection fails.
+
+[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"]
+===== `request_timeout_ms`
+
+ * Value type is <<number,number>>
+ * There is no default value for this setting.
+
+The configuration controls the maximum amount of time the client will wait
+for the response of a request. If the response is not received before the timeout
+elapses the client will resend the request if necessary or fail the request if
+retries are exhausted.
+
+[id="{version}-plugins-{type}s-{plugin}-retries"]
+===== `retries`
+
+ * Value type is <<number,number>>
+ * Default value is `0`
+
+Setting a value greater than zero will cause the client to
+resend any record whose send fails with a potentially transient error.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"]
+===== `retry_backoff_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `100`
+
+The amount of time to wait before attempting to retry a failed produce request to a given topic partition.
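+
+For illustration only, a hypothetical sketch that bounds retry behavior
+(the values are arbitrary, not recommendations):
+[source,ruby]
+    output {
+      kafka {
+        topic_id         => "mytopic"
+        retries          => 3    # default is 0, i.e. failed sends are not retried
+        retry_backoff_ms => 500  # default is 100
+      }
+    }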
+
+[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"]
+===== `sasl_kerberos_service_name`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The Kerberos principal name that the Kafka broker runs as.
+This can be defined either in Kafka's JAAS config or in Kafka's config.
+
+[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"]
+===== `sasl_mechanism`
+
+ * Value type is <<string,string>>
+ * Default value is `"GSSAPI"`
+
+http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections.
+This may be any mechanism for which a security provider is available.
+GSSAPI is the default mechanism.
+
+[id="{version}-plugins-{type}s-{plugin}-security_protocol"]
+===== `security_protocol`
+
+ * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`
+ * Default value is `"PLAINTEXT"`
+
+Security protocol to use, which can be one of `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, or `SASL_SSL`.
+
+[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"]
+===== `send_buffer_bytes`
+
+ * Value type is <<number,number>>
+ * Default value is `131072`
+
+The size of the TCP send buffer to use when sending data.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"]
+===== `ssl_key_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+The password of the private key in the key store file.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"]
+===== `ssl_keystore_location`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+If client authentication is required, this setting stores the keystore path.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"]
+===== `ssl_keystore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+If client authentication is required, this setting stores the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"]
+===== `ssl_keystore_type`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The keystore type.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"]
+===== `ssl_truststore_location`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The JKS truststore path to validate the Kafka broker's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"]
+===== `ssl_truststore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+The truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"]
+===== `ssl_truststore_type`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The truststore type.
+
+[id="{version}-plugins-{type}s-{plugin}-topic_id"]
+===== `topic_id`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The topic to produce messages to.
+
+[id="{version}-plugins-{type}s-{plugin}-value_serializer"]
+===== `value_serializer`
+
+ * Value type is <<string,string>>
+ * Default value is `"org.apache.kafka.common.serialization.StringSerializer"`
+
+Serializer class for the value of the message.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/kafka-v7.0.3.asciidoc b/docs/versioned-plugins/outputs/kafka-v7.0.3.asciidoc
new file mode 100644
index 000000000..06e3a8711
--- /dev/null
+++ b/docs/versioned-plugins/outputs/kafka-v7.0.3.asciidoc
@@ -0,0 +1,425 @@
+:plugin: kafka
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v7.0.3
+:release_date: 2017-10-09
+:changelog_url: https://github.com/logstash-plugins/logstash-output-kafka/blob/v7.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Kafka output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Write events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on
+the broker.
+
+Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination
+of Logstash and the Kafka output plugin:
+
+[options="header"]
+|==========================================================
+|Kafka Client Version |Logstash Version |Plugin Version |Why?
+|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular
+|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`)
+|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`)
+|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker
+|0.10.1.x |2.4.x - 5.x.x | 6.x.x |
+|0.11.0.0 |2.4.x - 5.x.x | 6.2.2 |Not compatible with the <= 0.9 broker
+|==========================================================
+
+NOTE: We recommend that you use matching Kafka client and broker versions. During upgrades, you should
+upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker
+is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around.
+
+This output supports connecting to Kafka over:
+
+* SSL (requires plugin version 3.0.0 or later)
+* Kerberos SASL (requires plugin version 5.1.0 or later)
+
+By default security is disabled but can be turned on as needed.
+
+The only required configuration is the `topic_id`.
+
+The default codec is plain. Logstash will encode your events with not only the message field but also a timestamp and the hostname.
+
+If you want the full content of your events to be sent as JSON, you should set
+the codec in the output configuration like this:
+[source,ruby]
+    output {
+      kafka {
+        codec => json
+        topic_id => "mytopic"
+      }
+    }
+
+For more information see http://kafka.apache.org/documentation.html#theproducer
+
+Kafka producer configuration: http://kafka.apache.org/documentation.html#newproducerconfigs
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Kafka Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-acks>> |<<string,string>>, one of `["0", "1", "all"]`|No
+| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-buffer_memory>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-client_id>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-compression_type>> |<<string,string>>, one of `["none", "gzip", "snappy", "lz4"]`|No
+| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-key_serializer>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-linger_ms>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-max_request_size>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-message_key>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-retries>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<<string,string>>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No
+| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-topic_id>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-value_serializer>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-acks"]
+===== `acks`
+
+ * Value can be any of: `0`, `1`, `all`
+ * Default value is `"1"`
+
+The number of acknowledgments the producer requires the leader to have received
+before considering a request complete.
+
+* `acks=0`: the producer will not wait for any acknowledgment from the server at all.
+* `acks=1`: the leader will write the record to its local log, but will respond
+without awaiting full acknowledgement from all followers.
+* `acks=all`: the leader will wait for the full set of in-sync replicas to acknowledge the record.
+
+[id="{version}-plugins-{type}s-{plugin}-batch_size"]
+===== `batch_size`
+
+ * Value type is <<number,number>>
+ * Default value is `16384`
+
+The producer will attempt to batch records together into fewer requests whenever multiple
+records are being sent to the same partition. This helps performance on both the client
+and the server. This configuration controls the default batch size in bytes.
+
+[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"]
+===== `bootstrap_servers`
+
+ * Value type is <<string,string>>
+ * Default value is `"localhost:9092"`
+
+This is for bootstrapping and the producer will only use it for getting metadata (topics,
+partitions and replicas). The socket connections for sending the actual data will be
+established based on the broker information returned in the metadata. The format is
+`host1:port1,host2:port2`, and the list can be a subset of brokers or a VIP pointing to a
+subset of brokers.
+
+[id="{version}-plugins-{type}s-{plugin}-buffer_memory"]
+===== `buffer_memory`
+
+ * Value type is <<number,number>>
+ * Default value is `33554432`
+
+The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+
+[id="{version}-plugins-{type}s-{plugin}-client_id"]
+===== `client_id`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The id string to pass to the server when making requests.
+The purpose of this is to be able to track the source of requests beyond just
+ip/port by allowing a logical application name to be included with the request.
+
+[id="{version}-plugins-{type}s-{plugin}-compression_type"]
+===== `compression_type`
+
+ * Value can be any of: `none`, `gzip`, `snappy`, `lz4`
+ * Default value is `"none"`
+
+The compression type for all data generated by the producer.
+The default is `none` (i.e. no compression). Valid values are `none`, `gzip`, `snappy`, and `lz4`.
+
+[id="{version}-plugins-{type}s-{plugin}-jaas_path"]
+===== `jaas_path`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization
+services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for Kafka client:
+[source,java]
+----------------------------------
+KafkaClient {
+  com.sun.security.auth.module.Krb5LoginModule required
+  useTicketCache=true
+  renewTicket=true
+  serviceName="kafka";
+  };
+----------------------------------
+
+Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these
+to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same
+`jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on
+different JVM instances.
+
+[id="{version}-plugins-{type}s-{plugin}-kerberos_config"]
+===== `kerberos_config`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Optional path to a Kerberos config file.
+It uses the krb5.conf style, as detailed at
+https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html
+
+[id="{version}-plugins-{type}s-{plugin}-key_serializer"]
+===== `key_serializer`
+
+ * Value type is <<string,string>>
+ * Default value is `"org.apache.kafka.common.serialization.StringSerializer"`
+
+Serializer class for the key of the message.
+
+[id="{version}-plugins-{type}s-{plugin}-linger_ms"]
+===== `linger_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `0`
+
+The producer groups together any records that arrive in between request
+transmissions into a single batched request. Normally this occurs only under
+load when records arrive faster than they can be sent out. However in some circumstances
+the client may want to reduce the number of requests even under moderate load.
+This setting accomplishes this by adding a small amount of artificial delay; that is,
+rather than immediately sending out a record the producer will wait for up to the given delay
+to allow other records to be sent so that the sends can be batched together.
+
+[id="{version}-plugins-{type}s-{plugin}-max_request_size"]
+===== `max_request_size`
+
+ * Value type is <<number,number>>
+ * Default value is `1048576`
+
+The maximum size of a request.
+
+[id="{version}-plugins-{type}s-{plugin}-message_key"]
+===== `message_key`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The key for the message.
+
+[id="{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms"]
+===== `metadata_fetch_timeout_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `60000`
+
+The timeout for the initial metadata request used to fetch topic metadata.
+
+[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"]
+===== `metadata_max_age_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `300000`
+
+The maximum time in milliseconds before a metadata refresh is forced.
+
+[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"]
+===== `receive_buffer_bytes`
+
+ * Value type is <<number,number>>
+ * Default value is `32768`
+
+The size of the TCP receive buffer to use when reading data.
+
+[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"]
+===== `reconnect_backoff_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `10`
+
+The amount of time to wait before attempting to reconnect to a given host when a connection fails.
+
+[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"]
+===== `request_timeout_ms`
+
+ * Value type is <<number,number>>
+ * There is no default value for this setting.
+
+The configuration controls the maximum amount of time the client will wait
+for the response of a request. If the response is not received before the timeout
+elapses the client will resend the request if necessary or fail the request if
+retries are exhausted.
+
+[id="{version}-plugins-{type}s-{plugin}-retries"]
+===== `retries`
+
+ * Value type is <<number,number>>
+ * There is no default value for this setting.
+
+The default retry behavior is to retry until successful. To prevent data loss,
+the use of this setting is discouraged.
+
+If you choose to set `retries`, a value greater than zero will cause the
+client to only retry a fixed number of times. This will result in data loss
+if a transport fault exists for longer than your retry count (network outage,
+Kafka down, etc).
+
+A value less than zero is a configuration error.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"]
+===== `retry_backoff_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `100`
+
+The amount of time to wait before attempting to retry a failed produce request to a given topic partition.
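+
+As an illustration only: if you accept the data-loss trade-off described
+under `retries`, a bounded retry configuration might look like this
+(values are hypothetical):
+[source,ruby]
+    output {
+      kafka {
+        topic_id         => "mytopic"
+        retries          => 5    # unset by default, which retries until successful
+        retry_backoff_ms => 500  # default is 100
+      }
+    }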
+
+[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"]
+===== `sasl_kerberos_service_name`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The Kerberos principal name that the Kafka broker runs as.
+This can be defined either in Kafka's JAAS config or in Kafka's config.
+
+[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"]
+===== `sasl_mechanism`
+
+ * Value type is <<string,string>>
+ * Default value is `"GSSAPI"`
+
+http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections.
+This may be any mechanism for which a security provider is available.
+GSSAPI is the default mechanism.
+
+[id="{version}-plugins-{type}s-{plugin}-security_protocol"]
+===== `security_protocol`
+
+ * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`
+ * Default value is `"PLAINTEXT"`
+
+Security protocol to use, which can be one of `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, or `SASL_SSL`.
+
+[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"]
+===== `send_buffer_bytes`
+
+ * Value type is <<number,number>>
+ * Default value is `131072`
+
+The size of the TCP send buffer to use when sending data.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"]
+===== `ssl_key_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+The password of the private key in the key store file.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"]
+===== `ssl_keystore_location`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+If client authentication is required, this setting stores the keystore path.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"]
+===== `ssl_keystore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+If client authentication is required, this setting stores the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"]
+===== `ssl_keystore_type`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The keystore type.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"]
+===== `ssl_truststore_location`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The JKS truststore path to validate the Kafka broker's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"]
+===== `ssl_truststore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+The truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"]
+===== `ssl_truststore_type`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The truststore type.
+
+[id="{version}-plugins-{type}s-{plugin}-topic_id"]
+===== `topic_id`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The topic to produce messages to.
+
+[id="{version}-plugins-{type}s-{plugin}-value_serializer"]
+===== `value_serializer`
+
+ * Value type is <<string,string>>
+ * Default value is `"org.apache.kafka.common.serialization.StringSerializer"`
+
+Serializer class for the value of the message.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/kafka-v7.0.4.asciidoc b/docs/versioned-plugins/outputs/kafka-v7.0.4.asciidoc
new file mode 100644
index 000000000..3f6c27a91
--- /dev/null
+++ b/docs/versioned-plugins/outputs/kafka-v7.0.4.asciidoc
@@ -0,0 +1,425 @@
+:plugin: kafka
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v7.0.4
+:release_date: 2017-10-25
+:changelog_url: https://github.com/logstash-plugins/logstash-output-kafka/blob/v7.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Kafka output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Write events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on
+the broker.
+
+Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination
+of Logstash and the Kafka output plugin:
+
+[options="header"]
+|==========================================================
+|Kafka Client Version |Logstash Version |Plugin Version |Why?
+|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular
+|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`)
+|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`)
+|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker
+|0.10.1.x |2.4.x - 5.x.x | 6.x.x |
+|0.11.0.0 |2.4.x - 5.x.x | 6.2.2 |Not compatible with the <= 0.9 broker
+|==========================================================
+
+NOTE: We recommend that you use matching Kafka client and broker versions. During upgrades, you should
+upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker
+is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around.
+
+This output supports connecting to Kafka over:
+
+* SSL (requires plugin version 3.0.0 or later)
+* Kerberos SASL (requires plugin version 5.1.0 or later)
+
+By default security is disabled but can be turned on as needed.
+
+The only required configuration is the `topic_id`.
+
+The default codec is plain. Logstash will encode your events with not only the message field but also a timestamp and the hostname.
+
+If you want the full content of your events to be sent as JSON, you should set
+the codec in the output configuration like this:
+[source,ruby]
+    output {
+      kafka {
+        codec => json
+        topic_id => "mytopic"
+      }
+    }
+
+For more information see http://kafka.apache.org/documentation.html#theproducer
+
+Kafka producer configuration: http://kafka.apache.org/documentation.html#newproducerconfigs
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Kafka Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-acks>> |<<string,string>>, one of `["0", "1", "all"]`|No
+| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-buffer_memory>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-client_id>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-compression_type>> |<<string,string>>, one of `["none", "gzip", "snappy", "lz4"]`|No
+| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-key_serializer>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-linger_ms>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-max_request_size>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-message_key>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-retries>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<<string,string>>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No
+| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-topic_id>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-value_serializer>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-acks"]
+===== `acks`
+
+ * Value can be any of: `0`, `1`, `all`
+ * Default value is `"1"`
+
+The number of acknowledgments the producer requires the leader to have received
+before considering a request complete.
+
+* `acks=0`: the producer will not wait for any acknowledgment from the server at all.
+* `acks=1`: the leader will write the record to its local log, but will respond
+without awaiting full acknowledgement from all followers.
+* `acks=all`: the leader will wait for the full set of in-sync replicas to acknowledge the record.
+
+[id="{version}-plugins-{type}s-{plugin}-batch_size"]
+===== `batch_size`
+
+ * Value type is <<number,number>>
+ * Default value is `16384`
+
+The producer will attempt to batch records together into fewer requests whenever multiple
+records are being sent to the same partition. This helps performance on both the client
+and the server. This configuration controls the default batch size in bytes.
+
+[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"]
+===== `bootstrap_servers`
+
+ * Value type is <<string,string>>
+ * Default value is `"localhost:9092"`
+
+This is for bootstrapping and the producer will only use it for getting metadata (topics,
+partitions and replicas). The socket connections for sending the actual data will be
+established based on the broker information returned in the metadata. The format is
+`host1:port1,host2:port2`, and the list can be a subset of brokers or a VIP pointing to a
+subset of brokers.
+
+[id="{version}-plugins-{type}s-{plugin}-buffer_memory"]
+===== `buffer_memory`
+
+ * Value type is <<number,number>>
+ * Default value is `33554432`
+
+The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+
+[id="{version}-plugins-{type}s-{plugin}-client_id"]
+===== `client_id`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The id string to pass to the server when making requests.
+The purpose of this is to be able to track the source of requests beyond just
+ip/port by allowing a logical application name to be included with the request.
+
+[id="{version}-plugins-{type}s-{plugin}-compression_type"]
+===== `compression_type`
+
+ * Value can be any of: `none`, `gzip`, `snappy`, `lz4`
+ * Default value is `"none"`
+
+The compression type for all data generated by the producer.
+The default is `none` (i.e. no compression). Valid values are `none`, `gzip`, `snappy`, and `lz4`.
+
+[id="{version}-plugins-{type}s-{plugin}-jaas_path"]
+===== `jaas_path`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization
+services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for Kafka client:
+[source,java]
+----------------------------------
+KafkaClient {
+  com.sun.security.auth.module.Krb5LoginModule required
+  useTicketCache=true
+  renewTicket=true
+  serviceName="kafka";
+  };
+----------------------------------
+
+Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these
+to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same
+`jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on
+different JVM instances.
+
+[id="{version}-plugins-{type}s-{plugin}-kerberos_config"]
+===== `kerberos_config`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Optional path to a Kerberos config file.
+It uses the krb5.conf style, as detailed at
+https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html
+
+[id="{version}-plugins-{type}s-{plugin}-key_serializer"]
+===== `key_serializer`
+
+ * Value type is <<string,string>>
+ * Default value is `"org.apache.kafka.common.serialization.StringSerializer"`
+
+Serializer class for the key of the message.
+
+[id="{version}-plugins-{type}s-{plugin}-linger_ms"]
+===== `linger_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `0`
+
+The producer groups together any records that arrive in between request
+transmissions into a single batched request. Normally this occurs only under
+load when records arrive faster than they can be sent out. However in some circumstances
+the client may want to reduce the number of requests even under moderate load.
+This setting accomplishes this by adding a small amount of artificial delay; that is,
+rather than immediately sending out a record the producer will wait for up to the given delay
+to allow other records to be sent so that the sends can be batched together.
+
+[id="{version}-plugins-{type}s-{plugin}-max_request_size"]
+===== `max_request_size`
+
+ * Value type is <<number,number>>
+ * Default value is `1048576`
+
+The maximum size of a request.
+
+[id="{version}-plugins-{type}s-{plugin}-message_key"]
+===== `message_key`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The key for the message.
+
+[id="{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms"]
+===== `metadata_fetch_timeout_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `60000`
+
+The timeout for the initial metadata request used to fetch topic metadata.
+
+[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"]
+===== `metadata_max_age_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `300000`
+
+The maximum time in milliseconds before a metadata refresh is forced.
+
+[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"]
+===== `receive_buffer_bytes`
+
+ * Value type is <<number,number>>
+ * Default value is `32768`
+
+The size of the TCP receive buffer to use when reading data.
+
+[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"]
+===== `reconnect_backoff_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `10`
+
+The amount of time to wait before attempting to reconnect to a given host when a connection fails.
+
+[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"]
+===== `request_timeout_ms`
+
+ * Value type is <<number,number>>
+ * There is no default value for this setting.
+
+The configuration controls the maximum amount of time the client will wait
+for the response of a request. If the response is not received before the timeout
+elapses the client will resend the request if necessary or fail the request if
+retries are exhausted.
+
+[id="{version}-plugins-{type}s-{plugin}-retries"]
+===== `retries`
+
+ * Value type is <<number,number>>
+ * There is no default value for this setting.
+
+The default retry behavior is to retry until successful. To prevent data loss,
+the use of this setting is discouraged.
+
+If you choose to set `retries`, a value greater than zero will cause the
+client to only retry a fixed number of times. This will result in data loss
+if a transport fault exists for longer than your retry count (network outage,
+Kafka down, etc).
+
+A value less than zero is a configuration error.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"]
+===== `retry_backoff_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `100`
+
+The amount of time to wait before attempting to retry a failed produce request to a given topic partition.
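+
+As a hypothetical tuning sketch (all values illustrative), the timeout and
+backoff settings documented above combine like this:
+[source,ruby]
+    output {
+      kafka {
+        topic_id             => "mytopic"
+        request_timeout_ms   => 30000  # no default; resend or fail after 30s
+        retry_backoff_ms     => 500    # default is 100
+        reconnect_backoff_ms => 100    # default is 10
+      }
+    }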
+
+[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"]
+===== `sasl_kerberos_service_name`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The Kerberos principal name that the Kafka broker runs as.
+This can be defined either in Kafka's JAAS config or in Kafka's config.
+
+[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"]
+===== `sasl_mechanism`
+
+ * Value type is <<string,string>>
+ * Default value is `"GSSAPI"`
+
+http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections.
+This may be any mechanism for which a security provider is available.
+GSSAPI is the default mechanism.
+
+[id="{version}-plugins-{type}s-{plugin}-security_protocol"]
+===== `security_protocol`
+
+ * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`
+ * Default value is `"PLAINTEXT"`
+
+Security protocol to use, which can be one of `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, or `SASL_SSL`.
+
+[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"]
+===== `send_buffer_bytes`
+
+ * Value type is <<number,number>>
+ * Default value is `131072`
+
+The size of the TCP send buffer to use when sending data.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"]
+===== `ssl_key_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+The password of the private key in the key store file.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"]
+===== `ssl_keystore_location`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+If client authentication is required, this setting stores the keystore path.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"]
+===== `ssl_keystore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+If client authentication is required, this setting stores the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"]
+===== `ssl_keystore_type`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The keystore type.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"]
+===== `ssl_truststore_location`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The JKS truststore path to validate the Kafka broker's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"]
+===== `ssl_truststore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+The truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"]
+===== `ssl_truststore_type`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The truststore type.
+
+[id="{version}-plugins-{type}s-{plugin}-topic_id"]
+===== `topic_id`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The topic to produce messages to.
+
+[id="{version}-plugins-{type}s-{plugin}-value_serializer"]
+===== `value_serializer`
+
+ * Value type is <<string,string>>
+ * Default value is `"org.apache.kafka.common.serialization.StringSerializer"`
+
+Serializer class for the value of the message.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/kafka-v7.0.6.asciidoc b/docs/versioned-plugins/outputs/kafka-v7.0.6.asciidoc
new file mode 100644
index 000000000..98a8d71e8
--- /dev/null
+++ b/docs/versioned-plugins/outputs/kafka-v7.0.6.asciidoc
@@ -0,0 +1,425 @@
+:plugin: kafka
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v7.0.6
+:release_date: 2018-01-05
+:changelog_url: https://github.com/logstash-plugins/logstash-output-kafka/blob/v7.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Kafka output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Write events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on
+the broker.
+
+Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination
+of Logstash and the Kafka output plugin:
+
+[options="header"]
+|==========================================================
+|Kafka Client Version |Logstash Version |Plugin Version |Why?
+|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular
+|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`)
+|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`)
+|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker
+|0.10.1.x |2.4.x - 5.x.x | 6.x.x |
+|0.11.0.0 |2.4.x - 5.x.x | 6.2.2 |Not compatible with the <= 0.9 broker
+|==========================================================
+
+NOTE: We recommend that you use matching Kafka client and broker versions. During upgrades, you should
+upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker
+is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around.
+
+This output supports connecting to Kafka over:
+
+* SSL (requires plugin version 3.0.0 or later)
+* Kerberos SASL (requires plugin version 5.1.0 or later)
+
+By default security is disabled but can be turned on as needed.
+
+The only required configuration is the `topic_id`.
+
+The default codec is plain. Logstash will encode your events with not only the message field but also a timestamp and the hostname.
+
+If you want the full content of your events to be sent as JSON, you should set
+the codec in the output configuration like this:
+[source,ruby]
+    output {
+      kafka {
+        codec => json
+        topic_id => "mytopic"
+      }
+    }
+
+For more information see http://kafka.apache.org/documentation.html#theproducer
+
+Kafka producer configuration: http://kafka.apache.org/documentation.html#newproducerconfigs
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Kafka Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-acks>> |<<string,string>>, one of `["0", "1", "all"]`|No
+| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-buffer_memory>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-client_id>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-compression_type>> |<<string,string>>, one of `["none", "gzip", "snappy", "lz4"]`|No
+| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-key_serializer>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-linger_ms>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-max_request_size>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-message_key>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-retries>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<<string,string>>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No
+| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-topic_id>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-value_serializer>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-acks"]
+===== `acks`
+
+ * Value can be any of: `0`, `1`, `all`
+ * Default value is `"1"`
+
+The number of acknowledgments the producer requires the leader to have received
+before considering a request complete.
+
+* `acks=0`: the producer will not wait for any acknowledgment from the server at all.
+* `acks=1`: the leader will write the record to its local log, but will respond
+without awaiting full acknowledgement from all followers.
+* `acks=all`: the leader will wait for the full set of in-sync replicas to acknowledge the record.
+
+[id="{version}-plugins-{type}s-{plugin}-batch_size"]
+===== `batch_size`
+
+ * Value type is <<number,number>>
+ * Default value is `16384`
+
+The producer will attempt to batch records together into fewer requests whenever multiple
+records are being sent to the same partition. This helps performance on both the client
+and the server. This configuration controls the default batch size in bytes.
+
+[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"]
+===== `bootstrap_servers`
+
+ * Value type is <<string,string>>
+ * Default value is `"localhost:9092"`
+
+This is for bootstrapping and the producer will only use it for getting metadata (topics,
+partitions and replicas). The socket connections for sending the actual data will be
+established based on the broker information returned in the metadata. The format is
+`host1:port1,host2:port2`, and the list can be a subset of brokers or a VIP pointing to a
+subset of brokers.
+
+[id="{version}-plugins-{type}s-{plugin}-buffer_memory"]
+===== `buffer_memory`
+
+ * Value type is <<number,number>>
+ * Default value is `33554432`
+
+The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
+
+[id="{version}-plugins-{type}s-{plugin}-client_id"]
+===== `client_id`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The id string to pass to the server when making requests.
+The purpose of this is to be able to track the source of requests beyond just
+ip/port by allowing a logical application name to be included with the request.
+
+[id="{version}-plugins-{type}s-{plugin}-compression_type"]
+===== `compression_type`
+
+ * Value can be any of: `none`, `gzip`, `snappy`, `lz4`
+ * Default value is `"none"`
+
+The compression type for all data generated by the producer.
+The default is `none` (i.e. no compression). Valid values are `none`, `gzip`, `snappy`, and `lz4`.
+
+[id="{version}-plugins-{type}s-{plugin}-jaas_path"]
+===== `jaas_path`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization
+services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for Kafka client:
+[source,java]
+----------------------------------
+KafkaClient {
+  com.sun.security.auth.module.Krb5LoginModule required
+  useTicketCache=true
+  renewTicket=true
+  serviceName="kafka";
+  };
+----------------------------------
+
+Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these
+to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same
+`jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on
+different JVM instances.
+
+[id="{version}-plugins-{type}s-{plugin}-kerberos_config"]
+===== `kerberos_config`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+Optional path to a Kerberos config file.
+It uses the krb5.conf style, as detailed at
+https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html
+
+[id="{version}-plugins-{type}s-{plugin}-key_serializer"]
+===== `key_serializer`
+
+ * Value type is <<string,string>>
+ * Default value is `"org.apache.kafka.common.serialization.StringSerializer"`
+
+Serializer class for the key of the message.
+
+[id="{version}-plugins-{type}s-{plugin}-linger_ms"]
+===== `linger_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `0`
+
+The producer groups together any records that arrive in between request
+transmissions into a single batched request. Normally this occurs only under
+load when records arrive faster than they can be sent out. However in some circumstances
+the client may want to reduce the number of requests even under moderate load.
+This setting accomplishes this by adding a small amount of artificial delay; that is,
+rather than immediately sending out a record the producer will wait for up to the given delay
+to allow other records to be sent so that the sends can be batched together.
+
+[id="{version}-plugins-{type}s-{plugin}-max_request_size"]
+===== `max_request_size`
+
+ * Value type is <<number,number>>
+ * Default value is `1048576`
+
+The maximum size of a request.
+
+[id="{version}-plugins-{type}s-{plugin}-message_key"]
+===== `message_key`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The key for the message.
+
+[id="{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms"]
+===== `metadata_fetch_timeout_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `60000`
+
+The timeout for the initial metadata request used to fetch topic metadata.
+
+[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"]
+===== `metadata_max_age_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `300000`
+
+The maximum time in milliseconds before a metadata refresh is forced.
+
+[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"]
+===== `receive_buffer_bytes`
+
+ * Value type is <<number,number>>
+ * Default value is `32768`
+
+The size of the TCP receive buffer to use when reading data.
+
+[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"]
+===== `reconnect_backoff_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `10`
+
+The amount of time to wait before attempting to reconnect to a given host when a connection fails.
+
+[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"]
+===== `request_timeout_ms`
+
+ * Value type is <<number,number>>
+ * There is no default value for this setting.
+
+The configuration controls the maximum amount of time the client will wait
+for the response of a request. If the response is not received before the timeout
+elapses the client will resend the request if necessary or fail the request if
+retries are exhausted.
+
+[id="{version}-plugins-{type}s-{plugin}-retries"]
+===== `retries`
+
+ * Value type is <<number,number>>
+ * There is no default value for this setting.
+
+The default retry behavior is to retry until successful. To prevent data loss,
+the use of this setting is discouraged.
+
+If you choose to set `retries`, a value greater than zero will cause the
+client to only retry a fixed number of times. This will result in data loss
+if a transport fault exists for longer than your retry count (network outage,
+Kafka down, etc).
+
+A value less than zero is a configuration error.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"]
+===== `retry_backoff_ms`
+
+ * Value type is <<number,number>>
+ * Default value is `100`
+
+The amount of time to wait before attempting to retry a failed produce request to a given topic partition.
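+
+For orientation, here is one hypothetical way the SSL-related settings
+documented below fit together (paths and the password are placeholders):
+[source,ruby]
+    output {
+      kafka {
+        topic_id                => "mytopic"
+        security_protocol       => "SSL"
+        ssl_truststore_location => "/etc/logstash/client.truststore.jks"  # placeholder path
+        ssl_truststore_password => "changeit"                             # placeholder password
+      }
+    }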
+
+[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"]
+===== `sasl_kerberos_service_name`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The Kerberos principal name that the Kafka broker runs as.
+This can be defined either in Kafka's JAAS config or in Kafka's config.
+
+[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"]
+===== `sasl_mechanism`
+
+ * Value type is <<string,string>>
+ * Default value is `"GSSAPI"`
+
+http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections.
+This may be any mechanism for which a security provider is available.
+GSSAPI is the default mechanism.
+
+[id="{version}-plugins-{type}s-{plugin}-security_protocol"]
+===== `security_protocol`
+
+ * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`
+ * Default value is `"PLAINTEXT"`
+
+Security protocol to use, which can be one of `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, or `SASL_SSL`.
+
+[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"]
+===== `send_buffer_bytes`
+
+ * Value type is <<number,number>>
+ * Default value is `131072`
+
+The size of the TCP send buffer to use when sending data.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"]
+===== `ssl_key_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+The password of the private key in the key store file.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"]
+===== `ssl_keystore_location`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+If client authentication is required, this setting stores the keystore path.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"]
+===== `ssl_keystore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+If client authentication is required, this setting stores the keystore password.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"]
+===== `ssl_keystore_type`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The keystore type.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"]
+===== `ssl_truststore_location`
+
+ * Value type is <<path,path>>
+ * There is no default value for this setting.
+
+The JKS truststore path to validate the Kafka broker's certificate.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"]
+===== `ssl_truststore_password`
+
+ * Value type is <<password,password>>
+ * There is no default value for this setting.
+
+The truststore password.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"]
+===== `ssl_truststore_type`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The truststore type.
+
+[id="{version}-plugins-{type}s-{plugin}-topic_id"]
+===== `topic_id`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+ +The topic to produce messages to. + +[id="{version}-plugins-{type}s-{plugin}-value_serializer"] +===== `value_serializer` + + * Value type is <> + * Default value is `"org.apache.kafka.common.serialization.StringSerializer"` + +Serializer class for the value of the message. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/librato-index.asciidoc b/docs/versioned-plugins/outputs/librato-index.asciidoc new file mode 100644 index 000000000..8978e680f --- /dev/null +++ b/docs/versioned-plugins/outputs/librato-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: librato +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-16 +| <> | 2017-06-23 +|======================================================================= + +include::librato-v3.0.5.asciidoc[] +include::librato-v3.0.4.asciidoc[] +include::librato-v3.0.2.asciidoc[] + diff --git a/docs/versioned-plugins/outputs/librato-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/librato-v3.0.2.asciidoc new file mode 100644 index 000000000..13c446a57 --- /dev/null +++ b/docs/versioned-plugins/outputs/librato-v3.0.2.asciidoc @@ -0,0 +1,162 @@ +:plugin: librato +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.2 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-output-librato/blob/v3.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Librato output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output lets you send metrics, annotations, and alerts to +Librato based on Logstash events. + +This is VERY experimental and inefficient right now. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Librato Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-account_id>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-annotation>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-api_token>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-counter>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-gauge>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-account_id"] +===== `account_id` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Your Librato account, +usually an email address. + +[id="{version}-plugins-{type}s-{plugin}-annotation"] +===== `annotation` + + * Value type is <> + * Default value is `{}` + +Annotations. +Registers an annotation with Librato. +The only required fields are `title` and `name`.
+`start_time` and `end_time` will be set to `event.get("@timestamp").to_i` +You can add any other optional annotation values as well. +All values will be passed through `event.sprintf`. + +Example: +[source,ruby] + { + "title" => "Logstash event on %{host}" + "name" => "logstash_stream" + } +or +[source,ruby] + { + "title" => "Logstash event" + "description" => "%{message}" + "name" => "logstash_stream" + } + +[id="{version}-plugins-{type}s-{plugin}-api_token"] +===== `api_token` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Your Librato API Token. + +[id="{version}-plugins-{type}s-{plugin}-batch_size"] +===== `batch_size` + + * Value type is <> + * Default value is `"10"` + +Batch size. +Number of events to batch up before sending to Librato. + + +[id="{version}-plugins-{type}s-{plugin}-counter"] +===== `counter` + + * Value type is <> + * Default value is `{}` + +Counters. +Send data to Librato as a counter. + +Example: +[source,ruby] + { + "value" => "1" + "source" => "%{host}" + "name" => "messages_received" + } + +Additionally, you can override the `measure_time` for the event. Must be a unix timestamp: +[source,ruby] + { + "value" => "1" + "source" => "%{host}" + "name" => "messages_received" + "measure_time" => "%{my_unixtime_field}" + } +Default is to use the event's timestamp. + +[id="{version}-plugins-{type}s-{plugin}-gauge"] +===== `gauge` + + * Value type is <> + * Default value is `{}` + +Gauges. +Send data to Librato as a gauge. + +Example: +[source,ruby] + { + "value" => "%{bytes_received}" + "source" => "%{host}" + "name" => "apache_bytes" + } +Additionally, you can override the `measure_time` for the event. Must be a unix timestamp: +[source,ruby] + { + "value" => "%{bytes_received}" + "source" => "%{host}" + "name" => "apache_bytes" + "measure_time" => "%{my_unixtime_field}" + } +Default is to use the event's timestamp. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/librato-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/librato-v3.0.4.asciidoc new file mode 100644 index 000000000..fa8f68011 --- /dev/null +++ b/docs/versioned-plugins/outputs/librato-v3.0.4.asciidoc @@ -0,0 +1,162 @@ +:plugin: librato +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-librato/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Librato output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output lets you send metrics, annotations, and alerts to +Librato based on Logstash events. + +This is VERY experimental and inefficient right now. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Librato Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-account_id>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-annotation>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-api_token>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-counter>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-gauge>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-account_id"] +===== `account_id` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Your Librato account +usually an email address + +[id="{version}-plugins-{type}s-{plugin}-annotation"] +===== `annotation` + + * Value type is <> + * Default value is `{}` + +Annotations +Registers an annotation with Librato +The only required field is `title` and `name`. +`start_time` and `end_time` will be set to `event.get("@timestamp").to_i` +You can add any other optional annotation values as well. +All values will be passed through `event.sprintf` + +Example: +[source,ruby] + { + "title" => "Logstash event on %{host}" + "name" => "logstash_stream" + } +or +[source,ruby] + { + "title" => "Logstash event" + "description" => "%{message}" + "name" => "logstash_stream" + } + +[id="{version}-plugins-{type}s-{plugin}-api_token"] +===== `api_token` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Your Librato API Token + +[id="{version}-plugins-{type}s-{plugin}-batch_size"] +===== `batch_size` + + * Value type is <> + * Default value is `"10"` + +Batch size +Number of events to batch up before sending to Librato. + + +[id="{version}-plugins-{type}s-{plugin}-counter"] +===== `counter` + + * Value type is <> + * Default value is `{}` + +Counters +Send data to Librato as a counter + +Example: +[source,ruby] + { + "value" => "1" + "source" => "%{host}" + "name" => "messages_received" + } + +Additionally, you can override the `measure_time` for the event. Must be a unix timestamp: +[source,ruby] + { + "value" => "1" + "source" => "%{host}" + "name" => "messages_received" + "measure_time" => "%{my_unixtime_field}" + } +Default is to use the event's timestamp + +[id="{version}-plugins-{type}s-{plugin}-gauge"] +===== `gauge` + + * Value type is <> + * Default value is `{}` + +Gauges +Send data to Librato as a gauge + +Example: +[source,ruby] + { + "value" => "%{bytes_received}" + "source" => "%{host}" + "name" => "apache_bytes" + } +Additionally, you can override the `measure_time` for the event. 
Must be a unix timestamp: +[source,ruby] + { + "value" => "%{bytes_received}" + "source" => "%{host}" + "name" => "apache_bytes" + "measure_time" => "%{my_unixtime_field}" + } +Default is to use the event's timestamp. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/librato-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/librato-v3.0.5.asciidoc new file mode 100644 index 000000000..caeb5bc4d --- /dev/null +++ b/docs/versioned-plugins/outputs/librato-v3.0.5.asciidoc @@ -0,0 +1,162 @@ +:plugin: librato +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-output-librato/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Librato output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output lets you send metrics, annotations, and alerts to +Librato based on Logstash events. + +This is VERY experimental and inefficient right now. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Librato Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-account_id>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-annotation>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-api_token>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-counter>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-gauge>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-account_id"] +===== `account_id` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +Your Librato account, +usually an email address. + +[id="{version}-plugins-{type}s-{plugin}-annotation"] +===== `annotation` + + * Value type is <> + * Default value is `{}` + +Annotations. +Registers an annotation with Librato. +The only required fields are `title` and `name`. +`start_time` and `end_time` will be set to `event.get("@timestamp").to_i` +You can add any other optional annotation values as well. +All values will be passed through `event.sprintf`. + +Example: +[source,ruby] + { + "title" => "Logstash event on %{host}" + "name" => "logstash_stream" + } +or +[source,ruby] + { + "title" => "Logstash event" + "description" => "%{message}" + "name" => "logstash_stream" + } + +[id="{version}-plugins-{type}s-{plugin}-api_token"] +===== `api_token` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting.
+ +Your Librato API Token. + +[id="{version}-plugins-{type}s-{plugin}-batch_size"] +===== `batch_size` + + * Value type is <> + * Default value is `"10"` + +Batch size. +Number of events to batch up before sending to Librato. + + +[id="{version}-plugins-{type}s-{plugin}-counter"] +===== `counter` + + * Value type is <> + * Default value is `{}` + +Counters. +Send data to Librato as a counter. + +Example: +[source,ruby] + { + "value" => "1" + "source" => "%{host}" + "name" => "messages_received" + } + +Additionally, you can override the `measure_time` for the event. Must be a unix timestamp: +[source,ruby] + { + "value" => "1" + "source" => "%{host}" + "name" => "messages_received" + "measure_time" => "%{my_unixtime_field}" + } +Default is to use the event's timestamp. + +[id="{version}-plugins-{type}s-{plugin}-gauge"] +===== `gauge` + + * Value type is <> + * Default value is `{}` + +Gauges. +Send data to Librato as a gauge. + +Example: +[source,ruby] + { + "value" => "%{bytes_received}" + "source" => "%{host}" + "name" => "apache_bytes" + } +Additionally, you can override the `measure_time` for the event. Must be a unix timestamp: +[source,ruby] + { + "value" => "%{bytes_received}" + "source" => "%{host}" + "name" => "apache_bytes" + "measure_time" => "%{my_unixtime_field}" + } +Default is to use the event's timestamp. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/logentries-index.asciidoc b/docs/versioned-plugins/outputs/logentries-index.asciidoc new file mode 100644 index 000000000..bd9d46c10 --- /dev/null +++ b/docs/versioned-plugins/outputs/logentries-index.asciidoc @@ -0,0 +1,10 @@ +:plugin: logentries +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +|======================================================================= + + diff --git a/docs/versioned-plugins/outputs/loggly-index.asciidoc b/docs/versioned-plugins/outputs/loggly-index.asciidoc new file mode 100644 index 000000000..7ed3a311f --- /dev/null +++ b/docs/versioned-plugins/outputs/loggly-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: loggly +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-16 +| <> | 2017-06-23 +|======================================================================= + +include::loggly-v3.0.3.asciidoc[] +include::loggly-v3.0.2.asciidoc[] +include::loggly-v3.0.1.asciidoc[] + diff --git a/docs/versioned-plugins/outputs/loggly-v3.0.1.asciidoc b/docs/versioned-plugins/outputs/loggly-v3.0.1.asciidoc new file mode 100644 index 000000000..73593c01f --- /dev/null +++ b/docs/versioned-plugins/outputs/loggly-v3.0.1.asciidoc @@ -0,0 +1,164 @@ +:plugin: loggly +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.1 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-output-loggly/blob/v3.0.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT!
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Loggly output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Got a loggly account? Use logstash to ship logs to Loggly! + +This is most useful so you can use logstash to parse and structure +your logs and ship structured, JSON events to your account at Loggly. + +To use this, you'll need to use a Loggly input with type 'http' +and 'json logging' enabled. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Loggly Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-can_retry>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-proto>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_count>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tag>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-can_retry"] +===== `can_retry` + + * Value type is <> + * Default value is `true` + +Can retry. +Setting this value to true allows multiple retry attempts if the first request fails. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"logs-01.loggly.com"` + +The hostname to send logs to. This should target the loggly http input +server, which is usually "logs-01.loggly.com" (Gen2 account). +See Loggly HTTP endpoint documentation at +https://www.loggly.com/docs/http-endpoint/ + +[id="{version}-plugins-{type}s-{plugin}-key"] +===== `key` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The loggly http input key to send to. +This is usually visible in the Loggly 'Inputs' page as something like this: +.... + https://logs-01.loggly.net/inputs/abcdef12-3456-7890-abcd-ef0123456789 + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + \----------> key <-------------/ +.... +You can use `%{foo}` field lookups here if you need to pull the api key from +the event. This is mainly aimed at multitenant hosting providers who want +to offer shipping a customer's logs to that customer's loggly account. + +[id="{version}-plugins-{type}s-{plugin}-proto"] +===== `proto` + + * Value type is <> + * Default value is `"http"` + +Should the log action be sent over https instead of plain http? + +[id="{version}-plugins-{type}s-{plugin}-proxy_host"] +===== `proxy_host` + + * Value type is <> + * There is no default value for this setting.
+ +Proxy host. + +[id="{version}-plugins-{type}s-{plugin}-proxy_password"] +===== `proxy_password` + + * Value type is <> + * Default value is `""` + +Proxy password. + +[id="{version}-plugins-{type}s-{plugin}-proxy_port"] +===== `proxy_port` + + * Value type is <> + * There is no default value for this setting. + +Proxy port. + +[id="{version}-plugins-{type}s-{plugin}-proxy_user"] +===== `proxy_user` + + * Value type is <> + * There is no default value for this setting. + +Proxy username. + +[id="{version}-plugins-{type}s-{plugin}-retry_count"] +===== `retry_count` + + * Value type is <> + * Default value is `5` + +Retry count. +A request may time out due to a slow Internet connection. +If that happens, the plugin retries the request up to `retry_count` times +and then halts. + +[id="{version}-plugins-{type}s-{plugin}-tag"] +===== `tag` + + * Value type is <> + * Default value is `"logstash"` + +Loggly tag. +Tags help you find your logs in the Loggly dashboard easily. +You can search in Loggly using a tag, such as "tag:logstash-contrib", +or the tag you set in the config file. + +You can use %{somefield} to allow for custom tag values. +Helpful for leveraging Loggly source groups. +https://www.loggly.com/docs/source-groups/ + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/loggly-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/loggly-v3.0.2.asciidoc new file mode 100644 index 000000000..599c3a5da --- /dev/null +++ b/docs/versioned-plugins/outputs/loggly-v3.0.2.asciidoc @@ -0,0 +1,164 @@ +:plugin: loggly +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.2 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-loggly/blob/v3.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Loggly output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Got a loggly account? Use logstash to ship logs to Loggly! + +This is most useful so you can use logstash to parse and structure +your logs and ship structured, JSON events to your account at Loggly. + +To use this, you'll need to use a Loggly input with type 'http' +and 'json logging' enabled. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Loggly Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-can_retry>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-proto>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_count>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tag>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-can_retry"] +===== `can_retry` + + * Value type is <> + * Default value is `true` + +Can Retry. +Setting this value true helps user to send multiple retry attempts if the first request fails + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"logs-01.loggly.com"` + +The hostname to send logs to. This should target the loggly http input +server which is usually "logs-01.loggly.com" (Gen2 account). +See Loggly HTTP endpoint documentation at +https://www.loggly.com/docs/http-endpoint/ + +[id="{version}-plugins-{type}s-{plugin}-key"] +===== `key` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The loggly http input key to send to. +This is usually visible in the Loggly 'Inputs' page as something like this: +.... + https://logs-01.loggly.net/inputs/abcdef12-3456-7890-abcd-ef0123456789 + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + \----------> key <-------------/ +.... +You can use `%{foo}` field lookups here if you need to pull the api key from +the event. This is mainly aimed at multitenant hosting providers who want +to offer shipping a customer's logs to that customer's loggly account. + +[id="{version}-plugins-{type}s-{plugin}-proto"] +===== `proto` + + * Value type is <> + * Default value is `"http"` + +Should the log action be sent over https instead of plain http + +[id="{version}-plugins-{type}s-{plugin}-proxy_host"] +===== `proxy_host` + + * Value type is <> + * There is no default value for this setting. + +Proxy Host + +[id="{version}-plugins-{type}s-{plugin}-proxy_password"] +===== `proxy_password` + + * Value type is <> + * Default value is `""` + +Proxy Password + +[id="{version}-plugins-{type}s-{plugin}-proxy_port"] +===== `proxy_port` + + * Value type is <> + * There is no default value for this setting. + +Proxy Port + +[id="{version}-plugins-{type}s-{plugin}-proxy_user"] +===== `proxy_user` + + * Value type is <> + * There is no default value for this setting. + +Proxy Username + +[id="{version}-plugins-{type}s-{plugin}-retry_count"] +===== `retry_count` + + * Value type is <> + * Default value is `5` + +Retry count. 
+A request may time out due to a slow Internet connection. +If that happens, the plugin retries the request up to `retry_count` times +and then halts. + +[id="{version}-plugins-{type}s-{plugin}-tag"] +===== `tag` + + * Value type is <> + * Default value is `"logstash"` + +Loggly tag. +Tags help you find your logs in the Loggly dashboard easily. +You can search in Loggly using a tag, such as "tag:logstash-contrib", +or the tag you set in the config file. + +You can use %{somefield} to allow for custom tag values. +Helpful for leveraging Loggly source groups. +https://www.loggly.com/docs/source-groups/ + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/loggly-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/loggly-v3.0.3.asciidoc new file mode 100644 index 000000000..b699dd05c --- /dev/null +++ b/docs/versioned-plugins/outputs/loggly-v3.0.3.asciidoc @@ -0,0 +1,164 @@ +:plugin: loggly +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-output-loggly/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Loggly output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Got a loggly account? Use logstash to ship logs to Loggly! + +This is most useful so you can use logstash to parse and structure +your logs and ship structured, JSON events to your account at Loggly. + +To use this, you'll need to use a Loggly input with type 'http' +and 'json logging' enabled. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Loggly Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-can_retry>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-proto>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_count>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-tag>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-can_retry"] +===== `can_retry` + + * Value type is <> + * Default value is `true` + +Can retry.
+Setting this value to true allows multiple retry attempts if the first request fails. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"logs-01.loggly.com"` + +The hostname to send logs to. This should target the loggly http input +server, which is usually "logs-01.loggly.com" (Gen2 account). +See Loggly HTTP endpoint documentation at +https://www.loggly.com/docs/http-endpoint/ + +[id="{version}-plugins-{type}s-{plugin}-key"] +===== `key` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The loggly http input key to send to. +This is usually visible in the Loggly 'Inputs' page as something like this: +.... + https://logs-01.loggly.net/inputs/abcdef12-3456-7890-abcd-ef0123456789 + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + \----------> key <-------------/ +.... +You can use `%{foo}` field lookups here if you need to pull the api key from +the event. This is mainly aimed at multitenant hosting providers who want +to offer shipping a customer's logs to that customer's loggly account. + +[id="{version}-plugins-{type}s-{plugin}-proto"] +===== `proto` + + * Value type is <> + * Default value is `"http"` + +Should the log action be sent over https instead of plain http? + +[id="{version}-plugins-{type}s-{plugin}-proxy_host"] +===== `proxy_host` + + * Value type is <> + * There is no default value for this setting. + +Proxy host. + +[id="{version}-plugins-{type}s-{plugin}-proxy_password"] +===== `proxy_password` + + * Value type is <> + * Default value is `""` + +Proxy password. + +[id="{version}-plugins-{type}s-{plugin}-proxy_port"] +===== `proxy_port` + + * Value type is <> + * There is no default value for this setting. + +Proxy port. + +[id="{version}-plugins-{type}s-{plugin}-proxy_user"] +===== `proxy_user` + + * Value type is <> + * There is no default value for this setting. + +Proxy username. + +[id="{version}-plugins-{type}s-{plugin}-retry_count"] +===== `retry_count` + + * Value type is <> + * Default value is `5` + +Retry count. +A request may time out due to a slow Internet connection. +If that happens, the plugin retries the request up to `retry_count` times +and then halts. + +[id="{version}-plugins-{type}s-{plugin}-tag"] +===== `tag` + + * Value type is <> + * Default value is `"logstash"` + +Loggly tag. +Tags help you find your logs in the Loggly dashboard easily. +You can search in Loggly using a tag, such as "tag:logstash-contrib", +or the tag you set in the config file. + +You can use %{somefield} to allow for custom tag values. +Helpful for leveraging Loggly source groups.
+https://www.loggly.com/docs/source-groups/ + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/lumberjack-index.asciidoc b/docs/versioned-plugins/outputs/lumberjack-index.asciidoc new file mode 100644 index 000000000..dc919efd6 --- /dev/null +++ b/docs/versioned-plugins/outputs/lumberjack-index.asciidoc @@ -0,0 +1,14 @@ +:plugin: lumberjack +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-08-21 +| <> | 2017-06-23 +|======================================================================= + +include::lumberjack-v3.1.5.asciidoc[] +include::lumberjack-v3.1.3.asciidoc[] + diff --git a/docs/versioned-plugins/outputs/lumberjack-v3.1.3.asciidoc b/docs/versioned-plugins/outputs/lumberjack-v3.1.3.asciidoc new file mode 100644 index 000000000..36916f5f6 --- /dev/null +++ b/docs/versioned-plugins/outputs/lumberjack-v3.1.3.asciidoc @@ -0,0 +1,101 @@ +:plugin: lumberjack +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.3 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-output-lumberjack/blob/v3.1.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Lumberjack output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output sends events using the lumberjack protocol. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Lumberjack Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-flush_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-idle_flush_time>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-flush_size"] +===== `flush_size` + + * Value type is <> + * Default value is `1024` + +To make efficient calls to the lumberjack output, we buffer events locally. +If the number of buffered events exceeds the declared `flush_size`, we +send them to the Logstash server. + +[id="{version}-plugins-{type}s-{plugin}-hosts"] +===== `hosts` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +List of addresses lumberjack can send to. + +[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"] +===== `idle_flush_time` + + * Value type is <> + * Default value is `1` + +The amount of time since last flush before a flush is forced. + +This setting helps ensure slow event rates don't get stuck in Logstash.
+For example, if your `flush_size` is 100, and you have received 10 events, +and it has been more than `idle_flush_time` seconds since the last flush, +Logstash will flush those 10 events automatically. + +This helps keep both fast and slow log streams moving along in +near-real-time. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The port to connect to. + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"] +===== `ssl_certificate` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The SSL certificate to use. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/lumberjack-v3.1.5.asciidoc b/docs/versioned-plugins/outputs/lumberjack-v3.1.5.asciidoc new file mode 100644 index 000000000..b457655c5 --- /dev/null +++ b/docs/versioned-plugins/outputs/lumberjack-v3.1.5.asciidoc @@ -0,0 +1,102 @@ +:plugin: lumberjack +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.5 +:release_date: 2017-08-21 +:changelog_url: https://github.com/logstash-plugins/logstash-output-lumberjack/blob/v3.1.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Lumberjack output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output sends events using the lumberjack protocol. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Lumberjack Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-flush_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-idle_flush_time>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-flush_size"] +===== `flush_size` + + * Value type is <> + * Default value is `1024` + +To make efficient calls to the lumberjack output, we buffer events locally. +If the number of buffered events exceeds the declared `flush_size`, we +send them to the Logstash server. + +[id="{version}-plugins-{type}s-{plugin}-hosts"] +===== `hosts` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +List of addresses lumberjack can send to. + +[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"] +===== `idle_flush_time` + + * Value type is <> + * Default value is `1` + +The amount of time since last flush before a flush is forced. + +This setting helps ensure slow event rates don't get stuck in Logstash.
+For example, if your `flush_size` is 100, and you have received 10 events, +and it has been more than `idle_flush_time` seconds since the last flush, +Logstash will flush those 10 events automatically. + +This helps keep both fast and slow log streams moving along in +near-real-time. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The port to connect to. + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"] +===== `ssl_certificate` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The SSL certificate to use. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/metriccatcher-index.asciidoc b/docs/versioned-plugins/outputs/metriccatcher-index.asciidoc new file mode 100644 index 000000000..928c1c4e5 --- /dev/null +++ b/docs/versioned-plugins/outputs/metriccatcher-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: metriccatcher +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-07 +| <> | 2017-08-16 +| <> | 2017-06-23 +|======================================================================= + +include::metriccatcher-v3.0.3.asciidoc[] +include::metriccatcher-v3.0.2.asciidoc[] +include::metriccatcher-v3.0.1.asciidoc[] + diff --git a/docs/versioned-plugins/outputs/metriccatcher-v3.0.1.asciidoc b/docs/versioned-plugins/outputs/metriccatcher-v3.0.1.asciidoc new file mode 100644 index 000000000..6c0cc647f --- /dev/null +++ b/docs/versioned-plugins/outputs/metriccatcher-v3.0.1.asciidoc @@ -0,0 +1,164 @@ +:plugin: metriccatcher +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.1 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-output-metriccatcher/blob/v3.0.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Metriccatcher output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output ships metrics to MetricCatcher, allowing you to +utilize Coda Hale's Metrics. + +More info on MetricCatcher: https://github.com/clearspring/MetricCatcher + +At Clearspring, we use it to count the response codes from Apache logs: +[source,ruby] + metriccatcher { + host => "localhost" + port => "1420" + type => "apache-access" + fields => [ "response" ] + meter => { + "%{host}.apache.response.%{response}" => "1" + } + } + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Metriccatcher Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-biased>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-counter>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-gauge>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-meter>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timer>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-uniform>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-biased"] +===== `biased` + + * Value type is <> + * There is no default value for this setting. + +The metrics to send. This supports dynamic strings like `%{host}` +for metric names and also for values. This is a hash field with key +of the metric name, value of the metric value. + +The value will be coerced to a floating point value. Values which cannot be +coerced will zero (0) + +[id="{version}-plugins-{type}s-{plugin}-counter"] +===== `counter` + + * Value type is <> + * There is no default value for this setting. + +The metrics to send. This supports dynamic strings like `%{host}` +for metric names and also for values. This is a hash field with key +of the metric name, value of the metric value. Example: +[source,ruby] + counter => { "%{host}.apache.hits.%{response} => "1" } + +The value will be coerced to a floating point value. Values which cannot be +coerced will zero (0) + +[id="{version}-plugins-{type}s-{plugin}-gauge"] +===== `gauge` + + * Value type is <> + * There is no default value for this setting. + +The metrics to send. This supports dynamic strings like `%{host}` +for metric names and also for values. This is a hash field with key +of the metric name, value of the metric value. + +The value will be coerced to a floating point value. Values which cannot be +coerced will zero (0) + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"localhost"` + +The address of the MetricCatcher + +[id="{version}-plugins-{type}s-{plugin}-meter"] +===== `meter` + + * Value type is <> + * There is no default value for this setting. + +The metrics to send. This supports dynamic strings like `%{host}` +for metric names and also for values. This is a hash field with key +of the metric name, value of the metric value. + +The value will be coerced to a floating point value. Values which cannot be +coerced will zero (0) + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `1420` + +The port to connect on your MetricCatcher + +[id="{version}-plugins-{type}s-{plugin}-timer"] +===== `timer` + + * Value type is <> + * There is no default value for this setting. + +The metrics to send. This supports dynamic strings like %{host} +for metric names and also for values. This is a hash field with key +of the metric name, value of the metric value. Example: +[source,ruby] + timer => { "%{host}.apache.response_time => "%{response_time}" } + +The value will be coerced to a floating point value. Values which cannot be +coerced will zero (0) + +[id="{version}-plugins-{type}s-{plugin}-uniform"] +===== `uniform` + + * Value type is <> + * There is no default value for this setting. 
+ +The metrics to send. This supports dynamic strings like `%{host}` +for metric names and also for values. This is a hash field with key +of the metric name, value of the metric value. + +The value will be coerced to a floating point value. Values which cannot be +coerced will be set to zero (0). + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/metriccatcher-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/metriccatcher-v3.0.2.asciidoc new file mode 100644 index 000000000..e00323ef6 --- /dev/null +++ b/docs/versioned-plugins/outputs/metriccatcher-v3.0.2.asciidoc @@ -0,0 +1,164 @@ +:plugin: metriccatcher +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.2 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-metriccatcher/blob/v3.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Metriccatcher output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output ships metrics to MetricCatcher, allowing you to +utilize Coda Hale's Metrics. + +More info on MetricCatcher: https://github.com/clearspring/MetricCatcher + +At Clearspring, we use it to count the response codes from Apache logs: +[source,ruby] + metriccatcher { + host => "localhost" + port => "1420" + type => "apache-access" + fields => [ "response" ] + meter => { + "%{host}.apache.response.%{response}" => "1" + } + } + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Metriccatcher Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-biased>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-counter>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-gauge>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-meter>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timer>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-uniform>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-biased"] +===== `biased` + + * Value type is <> + * There is no default value for this setting. + +The metrics to send. This supports dynamic strings like `%{host}` +for metric names and also for values. This is a hash field with key +of the metric name, value of the metric value. + +The value will be coerced to a floating point value. Values which cannot be +coerced will be set to zero (0). + +[id="{version}-plugins-{type}s-{plugin}-counter"] +===== `counter` + + * Value type is <> + * There is no default value for this setting. + +The metrics to send. This supports dynamic strings like `%{host}` +for metric names and also for values.
This is a hash field with key +of the metric name, value of the metric value. Example: +[source,ruby] + counter => { "%{host}.apache.hits.%{response}" => "1" } + +The value will be coerced to a floating point value. Values which cannot be +coerced will be set to zero (0). + +[id="{version}-plugins-{type}s-{plugin}-gauge"] +===== `gauge` + + * Value type is <> + * There is no default value for this setting. + +The metrics to send. This supports dynamic strings like `%{host}` +for metric names and also for values. This is a hash field with key +of the metric name, value of the metric value. + +The value will be coerced to a floating point value. Values which cannot be +coerced will be set to zero (0). + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"localhost"` + +The address of the MetricCatcher. + +[id="{version}-plugins-{type}s-{plugin}-meter"] +===== `meter` + + * Value type is <> + * There is no default value for this setting. + +The metrics to send. This supports dynamic strings like `%{host}` +for metric names and also for values. This is a hash field with key +of the metric name, value of the metric value. + +The value will be coerced to a floating point value. Values which cannot be +coerced will be set to zero (0). + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `1420` + +The port to connect to on your MetricCatcher. + +[id="{version}-plugins-{type}s-{plugin}-timer"] +===== `timer` + + * Value type is <> + * There is no default value for this setting. + +The metrics to send. This supports dynamic strings like `%{host}` +for metric names and also for values. This is a hash field with key +of the metric name, value of the metric value. Example: +[source,ruby] + timer => { "%{host}.apache.response_time" => "%{response_time}" } + +The value will be coerced to a floating point value. Values which cannot be +coerced will be set to zero (0). + +[id="{version}-plugins-{type}s-{plugin}-uniform"] +===== `uniform` + + * Value type is <> + * There is no default value for this setting. + +The metrics to send. This supports dynamic strings like `%{host}` +for metric names and also for values. This is a hash field with key +of the metric name, value of the metric value. + +The value will be coerced to a floating point value. Values which cannot be +coerced will be set to zero (0). + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/metriccatcher-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/metriccatcher-v3.0.3.asciidoc new file mode 100644 index 000000000..c3bb52735 --- /dev/null +++ b/docs/versioned-plugins/outputs/metriccatcher-v3.0.3.asciidoc @@ -0,0 +1,164 @@ +:plugin: metriccatcher +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-11-07 +:changelog_url: https://github.com/logstash-plugins/logstash-output-metriccatcher/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Metriccatcher output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output ships metrics to MetricCatcher, allowing you to +utilize Coda Hale's Metrics.
+ +More info on MetricCatcher: https://github.com/clearspring/MetricCatcher + +At Clearspring, we use it to count the response codes from Apache logs: +[source,ruby] + metriccatcher { + host => "localhost" + port => "1420" + type => "apache-access" + fields => [ "response" ] + meter => { + "%{host}.apache.response.%{response}" => "1" + } + } + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Metriccatcher Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-biased>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-counter>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-gauge>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-meter>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timer>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-uniform>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-biased"] +===== `biased` + + * Value type is <> + * There is no default value for this setting. + +The metrics to send. This supports dynamic strings like `%{host}` +for metric names and also for values. This is a hash field with key +of the metric name, value of the metric value. + +The value will be coerced to a floating point value. Values which cannot be +coerced will be set to zero (0). + +[id="{version}-plugins-{type}s-{plugin}-counter"] +===== `counter` + + * Value type is <> + * There is no default value for this setting. + +The metrics to send. This supports dynamic strings like `%{host}` +for metric names and also for values. This is a hash field with key +of the metric name, value of the metric value. Example: +[source,ruby] + counter => { "%{host}.apache.hits.%{response}" => "1" } + +The value will be coerced to a floating point value. Values which cannot be +coerced will be set to zero (0). + +[id="{version}-plugins-{type}s-{plugin}-gauge"] +===== `gauge` + + * Value type is <> + * There is no default value for this setting. + +The metrics to send. This supports dynamic strings like `%{host}` +for metric names and also for values. This is a hash field with key +of the metric name, value of the metric value. + +The value will be coerced to a floating point value. Values which cannot be +coerced will be set to zero (0). + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"localhost"` + +The address of the MetricCatcher. + +[id="{version}-plugins-{type}s-{plugin}-meter"] +===== `meter` + + * Value type is <> + * There is no default value for this setting. + +The metrics to send. This supports dynamic strings like `%{host}` +for metric names and also for values. This is a hash field with key +of the metric name, value of the metric value. + +The value will be coerced to a floating point value.
Values which cannot be +coerced will be set to zero (0). + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `1420` + +The port to connect to on your MetricCatcher. + +[id="{version}-plugins-{type}s-{plugin}-timer"] +===== `timer` + + * Value type is <> + * There is no default value for this setting. + +The metrics to send. This supports dynamic strings like `%{host}` +for metric names and also for values. This is a hash field with key +of the metric name, value of the metric value. Example: +[source,ruby] + timer => { "%{host}.apache.response_time" => "%{response_time}" } + +The value will be coerced to a floating point value. Values which cannot be +coerced will be set to zero (0). + +[id="{version}-plugins-{type}s-{plugin}-uniform"] +===== `uniform` + + * Value type is <> + * There is no default value for this setting. + +The metrics to send. This supports dynamic strings like `%{host}` +for metric names and also for values. This is a hash field with key +of the metric name, value of the metric value. + +The value will be coerced to a floating point value. Values which cannot be +coerced will be set to zero (0). + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/monasca_log_api-index.asciidoc b/docs/versioned-plugins/outputs/monasca_log_api-index.asciidoc new file mode 100644 index 000000000..6da217799 --- /dev/null +++ b/docs/versioned-plugins/outputs/monasca_log_api-index.asciidoc @@ -0,0 +1,12 @@ +:plugin: monasca_log_api +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-10-25 +|======================================================================= + +include::monasca_log_api-v1.0.2.asciidoc[] + diff --git a/docs/versioned-plugins/outputs/monasca_log_api-v1.0.2.asciidoc b/docs/versioned-plugins/outputs/monasca_log_api-v1.0.2.asciidoc new file mode 100644 index 000000000..1df18f424 --- /dev/null +++ b/docs/versioned-plugins/outputs/monasca_log_api-v1.0.2.asciidoc @@ -0,0 +1,179 @@ +:plugin: monasca_log_api +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v1.0.2 +:release_date: 2017-10-25 +:changelog_url: https://github.com/logstash-plugins/logstash-output-monasca_log_api/blob/v1.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Monasca_log_api output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This Logstash output plugin sends events to the monasca-api. +It authenticates against Keystone and gets a token. +The token is used to authenticate against the monasca-api and send log events. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Monasca_log_api Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-delay>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-dimensions>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-elapsed_time_sec>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystone_api_insecure>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-keystone_api_url>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-max_data_size_kb>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-monasca_log_api_insecure>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-monasca_log_api_url>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-num_of_logs>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-project_domain_name>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-project_name>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-user_domain_name>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-username>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-delay"]
+===== `delay`
+
+  * Value type is <>
+  * Default value is `10`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-dimensions"]
+===== `dimensions`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+Global dimensions to attach to every log event.
+
+[id="{version}-plugins-{type}s-{plugin}-elapsed_time_sec"]
+===== `elapsed_time_sec`
+
+  * Value type is <>
+  * Default value is `30`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-keystone_api_insecure"]
+===== `keystone_api_insecure`
+
+  * Value type is <>
+  * Default value is `false`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-keystone_api_url"]
+===== `keystone_api_url`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The URL of the Keystone API used for authentication.
+
+[id="{version}-plugins-{type}s-{plugin}-max_data_size_kb"]
+===== `max_data_size_kb`
+
+  * Value type is <>
+  * Default value is `5120`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-monasca_log_api_insecure"]
+===== `monasca_log_api_insecure`
+
+  * Value type is <>
+  * Default value is `false`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-monasca_log_api_url"]
+===== `monasca_log_api_url`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The URL of the monasca-log-api endpoint.
+
+[id="{version}-plugins-{type}s-{plugin}-num_of_logs"]
+===== `num_of_logs`
+
+  * Value type is <>
+  * Default value is `125`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-project_domain_name"]
+===== `project_domain_name`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-project_name"]
+===== `project_name`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-user_domain_name"]
+===== `user_domain_name`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-username"]
+===== `username`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/mongodb-index.asciidoc b/docs/versioned-plugins/outputs/mongodb-index.asciidoc
new file mode 100644
index 000000000..d00bc1b48
--- /dev/null
+++ b/docs/versioned-plugins/outputs/mongodb-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: mongodb
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::mongodb-v3.1.3.asciidoc[]
+include::mongodb-v3.1.2.asciidoc[]
+include::mongodb-v3.1.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/mongodb-v3.1.1.asciidoc b/docs/versioned-plugins/outputs/mongodb-v3.1.1.asciidoc
new file mode 100644
index 000000000..27597f9ce
--- /dev/null
+++ b/docs/versioned-plugins/outputs/mongodb-v3.1.1.asciidoc
@@ -0,0 +1,134 @@
+:plugin: mongodb
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-mongodb/blob/v3.1.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Mongodb output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output writes events to MongoDB.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Mongodb Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-bulk>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_size>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-collection>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-database>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-generateId>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-isodate>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_delay>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-uri>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-bulk"]
+===== `bulk`
+
+  * Value type is <>
+  * Default value is `false`
+
+Bulk insert flag. Set to true to allow bulk insertion; otherwise events are
+inserted one by one.
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_interval"]
+===== `bulk_interval`
+
+  * Value type is <>
+  * Default value is `2`
+
+Bulk interval. Pending events are inserted periodically at this interval when
+the `bulk` flag is enabled, as shown in the sketch below.
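+
+As a rough illustration (the values shown are hypothetical, not
+recommendations), the bulk settings work together like this: a pending batch
+is flushed either when the interval elapses or when the `bulk_size` limit
+described in the next section is reached.
+[source,ruby]
+    mongodb {
+      uri => "mongodb://localhost:27017"
+      database => "logstash"
+      collection => "events"
+      bulk => true           # enable batched inserts
+      bulk_interval => 2     # flush pending events periodically...
+      bulk_size => 900       # ...or sooner, once 900 events are queued
+    }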
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_size"]
+===== `bulk_size`
+
+  * Value type is <>
+  * Default value is `900`
+
+Maximum number of events to buffer. If the number of events queued for a
+collection reaches this limit, they are bulk inserted immediately, regardless
+of the bulk interval (the MongoDB hard limit is 1000).
+
+[id="{version}-plugins-{type}s-{plugin}-collection"]
+===== `collection`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The collection to use. This value can use `%{foo}` values to dynamically
+select a collection based on data in the event.
+
+[id="{version}-plugins-{type}s-{plugin}-database"]
+===== `database`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The database to use.
+
+[id="{version}-plugins-{type}s-{plugin}-generateId"]
+===== `generateId`
+
+  * Value type is <>
+  * Default value is `false`
+
+If true, an "_id" field will be added to the document before insertion.
+The "_id" field will use the timestamp of the event and overwrite an existing
+"_id" field in the event.
+
+[id="{version}-plugins-{type}s-{plugin}-isodate"]
+===== `isodate`
+
+  * Value type is <>
+  * Default value is `false`
+
+If true, store the @timestamp field in MongoDB as an ISODate type instead
+of an ISO8601 string. For more information about this, see
+http://www.mongodb.org/display/DOCS/Dates.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_delay"]
+===== `retry_delay`
+
+  * Value type is <>
+  * Default value is `3`
+
+The number of seconds to wait after failure before retrying.
+
+[id="{version}-plugins-{type}s-{plugin}-uri"]
+===== `uri`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+A MongoDB URI to connect to.
+See http://docs.mongodb.org/manual/reference/connection-string/.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/mongodb-v3.1.2.asciidoc b/docs/versioned-plugins/outputs/mongodb-v3.1.2.asciidoc
new file mode 100644
index 000000000..72d690fe4
--- /dev/null
+++ b/docs/versioned-plugins/outputs/mongodb-v3.1.2.asciidoc
@@ -0,0 +1,134 @@
+:plugin: mongodb
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.2
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-output-mongodb/blob/v3.1.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Mongodb output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output writes events to MongoDB.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Mongodb Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-bulk>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_size>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-collection>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-database>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-generateId>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-isodate>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_delay>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-uri>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-bulk"]
+===== `bulk`
+
+  * Value type is <>
+  * Default value is `false`
+
+Bulk insert flag. Set to true to allow bulk insertion; otherwise events are
+inserted one by one.
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_interval"]
+===== `bulk_interval`
+
+  * Value type is <>
+  * Default value is `2`
+
+Bulk interval. Pending events are inserted periodically at this interval when
+the `bulk` flag is enabled.
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_size"]
+===== `bulk_size`
+
+  * Value type is <>
+  * Default value is `900`
+
+Maximum number of events to buffer. If the number of events queued for a
+collection reaches this limit, they are bulk inserted immediately, regardless
+of the bulk interval (the MongoDB hard limit is 1000).
+
+[id="{version}-plugins-{type}s-{plugin}-collection"]
+===== `collection`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The collection to use. This value can use `%{foo}` values to dynamically
+select a collection based on data in the event.
+
+[id="{version}-plugins-{type}s-{plugin}-database"]
+===== `database`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The database to use.
+
+[id="{version}-plugins-{type}s-{plugin}-generateId"]
+===== `generateId`
+
+  * Value type is <>
+  * Default value is `false`
+
+If true, an "_id" field will be added to the document before insertion.
+The "_id" field will use the timestamp of the event and overwrite an existing
+"_id" field in the event.
+
+[id="{version}-plugins-{type}s-{plugin}-isodate"]
+===== `isodate`
+
+  * Value type is <>
+  * Default value is `false`
+
+If true, store the @timestamp field in MongoDB as an ISODate type instead
+of an ISO8601 string. For more information about this, see
+http://www.mongodb.org/display/DOCS/Dates.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_delay"]
+===== `retry_delay`
+
+  * Value type is <>
+  * Default value is `3`
+
+The number of seconds to wait after failure before retrying.
+
+[id="{version}-plugins-{type}s-{plugin}-uri"]
+===== `uri`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+A MongoDB URI to connect to.
+See http://docs.mongodb.org/manual/reference/connection-string/.
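+
+Putting these settings together, a minimal sketch (the host, database, and
+collection names are hypothetical, not defaults) that stores events with
+native ISODate timestamps might look like:
+[source,ruby]
+    output {
+      mongodb {
+        uri => "mongodb://mongo.example.com:27017"
+        database => "logstash"
+        collection => "events"
+        isodate => true    # store @timestamp as an ISODate, per the option above
+      }
+    }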
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/mongodb-v3.1.3.asciidoc b/docs/versioned-plugins/outputs/mongodb-v3.1.3.asciidoc
new file mode 100644
index 000000000..dfb04421d
--- /dev/null
+++ b/docs/versioned-plugins/outputs/mongodb-v3.1.3.asciidoc
@@ -0,0 +1,134 @@
+:plugin: mongodb
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.3
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-output-mongodb/blob/v3.1.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Mongodb output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output writes events to MongoDB.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Mongodb Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-bulk>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-bulk_size>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-collection>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-database>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-generateId>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-isodate>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_delay>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-uri>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-bulk"]
+===== `bulk`
+
+  * Value type is <>
+  * Default value is `false`
+
+Bulk insert flag. Set to true to allow bulk insertion; otherwise events are
+inserted one by one.
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_interval"]
+===== `bulk_interval`
+
+  * Value type is <>
+  * Default value is `2`
+
+Bulk interval. Pending events are inserted periodically at this interval when
+the `bulk` flag is enabled.
+
+[id="{version}-plugins-{type}s-{plugin}-bulk_size"]
+===== `bulk_size`
+
+  * Value type is <>
+  * Default value is `900`
+
+Maximum number of events to buffer. If the number of events queued for a
+collection reaches this limit, they are bulk inserted immediately, regardless
+of the bulk interval (the MongoDB hard limit is 1000).
+
+[id="{version}-plugins-{type}s-{plugin}-collection"]
+===== `collection`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The collection to use. This value can use `%{foo}` values to dynamically
+select a collection based on data in the event, as shown in the sketch below.
+
+[id="{version}-plugins-{type}s-{plugin}-database"]
+===== `database`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The database to use.
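+
+For instance (the `type` field and names here are hypothetical), the sprintf
+support noted under `collection` above can route events into per-type
+collections:
+[source,ruby]
+    mongodb {
+      uri => "mongodb://localhost:27017"
+      database => "logstash"
+      collection => "%{type}"   # e.g. "apache-access", "syslog"
+    }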
+
+[id="{version}-plugins-{type}s-{plugin}-generateId"]
+===== `generateId`
+
+  * Value type is <>
+  * Default value is `false`
+
+If true, an "_id" field will be added to the document before insertion.
+The "_id" field will use the timestamp of the event and overwrite an existing
+"_id" field in the event.
+
+[id="{version}-plugins-{type}s-{plugin}-isodate"]
+===== `isodate`
+
+  * Value type is <>
+  * Default value is `false`
+
+If true, store the @timestamp field in MongoDB as an ISODate type instead
+of an ISO8601 string. For more information about this, see
+http://www.mongodb.org/display/DOCS/Dates.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_delay"]
+===== `retry_delay`
+
+  * Value type is <>
+  * Default value is `3`
+
+The number of seconds to wait after failure before retrying.
+
+[id="{version}-plugins-{type}s-{plugin}-uri"]
+===== `uri`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+A MongoDB URI to connect to.
+See http://docs.mongodb.org/manual/reference/connection-string/.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/nagios-index.asciidoc b/docs/versioned-plugins/outputs/nagios-index.asciidoc
new file mode 100644
index 000000000..a1bf00c2f
--- /dev/null
+++ b/docs/versioned-plugins/outputs/nagios-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: nagios
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-07
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::nagios-v3.0.5.asciidoc[]
+include::nagios-v3.0.4.asciidoc[]
+include::nagios-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/nagios-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/nagios-v3.0.3.asciidoc
new file mode 100644
index 000000000..9c0ce8807
--- /dev/null
+++ b/docs/versioned-plugins/outputs/nagios-v3.0.3.asciidoc
@@ -0,0 +1,90 @@
+:plugin: nagios
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-nagios/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Nagios output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The Nagios output is used for sending passive check results to Nagios via the
+Nagios command file. This output currently supports Nagios 3.
+
+For this output to work, your event _must_ have the following Logstash event fields:
+
+ * `nagios_host`
+ * `nagios_service`
+
+These Logstash event fields are supported, but optional:
+
+ * `nagios_annotation`
+ * `nagios_level` (overrides `nagios_level` configuration option)
+
+There are two configuration options:
+
+ * `commandfile` - The location of the Nagios external command file. Defaults
+   to '/var/lib/nagios3/rw/nagios.cmd'
+ * `nagios_level` - Specifies the level of the check to be sent. Defaults to
+   CRITICAL and can be overridden by setting the "nagios_level" field to one
+   of "OK", "WARNING", "CRITICAL", or "UNKNOWN"
+
+For example, to send a passive check result only for events that look like
+errors:
+[source,ruby]
+    output {
+      if [message] =~ /(error|ERROR|CRITICAL)/ {
+        nagios {
+          # your config here
+        }
+      }
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Nagios Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-commandfile>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-nagios_level>> |<>, one of `["0", "1", "2", "3"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-commandfile"]
+===== `commandfile`
+
+  * Value type is <>
+  * Default value is `"/var/lib/nagios3/rw/nagios.cmd"`
+
+The full path to your Nagios command file.
+
+[id="{version}-plugins-{type}s-{plugin}-nagios_level"]
+===== `nagios_level`
+
+  * Value can be any of: `0`, `1`, `2`, `3`
+  * Default value is `"2"`
+
+The Nagios check level. Should be one of 0=OK, 1=WARNING, 2=CRITICAL,
+3=UNKNOWN. Defaults to 2 - CRITICAL.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/nagios-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/nagios-v3.0.4.asciidoc
new file mode 100644
index 000000000..a4ed51d13
--- /dev/null
+++ b/docs/versioned-plugins/outputs/nagios-v3.0.4.asciidoc
@@ -0,0 +1,90 @@
+:plugin: nagios
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-output-nagios/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Nagios output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The Nagios output is used for sending passive check results to Nagios via the
+Nagios command file. This output currently supports Nagios 3.
+
+For this output to work, your event _must_ have the following Logstash event fields:
+
+ * `nagios_host`
+ * `nagios_service`
+
+These Logstash event fields are supported, but optional:
+
+ * `nagios_annotation`
+ * `nagios_level` (overrides `nagios_level` configuration option)
+
+There are two configuration options:
+
+ * `commandfile` - The location of the Nagios external command file. Defaults
+   to '/var/lib/nagios3/rw/nagios.cmd'
+ * `nagios_level` - Specifies the level of the check to be sent. Defaults to
+   CRITICAL and can be overridden by setting the "nagios_level" field to one
+   of "OK", "WARNING", "CRITICAL", or "UNKNOWN"
+
+For example, to send a passive check result only for events that look like
+errors:
+[source,ruby]
+    output {
+      if [message] =~ /(error|ERROR|CRITICAL)/ {
+        nagios {
+          # your config here
+        }
+      }
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Nagios Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-commandfile>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-nagios_level>> |<>, one of `["0", "1", "2", "3"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-commandfile"]
+===== `commandfile`
+
+  * Value type is <>
+  * Default value is `"/var/lib/nagios3/rw/nagios.cmd"`
+
+The full path to your Nagios command file.
+
+[id="{version}-plugins-{type}s-{plugin}-nagios_level"]
+===== `nagios_level`
+
+  * Value can be any of: `0`, `1`, `2`, `3`
+  * Default value is `"2"`
+
+The Nagios check level. Should be one of 0=OK, 1=WARNING, 2=CRITICAL,
+3=UNKNOWN. Defaults to 2 - CRITICAL.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/nagios-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/nagios-v3.0.5.asciidoc
new file mode 100644
index 000000000..070121336
--- /dev/null
+++ b/docs/versioned-plugins/outputs/nagios-v3.0.5.asciidoc
@@ -0,0 +1,90 @@
+:plugin: nagios
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-11-07
+:changelog_url: https://github.com/logstash-plugins/logstash-output-nagios/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Nagios output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The Nagios output is used for sending passive check results to Nagios via the
+Nagios command file. This output currently supports Nagios 3.
+
+For this output to work, your event _must_ have the following Logstash event fields:
+
+ * `nagios_host`
+ * `nagios_service`
+
+These Logstash event fields are supported, but optional:
+
+ * `nagios_annotation`
+ * `nagios_level` (overrides `nagios_level` configuration option)
+
+There are two configuration options:
+
+ * `commandfile` - The location of the Nagios external command file. Defaults
+   to '/var/lib/nagios3/rw/nagios.cmd'
+ * `nagios_level` - Specifies the level of the check to be sent. Defaults to
+   CRITICAL and can be overridden by setting the "nagios_level" field to one
+   of "OK", "WARNING", "CRITICAL", or "UNKNOWN"
+
+For example, to send a passive check result only for events that look like
+errors:
+[source,ruby]
+    output {
+      if [message] =~ /(error|ERROR|CRITICAL)/ {
+        nagios {
+          # your config here
+        }
+      }
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Nagios Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-commandfile>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-nagios_level>> |<>, one of `["0", "1", "2", "3"]`|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-commandfile"]
+===== `commandfile`
+
+  * Value type is <>
+  * Default value is `"/var/lib/nagios3/rw/nagios.cmd"`
+
+The full path to your Nagios command file.
+
+[id="{version}-plugins-{type}s-{plugin}-nagios_level"]
+===== `nagios_level`
+
+  * Value can be any of: `0`, `1`, `2`, `3`
+  * Default value is `"2"`
+
+The Nagios check level. Should be one of 0=OK, 1=WARNING, 2=CRITICAL,
+3=UNKNOWN. Defaults to 2 - CRITICAL.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/nagios_nsca-index.asciidoc b/docs/versioned-plugins/outputs/nagios_nsca-index.asciidoc
new file mode 100644
index 000000000..c66b663ee
--- /dev/null
+++ b/docs/versioned-plugins/outputs/nagios_nsca-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: nagios_nsca
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-13
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::nagios_nsca-v3.0.5.asciidoc[]
+include::nagios_nsca-v3.0.4.asciidoc[]
+include::nagios_nsca-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/nagios_nsca-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/nagios_nsca-v3.0.3.asciidoc
new file mode 100644
index 000000000..d9996e888
--- /dev/null
+++ b/docs/versioned-plugins/outputs/nagios_nsca-v3.0.3.asciidoc
@@ -0,0 +1,141 @@
+:plugin: nagios_nsca
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-nagios_nsca/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Nagios_nsca output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The nagios_nsca output is used for sending passive check results to Nagios
+through the NSCA protocol.
+
+This is useful if your Nagios server is not the same as the source host from
+where you want to send logs or alerts. If you only have one server, this
+output is probably overkill for you; take a look at the 'nagios' output
+instead.
+
+Here is a sample config using the nagios_nsca output:
+[source,ruby]
+    output {
+      nagios_nsca {
+        # specify the hostname or ip of your nagios server
+        host => "nagios.example.com"
+
+        # specify the port to connect to
+        port => 5667
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Nagios_nsca Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-message_format>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-nagios_host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-nagios_service>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-nagios_status>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-send_nsca_bin>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-send_nsca_config>> |a valid filesystem path|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * Value type is <>
+  * Default value is `"localhost"`
+
+The nagios host or IP to send logs to. It should have an NSCA daemon running.
+
+[id="{version}-plugins-{type}s-{plugin}-message_format"]
+===== `message_format`
+
+  * Value type is <>
+  * Default value is `"%{@timestamp} %{host}: %{message}"`
+
+The format to use when writing events to nagios. This value
+supports any string and can include `%{name}` and other dynamic
+strings.
+
+[id="{version}-plugins-{type}s-{plugin}-nagios_host"]
+===== `nagios_host`
+
+  * Value type is <>
+  * Default value is `"%{host}"`
+
+The nagios 'host' you want to submit a passive check result to. This
+parameter accepts interpolation, e.g. you can use `@source_host` or other
+logstash internal variables.
+
+[id="{version}-plugins-{type}s-{plugin}-nagios_service"]
+===== `nagios_service`
+
+  * Value type is <>
+  * Default value is `"LOGSTASH"`
+
+The nagios 'service' you want to submit a passive check result to. This
+parameter accepts interpolation, e.g. you can use `@source_host` or other
+logstash internal variables.
+
+[id="{version}-plugins-{type}s-{plugin}-nagios_status"]
+===== `nagios_status`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The status to send to nagios. Should be 0 = OK, 1 = WARNING, 2 = CRITICAL,
+3 = UNKNOWN.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * Value type is <>
+  * Default value is `5667`
+
+The port where the NSCA daemon on the nagios host listens.
+
+[id="{version}-plugins-{type}s-{plugin}-send_nsca_bin"]
+===== `send_nsca_bin`
+
+  * Value type is <>
+  * Default value is `"/usr/sbin/send_nsca"`
+
+The path to the 'send_nsca' binary on the local host.
+
+[id="{version}-plugins-{type}s-{plugin}-send_nsca_config"]
+===== `send_nsca_config`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The path to the send_nsca config file on the local host.
+Leave blank if you don't want to provide a config file.
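+
+Because `nagios_status` is required and has no default, one possible approach
+(a sketch only; the match pattern and levels are hypothetical) is to derive
+the status in the filter stage and then reference the resulting field from
+this output, assuming `nagios_status` interpolates like the neighboring
+settings:
+[source,ruby]
+    filter {
+      if [message] =~ /ERROR/ {
+        mutate { add_field => { "nagios_status" => "2" } }  # CRITICAL
+      } else {
+        mutate { add_field => { "nagios_status" => "0" } }  # OK
+      }
+    }
+    output {
+      nagios_nsca {
+        nagios_status => "%{nagios_status}"
+      }
+    }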
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/nagios_nsca-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/nagios_nsca-v3.0.4.asciidoc
new file mode 100644
index 000000000..47391e705
--- /dev/null
+++ b/docs/versioned-plugins/outputs/nagios_nsca-v3.0.4.asciidoc
@@ -0,0 +1,141 @@
+:plugin: nagios_nsca
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-output-nagios_nsca/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Nagios_nsca output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The nagios_nsca output is used for sending passive check results to Nagios
+through the NSCA protocol.
+
+This is useful if your Nagios server is not the same as the source host from
+where you want to send logs or alerts. If you only have one server, this
+output is probably overkill for you; take a look at the 'nagios' output
+instead.
+
+Here is a sample config using the nagios_nsca output:
+[source,ruby]
+    output {
+      nagios_nsca {
+        # specify the hostname or ip of your nagios server
+        host => "nagios.example.com"
+
+        # specify the port to connect to
+        port => 5667
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Nagios_nsca Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-message_format>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-nagios_host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-nagios_service>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-nagios_status>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-send_nsca_bin>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-send_nsca_config>> |a valid filesystem path|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * Value type is <>
+  * Default value is `"localhost"`
+
+The nagios host or IP to send logs to. It should have an NSCA daemon running.
+
+[id="{version}-plugins-{type}s-{plugin}-message_format"]
+===== `message_format`
+
+  * Value type is <>
+  * Default value is `"%{@timestamp} %{host}: %{message}"`
+
+The format to use when writing events to nagios. This value
+supports any string and can include `%{name}` and other dynamic
+strings.
+
+[id="{version}-plugins-{type}s-{plugin}-nagios_host"]
+===== `nagios_host`
+
+  * Value type is <>
+  * Default value is `"%{host}"`
+
+The nagios 'host' you want to submit a passive check result to. This
+parameter accepts interpolation, e.g. you can use `@source_host` or other
+logstash internal variables.
+
+[id="{version}-plugins-{type}s-{plugin}-nagios_service"]
+===== `nagios_service`
+
+  * Value type is <>
+  * Default value is `"LOGSTASH"`
+
+The nagios 'service' you want to submit a passive check result to. This
+parameter accepts interpolation, e.g. you can use `@source_host` or other
+logstash internal variables.
+
+[id="{version}-plugins-{type}s-{plugin}-nagios_status"]
+===== `nagios_status`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The status to send to nagios. Should be 0 = OK, 1 = WARNING, 2 = CRITICAL,
+3 = UNKNOWN.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * Value type is <>
+  * Default value is `5667`
+
+The port where the NSCA daemon on the nagios host listens.
+
+[id="{version}-plugins-{type}s-{plugin}-send_nsca_bin"]
+===== `send_nsca_bin`
+
+  * Value type is <>
+  * Default value is `"/usr/sbin/send_nsca"`
+
+The path to the 'send_nsca' binary on the local host.
+
+[id="{version}-plugins-{type}s-{plugin}-send_nsca_config"]
+===== `send_nsca_config`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The path to the send_nsca config file on the local host.
+Leave blank if you don't want to provide a config file.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/nagios_nsca-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/nagios_nsca-v3.0.5.asciidoc
new file mode 100644
index 000000000..d51955ecc
--- /dev/null
+++ b/docs/versioned-plugins/outputs/nagios_nsca-v3.0.5.asciidoc
@@ -0,0 +1,141 @@
+:plugin: nagios_nsca
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-11-13
+:changelog_url: https://github.com/logstash-plugins/logstash-output-nagios_nsca/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Nagios_nsca output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The nagios_nsca output is used for sending passive check results to Nagios
+through the NSCA protocol.
+
+This is useful if your Nagios server is not the same as the source host from
+where you want to send logs or alerts. If you only have one server, this
+output is probably overkill for you; take a look at the 'nagios' output
+instead.
+
+Here is a sample config using the nagios_nsca output:
+[source,ruby]
+    output {
+      nagios_nsca {
+        # specify the hostname or ip of your nagios server
+        host => "nagios.example.com"
+
+        # specify the port to connect to
+        port => 5667
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Nagios_nsca Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-message_format>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-nagios_host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-nagios_service>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-nagios_status>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-send_nsca_bin>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-send_nsca_config>> |a valid filesystem path|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * Value type is <>
+  * Default value is `"localhost"`
+
+The nagios host or IP to send logs to. It should have an NSCA daemon running.
+
+[id="{version}-plugins-{type}s-{plugin}-message_format"]
+===== `message_format`
+
+  * Value type is <>
+  * Default value is `"%{@timestamp} %{host}: %{message}"`
+
+The format to use when writing events to nagios. This value
+supports any string and can include `%{name}` and other dynamic
+strings.
+
+[id="{version}-plugins-{type}s-{plugin}-nagios_host"]
+===== `nagios_host`
+
+  * Value type is <>
+  * Default value is `"%{host}"`
+
+The nagios 'host' you want to submit a passive check result to. This
+parameter accepts interpolation, e.g. you can use `@source_host` or other
+logstash internal variables.
+
+[id="{version}-plugins-{type}s-{plugin}-nagios_service"]
+===== `nagios_service`
+
+  * Value type is <>
+  * Default value is `"LOGSTASH"`
+
+The nagios 'service' you want to submit a passive check result to. This
+parameter accepts interpolation, e.g. you can use `@source_host` or other
+logstash internal variables.
+
+[id="{version}-plugins-{type}s-{plugin}-nagios_status"]
+===== `nagios_status`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The status to send to nagios. Should be 0 = OK, 1 = WARNING, 2 = CRITICAL,
+3 = UNKNOWN.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * Value type is <>
+  * Default value is `5667`
+
+The port where the NSCA daemon on the nagios host listens.
+
+[id="{version}-plugins-{type}s-{plugin}-send_nsca_bin"]
+===== `send_nsca_bin`
+
+  * Value type is <>
+  * Default value is `"/usr/sbin/send_nsca"`
+
+The path to the 'send_nsca' binary on the local host.
+
+[id="{version}-plugins-{type}s-{plugin}-send_nsca_config"]
+===== `send_nsca_config`
+
+  * Value type is <>
+  * There is no default value for this setting.
+
+The path to the send_nsca config file on the local host.
+Leave blank if you don't want to provide a config file.
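+
+As a fuller sketch than the sample config above (the hostnames and paths are
+hypothetical), an explicit configuration of this output might look like:
+[source,ruby]
+    output {
+      nagios_nsca {
+        host => "nagios.example.com"
+        port => 5667
+        nagios_host => "%{host}"
+        nagios_service => "LOGSTASH"
+        nagios_status => "2"                      # report CRITICAL
+        send_nsca_bin => "/usr/sbin/send_nsca"
+        send_nsca_config => "/etc/send_nsca.cfg"
+      }
+    }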
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/neo4j-index.asciidoc b/docs/versioned-plugins/outputs/neo4j-index.asciidoc
new file mode 100644
index 000000000..727b126d6
--- /dev/null
+++ b/docs/versioned-plugins/outputs/neo4j-index.asciidoc
@@ -0,0 +1,12 @@
+:plugin: neo4j
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-06-23
+|=======================================================================
+
+include::neo4j-v2.0.5.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/neo4j-v2.0.5.asciidoc b/docs/versioned-plugins/outputs/neo4j-v2.0.5.asciidoc
new file mode 100644
index 000000000..e742a1096
--- /dev/null
+++ b/docs/versioned-plugins/outputs/neo4j-v2.0.5.asciidoc
@@ -0,0 +1,53 @@
+:plugin: neo4j
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.5
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-neo4j/blob/v2.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Neo4j output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Neo4j Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+  * This is a required setting.
+  * Value type is <>
+  * There is no default value for this setting.
+
+The path within your file system where the neo4j database is located.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/newrelic-index.asciidoc b/docs/versioned-plugins/outputs/newrelic-index.asciidoc
new file mode 100644
index 000000000..43f875d50
--- /dev/null
+++ b/docs/versioned-plugins/outputs/newrelic-index.asciidoc
@@ -0,0 +1,10 @@
+:plugin: newrelic
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+|=======================================================================
+
+
diff --git a/docs/versioned-plugins/outputs/null-index.asciidoc b/docs/versioned-plugins/outputs/null-index.asciidoc
new file mode 100644
index 000000000..ff281bcbf
--- /dev/null
+++ b/docs/versioned-plugins/outputs/null-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: null
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::null-v3.0.4.asciidoc[]
+include::null-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/null-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/null-v3.0.3.asciidoc
new file mode 100644
index 000000000..6d8beb532
--- /dev/null
+++ b/docs/versioned-plugins/outputs/null-v3.0.3.asciidoc
@@ -0,0 +1,37 @@
+:plugin: null
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-null/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Null output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+A null output. This is useful for testing logstash inputs and filters for
+performance.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Null Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+|=======================================================================
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/null-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/null-v3.0.4.asciidoc
new file mode 100644
index 000000000..dfb5c1133
--- /dev/null
+++ b/docs/versioned-plugins/outputs/null-v3.0.4.asciidoc
@@ -0,0 +1,37 @@
+:plugin: null
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-output-null/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Null output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+A null output. This is useful for testing logstash inputs and filters for
+performance.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Null Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+|=======================================================================
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/opentsdb-index.asciidoc b/docs/versioned-plugins/outputs/opentsdb-index.asciidoc
new file mode 100644
index 000000000..f1891d463
--- /dev/null
+++ b/docs/versioned-plugins/outputs/opentsdb-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: opentsdb
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-13
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::opentsdb-v3.1.4.asciidoc[]
+include::opentsdb-v3.1.3.asciidoc[]
+include::opentsdb-v3.1.2.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/opentsdb-v3.1.2.asciidoc b/docs/versioned-plugins/outputs/opentsdb-v3.1.2.asciidoc
new file mode 100644
index 000000000..cc92ac010
--- /dev/null
+++ b/docs/versioned-plugins/outputs/opentsdb-v3.1.2.asciidoc
@@ -0,0 +1,87 @@
+:plugin: opentsdb
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-opentsdb/blob/v3.1.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Opentsdb output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output allows you to pull metrics from your logs and ship them to
+OpenTSDB. OpenTSDB is an open source tool for storing and graphing metrics.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Opentsdb Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"localhost"` + +The address of the opentsdb server. + +[id="{version}-plugins-{type}s-{plugin}-metrics"] +===== `metrics` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The metric(s) to use. This supports dynamic strings like %{source_host} +for metric names and also for values. This is an array field with key +of the metric name, value of the metric value, and multiple tag,values . Example: +[source,ruby] + [ + "%{host}/uptime", + %{uptime_1m} " , + "hostname" , + "%{host} + "anotherhostname" , + "%{host} + ] + +The value will be coerced to a floating point value. Values which cannot be +coerced will zero (0) + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `4242` + +The port to connect on your graphite server. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/opentsdb-v3.1.3.asciidoc b/docs/versioned-plugins/outputs/opentsdb-v3.1.3.asciidoc new file mode 100644 index 000000000..b15df20a0 --- /dev/null +++ b/docs/versioned-plugins/outputs/opentsdb-v3.1.3.asciidoc @@ -0,0 +1,87 @@ +:plugin: opentsdb +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.3 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-opentsdb/blob/v3.1.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Opentsdb output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output allows you to pull metrics from your logs and ship them to +opentsdb. Opentsdb is an open source tool for storing and graphing metrics. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Opentsdb Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. 
+ +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"localhost"` + +The address of the opentsdb server. + +[id="{version}-plugins-{type}s-{plugin}-metrics"] +===== `metrics` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The metric(s) to use. This supports dynamic strings like %{source_host} +for metric names and also for values. This is an array field with key +of the metric name, value of the metric value, and multiple tag,values . Example: +[source,ruby] + [ + "%{host}/uptime", + %{uptime_1m} " , + "hostname" , + "%{host} + "anotherhostname" , + "%{host} + ] + +The value will be coerced to a floating point value. Values which cannot be +coerced will zero (0) + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `4242` + +The port to connect on your graphite server. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/opentsdb-v3.1.4.asciidoc b/docs/versioned-plugins/outputs/opentsdb-v3.1.4.asciidoc new file mode 100644 index 000000000..b83c23945 --- /dev/null +++ b/docs/versioned-plugins/outputs/opentsdb-v3.1.4.asciidoc @@ -0,0 +1,87 @@ +:plugin: opentsdb +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.4 +:release_date: 2017-11-13 +:changelog_url: https://github.com/logstash-plugins/logstash-output-opentsdb/blob/v3.1.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Opentsdb output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output allows you to pull metrics from your logs and ship them to +opentsdb. Opentsdb is an open source tool for storing and graphing metrics. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Opentsdb Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"localhost"` + +The address of the opentsdb server. + +[id="{version}-plugins-{type}s-{plugin}-metrics"] +===== `metrics` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The metric(s) to use. This supports dynamic strings like %{source_host} +for metric names and also for values. This is an array field with key +of the metric name, value of the metric value, and multiple tag,values . 
Example:
+[source,ruby]
+    [
+      "%{host}/uptime",
+      "%{uptime_1m}",
+      "hostname",
+      "%{host}",
+      "anotherhostname",
+      "%{host}"
+    ]
+
+The value will be coerced to a floating point value. Values which cannot be
+coerced will be set to zero (0).
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <>
+ * Default value is `4242`
+
+The port to connect to on your opentsdb server.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/pagerduty-index.asciidoc b/docs/versioned-plugins/outputs/pagerduty-index.asciidoc
new file mode 100644
index 000000000..a2f189b21
--- /dev/null
+++ b/docs/versioned-plugins/outputs/pagerduty-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: pagerduty
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-13
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::pagerduty-v3.0.6.asciidoc[]
+include::pagerduty-v3.0.5.asciidoc[]
+include::pagerduty-v3.0.4.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/pagerduty-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/pagerduty-v3.0.4.asciidoc
new file mode 100644
index 000000000..e5c4289d5
--- /dev/null
+++ b/docs/versioned-plugins/outputs/pagerduty-v3.0.4.asciidoc
@@ -0,0 +1,105 @@
+:plugin: pagerduty
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-pagerduty/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Pagerduty output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The PagerDuty output will send notifications based on pre-configured services
+and escalation policies. Logstash can send "trigger", "acknowledge" and "resolve"
+event types. In addition, you may configure custom descriptions and event details.
+The only required field is the PagerDuty "Service API Key", which can be found on
+the service's web page on pagerduty.com. In the default case, the description and
+event details will be populated by Logstash, using `message`, `timestamp` and `host` data.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Pagerduty Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-description>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-details>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-event_type>> |<>, one of `["trigger", "acknowledge", "resolve"]`|No
+| <<{version}-plugins-{type}s-{plugin}-incident_key>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pdurl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-service_key>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-description"]
+===== `description`
+
+ * Value type is <>
+ * Default value is `"Logstash event for %{host}"`
+
+Custom description
+
+[id="{version}-plugins-{type}s-{plugin}-details"]
+===== `details`
+
+ * Value type is <>
+ * Default value is `{"timestamp"=>"%{@timestamp}", "message"=>"%{message}"}`
+
+The event details. These might be data from the Logstash event fields you wish to include.
+Tags are automatically included if detected so there is no need to explicitly add them here.
+
+[id="{version}-plugins-{type}s-{plugin}-event_type"]
+===== `event_type`
+
+ * Value can be any of: `trigger`, `acknowledge`, `resolve`
+ * Default value is `"trigger"`
+
+Event type
+
+[id="{version}-plugins-{type}s-{plugin}-incident_key"]
+===== `incident_key`
+
+ * Value type is <>
+ * Default value is `"logstash/%{host}/%{type}"`
+
+The incident key to use. PagerDuty uses this key to deduplicate trigger events
+and to identify the incident to update for "acknowledge" and "resolve" event types.
+
+[id="{version}-plugins-{type}s-{plugin}-pdurl"]
+===== `pdurl`
+
+ * Value type is <>
+ * Default value is `"https://events.pagerduty.com/generic/2010-04-15/create_event.json"`
+
+PagerDuty API URL. You shouldn't need to change this, but it is included for
+flexibility in case PagerDuty changes the API before Logstash has been updated.
+
+[id="{version}-plugins-{type}s-{plugin}-service_key"]
+===== `service_key`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The PagerDuty Service API Key. You'll need to set this up in PagerDuty beforehand.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/pagerduty-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/pagerduty-v3.0.5.asciidoc
new file mode 100644
index 000000000..b93f9edb1
--- /dev/null
+++ b/docs/versioned-plugins/outputs/pagerduty-v3.0.5.asciidoc
@@ -0,0 +1,105 @@
+:plugin: pagerduty
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-output-pagerduty/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Pagerduty output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The PagerDuty output will send notifications based on pre-configured services
+and escalation policies. Logstash can send "trigger", "acknowledge" and "resolve"
+event types.
In addition, you may configure custom descriptions and event details.
+The only required field is the PagerDuty "Service API Key", which can be found on
+the service's web page on pagerduty.com. In the default case, the description and
+event details will be populated by Logstash, using `message`, `timestamp` and `host` data.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Pagerduty Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-description>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-details>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-event_type>> |<>, one of `["trigger", "acknowledge", "resolve"]`|No
+| <<{version}-plugins-{type}s-{plugin}-incident_key>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pdurl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-service_key>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-description"]
+===== `description`
+
+ * Value type is <>
+ * Default value is `"Logstash event for %{host}"`
+
+Custom description
+
+[id="{version}-plugins-{type}s-{plugin}-details"]
+===== `details`
+
+ * Value type is <>
+ * Default value is `{"timestamp"=>"%{@timestamp}", "message"=>"%{message}"}`
+
+The event details. These might be data from the Logstash event fields you wish to include.
+Tags are automatically included if detected so there is no need to explicitly add them here.
+
+[id="{version}-plugins-{type}s-{plugin}-event_type"]
+===== `event_type`
+
+ * Value can be any of: `trigger`, `acknowledge`, `resolve`
+ * Default value is `"trigger"`
+
+Event type
+
+[id="{version}-plugins-{type}s-{plugin}-incident_key"]
+===== `incident_key`
+
+ * Value type is <>
+ * Default value is `"logstash/%{host}/%{type}"`
+
+The incident key to use. PagerDuty uses this key to deduplicate trigger events
+and to identify the incident to update for "acknowledge" and "resolve" event types.
+
+[id="{version}-plugins-{type}s-{plugin}-pdurl"]
+===== `pdurl`
+
+ * Value type is <>
+ * Default value is `"https://events.pagerduty.com/generic/2010-04-15/create_event.json"`
+
+PagerDuty API URL. You shouldn't need to change this, but it is included for
+flexibility in case PagerDuty changes the API before Logstash has been updated.
+
+[id="{version}-plugins-{type}s-{plugin}-service_key"]
+===== `service_key`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The PagerDuty Service API Key. You'll need to set this up in PagerDuty beforehand.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/pagerduty-v3.0.6.asciidoc b/docs/versioned-plugins/outputs/pagerduty-v3.0.6.asciidoc
new file mode 100644
index 000000000..fd2777193
--- /dev/null
+++ b/docs/versioned-plugins/outputs/pagerduty-v3.0.6.asciidoc
@@ -0,0 +1,105 @@
+:plugin: pagerduty
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.6
+:release_date: 2017-11-13
+:changelog_url: https://github.com/logstash-plugins/logstash-output-pagerduty/blob/v3.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Pagerduty output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The PagerDuty output will send notifications based on pre-configured services
+and escalation policies. Logstash can send "trigger", "acknowledge" and "resolve"
+event types. In addition, you may configure custom descriptions and event details.
+The only required field is the PagerDuty "Service API Key", which can be found on
+the service's web page on pagerduty.com. In the default case, the description and
+event details will be populated by Logstash, using `message`, `timestamp` and `host` data.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Pagerduty Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-description>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-details>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-event_type>> |<>, one of `["trigger", "acknowledge", "resolve"]`|No
+| <<{version}-plugins-{type}s-{plugin}-incident_key>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-pdurl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-service_key>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-description"]
+===== `description`
+
+ * Value type is <>
+ * Default value is `"Logstash event for %{host}"`
+
+Custom description
+
+[id="{version}-plugins-{type}s-{plugin}-details"]
+===== `details`
+
+ * Value type is <>
+ * Default value is `{"timestamp"=>"%{@timestamp}", "message"=>"%{message}"}`
+
+The event details. These might be data from the Logstash event fields you wish to include.
+Tags are automatically included if detected so there is no need to explicitly add them here.
+
+[id="{version}-plugins-{type}s-{plugin}-event_type"]
+===== `event_type`
+
+ * Value can be any of: `trigger`, `acknowledge`, `resolve`
+ * Default value is `"trigger"`
+
+Event type
+
+[id="{version}-plugins-{type}s-{plugin}-incident_key"]
+===== `incident_key`
+
+ * Value type is <>
+ * Default value is `"logstash/%{host}/%{type}"`
+
+The incident key to use. PagerDuty uses this key to deduplicate trigger events
+and to identify the incident to update for "acknowledge" and "resolve" event types.
+
+[id="{version}-plugins-{type}s-{plugin}-pdurl"]
+===== `pdurl`
+
+ * Value type is <>
+ * Default value is `"https://events.pagerduty.com/generic/2010-04-15/create_event.json"`
+
+PagerDuty API URL. You shouldn't need to change this, but it is included for
+flexibility in case PagerDuty changes the API before Logstash has been updated.
+
+[id="{version}-plugins-{type}s-{plugin}-service_key"]
+===== `service_key`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The PagerDuty Service API Key. You'll need to set this up in PagerDuty beforehand.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/pipe-index.asciidoc b/docs/versioned-plugins/outputs/pipe-index.asciidoc
new file mode 100644
index 000000000..b13faa213
--- /dev/null
+++ b/docs/versioned-plugins/outputs/pipe-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: pipe
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-13
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::pipe-v3.0.5.asciidoc[]
+include::pipe-v3.0.4.asciidoc[]
+include::pipe-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/pipe-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/pipe-v3.0.3.asciidoc
new file mode 100644
index 000000000..e68be7cd0
--- /dev/null
+++ b/docs/versioned-plugins/outputs/pipe-v3.0.3.asciidoc
@@ -0,0 +1,80 @@
+:plugin: pipe
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-pipe/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Pipe output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Pipe output.
+
+Pipe events to stdin of another program. You can use fields from the
+event as parts of the command.
+
+WARNING: This feature can cause Logstash to fork off multiple children if you are not careful with per-event command lines.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Pipe Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-command>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-message_format>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ttl>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-command"]
+===== `command`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Command line to launch and pipe to
+
+[id="{version}-plugins-{type}s-{plugin}-message_format"]
+===== `message_format`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The format to use when writing events to the pipe. This value
+supports any string and can include `%{name}` and other dynamic
+strings.
+
+If this setting is omitted, the full JSON representation of the
+event will be written as a single line.
+
+[id="{version}-plugins-{type}s-{plugin}-ttl"]
+===== `ttl`
+
+ * Value type is <>
+ * Default value is `10`
+
+Close pipe that hasn't been used for TTL seconds. -1 or 0 means never close.
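+
+As a hedged sketch (the archive path is hypothetical), the following
+configuration writes one compressed file per host. Because `command` is
+interpolated per event, each distinct `%{host}` value spawns its own child
+process, which is exactly the fork-heavy behavior the warning above describes:
+[source,ruby]
+    output {
+      pipe {
+        command => "gzip -c >> /var/log/archive/%{host}.log.gz"
+        ttl => 30
+      }
+    }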
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/pipe-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/pipe-v3.0.4.asciidoc
new file mode 100644
index 000000000..17e9d177e
--- /dev/null
+++ b/docs/versioned-plugins/outputs/pipe-v3.0.4.asciidoc
@@ -0,0 +1,80 @@
+:plugin: pipe
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-output-pipe/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Pipe output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Pipe output.
+
+Pipe events to stdin of another program. You can use fields from the
+event as parts of the command.
+
+WARNING: This feature can cause Logstash to fork off multiple children if you are not careful with per-event command lines.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Pipe Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-command>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-message_format>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ttl>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-command"]
+===== `command`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Command line to launch and pipe to
+
+[id="{version}-plugins-{type}s-{plugin}-message_format"]
+===== `message_format`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The format to use when writing events to the pipe. This value
+supports any string and can include `%{name}` and other dynamic
+strings.
+
+If this setting is omitted, the full JSON representation of the
+event will be written as a single line.
+
+[id="{version}-plugins-{type}s-{plugin}-ttl"]
+===== `ttl`
+
+ * Value type is <>
+ * Default value is `10`
+
+Close pipe that hasn't been used for TTL seconds. -1 or 0 means never close.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/pipe-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/pipe-v3.0.5.asciidoc
new file mode 100644
index 000000000..bb1da5e6b
--- /dev/null
+++ b/docs/versioned-plugins/outputs/pipe-v3.0.5.asciidoc
@@ -0,0 +1,80 @@
+:plugin: pipe
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-11-13
+:changelog_url: https://github.com/logstash-plugins/logstash-output-pipe/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Pipe output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Pipe output.
+
+Pipe events to stdin of another program. You can use fields from the
+event as parts of the command.
+
+WARNING: This feature can cause Logstash to fork off multiple children if you are not careful with per-event command lines.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Pipe Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-command>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-message_format>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ttl>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-command"]
+===== `command`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Command line to launch and pipe to
+
+[id="{version}-plugins-{type}s-{plugin}-message_format"]
+===== `message_format`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The format to use when writing events to the pipe. This value
+supports any string and can include `%{name}` and other dynamic
+strings.
+
+If this setting is omitted, the full JSON representation of the
+event will be written as a single line.
+
+[id="{version}-plugins-{type}s-{plugin}-ttl"]
+===== `ttl`
+
+ * Value type is <>
+ * Default value is `10`
+
+Close pipe that hasn't been used for TTL seconds. -1 or 0 means never close.
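+
+For example (the tag and field names are illustrative), without
+`message_format` each event is written as one JSON line; with it you control
+the exact line that reaches the program's stdin:
+[source,ruby]
+    output {
+      pipe {
+        command => "logger -t logstash"
+        message_format => "%{host} %{message}"
+      }
+    }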
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/rabbitmq-index.asciidoc b/docs/versioned-plugins/outputs/rabbitmq-index.asciidoc
new file mode 100644
index 000000000..c52a53ee9
--- /dev/null
+++ b/docs/versioned-plugins/outputs/rabbitmq-index.asciidoc
@@ -0,0 +1,24 @@
+:plugin: rabbitmq
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2018-01-09
+| <> | 2017-11-13
+| <> | 2017-09-20
+| <> | 2017-08-16
+| <> | 2017-08-02
+| <> | 2017-07-08
+| <> | 2017-06-23
+|=======================================================================
+
+include::rabbitmq-v5.1.0.asciidoc[]
+include::rabbitmq-v5.0.3.asciidoc[]
+include::rabbitmq-v5.0.2.asciidoc[]
+include::rabbitmq-v5.0.1.asciidoc[]
+include::rabbitmq-v5.0.0.asciidoc[]
+include::rabbitmq-v4.0.9.asciidoc[]
+include::rabbitmq-v4.0.8.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/rabbitmq-v4.0.8.asciidoc b/docs/versioned-plugins/outputs/rabbitmq-v4.0.8.asciidoc
new file mode 100644
index 000000000..b468cd039
--- /dev/null
+++ b/docs/versioned-plugins/outputs/rabbitmq-v4.0.8.asciidoc
@@ -0,0 +1,279 @@
+:plugin: rabbitmq
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.8
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-rabbitmq/blob/v4.0.8/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Rabbitmq output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Push events to a RabbitMQ exchange. Requires RabbitMQ 2.x
+or later version (3.x is recommended).
+
+Relevant links:
+
+* http://www.rabbitmq.com/[RabbitMQ]
+* http://rubymarchhare.info[March Hare]
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Rabbitmq Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>, one of `["fanout", "direct", "topic", "x-consistent-hash", "x-modulus-hash"]`|Yes +| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-persistent>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-arguments"] +===== `arguments` + + * Value type is <> + * Default value is `{}` + +Extra queue arguments as an array. +To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}` + +[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"] +===== `automatic_recovery` + + * Value type is <> + * Default value is `true` + +Set this to automatically recover from a broken connection. You almost certainly don't want to override this!!! + +[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"] +===== `connect_retry_interval` + + * Value type is <> + * Default value is `1` + +Time in seconds to wait before retrying a connection + +[id="{version}-plugins-{type}s-{plugin}-connection_timeout"] +===== `connection_timeout` + + * Value type is <> + * There is no default value for this setting. + +The default connection timeout in milliseconds. If not specified the timeout is infinite. + +[id="{version}-plugins-{type}s-{plugin}-debug"] +===== `debug` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `false` + +Enable or disable logging + +[id="{version}-plugins-{type}s-{plugin}-durable"] +===== `durable` + + * Value type is <> + * Default value is `true` + +Is this exchange durable? (aka; Should it survive a broker restart?) + +[id="{version}-plugins-{type}s-{plugin}-exchange"] +===== `exchange` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The name of the exchange + +[id="{version}-plugins-{type}s-{plugin}-exchange_type"] +===== `exchange_type` + + * This is a required setting. + * Value can be any of: `fanout`, `direct`, `topic`, `x-consistent-hash`, `x-modulus-hash` + * There is no default value for this setting. 
+
+The exchange type (fanout, topic, direct)
+
+[id="{version}-plugins-{type}s-{plugin}-heartbeat"]
+===== `heartbeat`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Heartbeat delay in seconds. If unspecified no heartbeats will be sent
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+RabbitMQ server address(es). `host` can either be a single host or a list of hosts,
+i.e.
+  host => "localhost"
+or
+  host => ["host01", "host02"]
+
+If multiple hosts are provided, on the initial connection and on any subsequent
+recovery attempts one of the hosts is chosen at random and connected to.
+Note that only one host connection is active at a time.
+
+[id="{version}-plugins-{type}s-{plugin}-key"]
+===== `key`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+Key to route to by default. Defaults to 'logstash'. The default codec for this
+plugin is JSON; you can override this to suit your particular needs, however.
+
+* Routing keys are ignored on fanout exchanges.
+
+[id="{version}-plugins-{type}s-{plugin}-passive"]
+===== `passive`
+
+ * Value type is <>
+ * Default value is `false`
+
+Passive queue creation? Useful for checking queue existence without modifying server state
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <>
+ * Default value is `"guest"`
+
+RabbitMQ password
+
+[id="{version}-plugins-{type}s-{plugin}-persistent"]
+===== `persistent`
+
+ * Value type is <>
+ * Default value is `true`
+
+Should RabbitMQ persist messages to disk?
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <>
+ * Default value is `5672`
+
+RabbitMQ port to connect on
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Enable or disable SSL.
+Note that by default remote certificate verification is off.
+Specify ssl_certificate_path and ssl_certificate_password if you need
+certificate verification
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"]
+===== `ssl_certificate_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Password for the encrypted PKCS12 (.p12) certificate file specified in ssl_certificate_path
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"]
+===== `ssl_certificate_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_version"]
+===== `ssl_version`
+
+ * Value type is <>
+ * Default value is `"TLSv1.2"`
+
+Version of the SSL protocol to use.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_certificate_password"]
+===== `tls_certificate_password` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * There is no default value for this setting.
+
+TLS certificate password
+
+[id="{version}-plugins-{type}s-{plugin}-tls_certificate_path"]
+===== `tls_certificate_path` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * There is no default value for this setting.
+
+TLS certificate path
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <>
+ * Default value is `"guest"`
+
+RabbitMQ username
+
+[id="{version}-plugins-{type}s-{plugin}-vhost"]
+===== `vhost`
+
+ * Value type is <>
+ * Default value is `"/"`
+
+The vhost (virtual host) to use. If you don't know what this
+is, leave the default. With the exception of the default
+vhost ("/"), names of vhosts should not begin with a forward
+slash.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/rabbitmq-v4.0.9.asciidoc b/docs/versioned-plugins/outputs/rabbitmq-v4.0.9.asciidoc
new file mode 100644
index 000000000..0aa3f6974
--- /dev/null
+++ b/docs/versioned-plugins/outputs/rabbitmq-v4.0.9.asciidoc
@@ -0,0 +1,293 @@
+:plugin: rabbitmq
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.9
+:release_date: 2017-07-08
+:changelog_url: https://github.com/logstash-plugins/logstash-output-rabbitmq/blob/v4.0.9/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Rabbitmq output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Push events to a RabbitMQ exchange. Requires RabbitMQ 2.x
+or later version (3.x is recommended).
+
+Relevant links:
+
+* http://www.rabbitmq.com/[RabbitMQ]
+* http://rubymarchhare.info[March Hare]
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Rabbitmq Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>, one of `["fanout", "direct", "topic", "x-consistent-hash", "x-modulus-hash"]`|Yes +| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-message_properties>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-persistent>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-arguments"] +===== `arguments` + + * Value type is <> + * Default value is `{}` + +Extra queue arguments as an array. +To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}` + +[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"] +===== `automatic_recovery` + + * Value type is <> + * Default value is `true` + +Set this to automatically recover from a broken connection. You almost certainly don't want to override this!!! + +[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"] +===== `connect_retry_interval` + + * Value type is <> + * Default value is `1` + +Time in seconds to wait before retrying a connection + +[id="{version}-plugins-{type}s-{plugin}-connection_timeout"] +===== `connection_timeout` + + * Value type is <> + * There is no default value for this setting. + +The default connection timeout in milliseconds. If not specified the timeout is infinite. + +[id="{version}-plugins-{type}s-{plugin}-debug"] +===== `debug` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `false` + +Enable or disable logging + +[id="{version}-plugins-{type}s-{plugin}-durable"] +===== `durable` + + * Value type is <> + * Default value is `true` + +Is this exchange durable? (aka; Should it survive a broker restart?) + +[id="{version}-plugins-{type}s-{plugin}-exchange"] +===== `exchange` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The name of the exchange + +[id="{version}-plugins-{type}s-{plugin}-exchange_type"] +===== `exchange_type` + + * This is a required setting. 
+ * Value can be any of: `fanout`, `direct`, `topic`, `x-consistent-hash`, `x-modulus-hash`
+ * There is no default value for this setting.
+
+The exchange type (fanout, topic, direct)
+
+[id="{version}-plugins-{type}s-{plugin}-heartbeat"]
+===== `heartbeat`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Heartbeat delay in seconds. If unspecified no heartbeats will be sent
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+RabbitMQ server address(es). `host` can either be a single host or a list of hosts,
+i.e.
+  host => "localhost"
+or
+  host => ["host01", "host02"]
+
+If multiple hosts are provided, on the initial connection and on any subsequent
+recovery attempts one of the hosts is chosen at random and connected to.
+Note that only one host connection is active at a time.
+
+[id="{version}-plugins-{type}s-{plugin}-key"]
+===== `key`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+Key to route to by default. Defaults to 'logstash'. The default codec for this
+plugin is JSON; you can override this to suit your particular needs, however.
+
+* Routing keys are ignored on fanout exchanges.
+
+[id="{version}-plugins-{type}s-{plugin}-message_properties"]
+===== `message_properties`
+
+ * Value type is <>
+ * Default value is `{}`
+
+Add properties to be set per-message here, such as 'Content-Type' or 'Priority'.
+
+Example:
+[source,ruby]
+    message_properties => { "priority" => "1" }
+
+
+[id="{version}-plugins-{type}s-{plugin}-passive"]
+===== `passive`
+
+ * Value type is <>
+ * Default value is `false`
+
+Passive queue creation? Useful for checking queue existence without modifying server state
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <>
+ * Default value is `"guest"`
+
+RabbitMQ password
+
+[id="{version}-plugins-{type}s-{plugin}-persistent"]
+===== `persistent`
+
+ * Value type is <>
+ * Default value is `true`
+
+Should RabbitMQ persist messages to disk?
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <>
+ * Default value is `5672`
+
+RabbitMQ port to connect on
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Enable or disable SSL.
+Note that by default remote certificate verification is off.
+Specify ssl_certificate_path and ssl_certificate_password if you need
+certificate verification
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"]
+===== `ssl_certificate_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Password for the encrypted PKCS12 (.p12) certificate file specified in ssl_certificate_path
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"]
+===== `ssl_certificate_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_version"]
+===== `ssl_version`
+
+ * Value type is <>
+ * Default value is `"TLSv1.2"`
+
+Version of the SSL protocol to use.
+
+[id="{version}-plugins-{type}s-{plugin}-tls_certificate_password"]
+===== `tls_certificate_password` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * There is no default value for this setting.
+
+TLS certificate password
+
+[id="{version}-plugins-{type}s-{plugin}-tls_certificate_path"]
+===== `tls_certificate_path` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * There is no default value for this setting.
+
+TLS certificate path
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <>
+ * Default value is `"guest"`
+
+RabbitMQ username
+
+[id="{version}-plugins-{type}s-{plugin}-vhost"]
+===== `vhost`
+
+ * Value type is <>
+ * Default value is `"/"`
+
+The vhost (virtual host) to use. If you don't know what this
+is, leave the default. With the exception of the default
+vhost ("/"), names of vhosts should not begin with a forward
+slash.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/rabbitmq-v5.0.0.asciidoc b/docs/versioned-plugins/outputs/rabbitmq-v5.0.0.asciidoc
new file mode 100644
index 000000000..2e6d28776
--- /dev/null
+++ b/docs/versioned-plugins/outputs/rabbitmq-v5.0.0.asciidoc
@@ -0,0 +1,266 @@
+:plugin: rabbitmq
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v5.0.0
+:release_date: 2017-08-02
+:changelog_url: https://github.com/logstash-plugins/logstash-output-rabbitmq/blob/v5.0.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Rabbitmq output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Push events to a RabbitMQ exchange. Requires RabbitMQ 2.x
+or later version (3.x is recommended).
+
+Relevant links:
+
+* http://www.rabbitmq.com/[RabbitMQ]
+* http://rubymarchhare.info[March Hare]
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Rabbitmq Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>, one of `["fanout", "direct", "topic", "x-consistent-hash", "x-modulus-hash"]`|Yes +| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-message_properties>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-persistent>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-arguments"] +===== `arguments` + + * Value type is <> + * Default value is `{}` + +Extra queue arguments as an array. +To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}` + +[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"] +===== `automatic_recovery` + + * Value type is <> + * Default value is `true` + +Set this to automatically recover from a broken connection. You almost certainly don't want to override this!!! + +[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"] +===== `connect_retry_interval` + + * Value type is <> + * Default value is `1` + +Time in seconds to wait before retrying a connection + +[id="{version}-plugins-{type}s-{plugin}-connection_timeout"] +===== `connection_timeout` + + * Value type is <> + * There is no default value for this setting. + +The default connection timeout in milliseconds. If not specified the timeout is infinite. + +[id="{version}-plugins-{type}s-{plugin}-durable"] +===== `durable` + + * Value type is <> + * Default value is `true` + +Is this exchange durable? (aka; Should it survive a broker restart?) + +[id="{version}-plugins-{type}s-{plugin}-exchange"] +===== `exchange` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The name of the exchange + +[id="{version}-plugins-{type}s-{plugin}-exchange_type"] +===== `exchange_type` + + * This is a required setting. + * Value can be any of: `fanout`, `direct`, `topic`, `x-consistent-hash`, `x-modulus-hash` + * There is no default value for this setting. + +The exchange type (fanout, topic, direct) + +[id="{version}-plugins-{type}s-{plugin}-heartbeat"] +===== `heartbeat` + + * Value type is <> + * There is no default value for this setting. + +Heartbeat delay in seconds. 
If unspecified no heartbeats will be sent
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+RabbitMQ server address(es). `host` can either be a single host or a list of hosts,
+i.e.
+  host => "localhost"
+or
+  host => ["host01", "host02"]
+
+If multiple hosts are provided, on the initial connection and on any subsequent
+recovery attempts one of the hosts is chosen at random and connected to.
+Note that only one host connection is active at a time.
+
+[id="{version}-plugins-{type}s-{plugin}-key"]
+===== `key`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+Key to route to by default. Defaults to 'logstash'. The default codec for this
+plugin is JSON; you can override this to suit your particular needs, however.
+
+* Routing keys are ignored on fanout exchanges.
+
+[id="{version}-plugins-{type}s-{plugin}-message_properties"]
+===== `message_properties`
+
+ * Value type is <>
+ * Default value is `{}`
+
+Add properties to be set per-message here, such as 'Content-Type' or 'Priority'.
+
+Example:
+[source,ruby]
+    message_properties => { "priority" => "1" }
+
+
+[id="{version}-plugins-{type}s-{plugin}-passive"]
+===== `passive`
+
+ * Value type is <>
+ * Default value is `false`
+
+Passive queue creation? Useful for checking queue existence without modifying server state
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <>
+ * Default value is `"guest"`
+
+RabbitMQ password
+
+[id="{version}-plugins-{type}s-{plugin}-persistent"]
+===== `persistent`
+
+ * Value type is <>
+ * Default value is `true`
+
+Should RabbitMQ persist messages to disk?
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <>
+ * Default value is `5672`
+
+RabbitMQ port to connect on
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Enable or disable SSL.
+Note that by default remote certificate verification is off.
+Specify ssl_certificate_path and ssl_certificate_password if you need
+certificate verification
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"]
+===== `ssl_certificate_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Password for the encrypted PKCS12 (.p12) certificate file specified in ssl_certificate_path
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"]
+===== `ssl_certificate_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_version"]
+===== `ssl_version`
+
+ * Value type is <>
+ * Default value is `"TLSv1.2"`
+
+Version of the SSL protocol to use.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <>
+ * Default value is `"guest"`
+
+RabbitMQ username
+
+[id="{version}-plugins-{type}s-{plugin}-vhost"]
+===== `vhost`
+
+ * Value type is <>
+ * Default value is `"/"`
+
+The vhost (virtual host) to use. If you don't know what this
+is, leave the default. With the exception of the default
+vhost ("/"), names of vhosts should not begin with a forward
+slash.
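+
+Tying these settings together, a hypothetical minimal configuration (host
+names, exchange name, and routing key are illustrative) that publishes events
+to a durable topic exchange might look like this:
+[source,ruby]
+    output {
+      rabbitmq {
+        host => ["rabbit01", "rabbit02"]
+        exchange => "logstash-events"
+        exchange_type => "topic"
+        key => "logs.%{host}"
+        durable => true
+      }
+    }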
+ + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/rabbitmq-v5.0.1.asciidoc b/docs/versioned-plugins/outputs/rabbitmq-v5.0.1.asciidoc new file mode 100644 index 000000000..708861ed7 --- /dev/null +++ b/docs/versioned-plugins/outputs/rabbitmq-v5.0.1.asciidoc @@ -0,0 +1,266 @@ +:plugin: rabbitmq +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v5.0.1 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-rabbitmq/blob/v5.0.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Rabbitmq output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Push events to a RabbitMQ exchange. Requires RabbitMQ 2.x +or later version (3.x is recommended). + +Relevant links: + +* http://www.rabbitmq.com/[RabbitMQ] +* http://rubymarchhare.info[March Hare] + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Rabbitmq Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>, one of `["fanout", "direct", "topic", "x-consistent-hash", "x-modulus-hash"]`|Yes +| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-message_properties>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-persistent>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-arguments"] +===== `arguments` + + * Value type is <> + * Default value is `{}` + +Extra queue arguments as an array. 
+To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}`
+
+[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"]
+===== `automatic_recovery`
+
+ * Value type is <>
+ * Default value is `true`
+
+Set this to automatically recover from a broken connection. You almost certainly don't want to override this!!!
+
+[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"]
+===== `connect_retry_interval`
+
+ * Value type is <>
+ * Default value is `1`
+
+Time in seconds to wait before retrying a connection
+
+[id="{version}-plugins-{type}s-{plugin}-connection_timeout"]
+===== `connection_timeout`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The default connection timeout in milliseconds. If not specified the timeout is infinite.
+
+[id="{version}-plugins-{type}s-{plugin}-durable"]
+===== `durable`
+
+ * Value type is <>
+ * Default value is `true`
+
+Is this exchange durable? (aka; Should it survive a broker restart?)
+
+[id="{version}-plugins-{type}s-{plugin}-exchange"]
+===== `exchange`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The name of the exchange
+
+[id="{version}-plugins-{type}s-{plugin}-exchange_type"]
+===== `exchange_type`
+
+ * This is a required setting.
+ * Value can be any of: `fanout`, `direct`, `topic`, `x-consistent-hash`, `x-modulus-hash`
+ * There is no default value for this setting.
+
+The exchange type (fanout, topic, direct)
+
+[id="{version}-plugins-{type}s-{plugin}-heartbeat"]
+===== `heartbeat`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Heartbeat delay in seconds. If unspecified no heartbeats will be sent
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+RabbitMQ server address(es). `host` can either be a single host or a list of hosts,
+i.e.
+  host => "localhost"
+or
+  host => ["host01", "host02"]
+
+If multiple hosts are provided, on the initial connection and on any subsequent
+recovery attempts one of the hosts is chosen at random and connected to.
+Note that only one host connection is active at a time.
+
+[id="{version}-plugins-{type}s-{plugin}-key"]
+===== `key`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+Key to route to by default. Defaults to 'logstash'. The default codec for this
+plugin is JSON; you can override this to suit your particular needs, however.
+
+* Routing keys are ignored on fanout exchanges.
+
+[id="{version}-plugins-{type}s-{plugin}-message_properties"]
+===== `message_properties`
+
+ * Value type is <>
+ * Default value is `{}`
+
+Add properties to be set per-message here, such as 'Content-Type' or 'Priority'.
+
+Example:
+[source,ruby]
+    message_properties => { "priority" => "1" }
+
+
+[id="{version}-plugins-{type}s-{plugin}-passive"]
+===== `passive`
+
+ * Value type is <>
+ * Default value is `false`
+
+Passive queue creation? Useful for checking queue existence without modifying server state
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <>
+ * Default value is `"guest"`
+
+RabbitMQ password
+
+[id="{version}-plugins-{type}s-{plugin}-persistent"]
+===== `persistent`
+
+ * Value type is <>
+ * Default value is `true`
+
+Should RabbitMQ persist messages to disk?
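+
+Note that `persistent` marks individual messages as persistent, while
+`durable` (above) makes the exchange itself survive a broker restart; for
+messages to actually survive a restart you generally need both, plus a
+durable queue bound on the consuming side. A hypothetical fragment combining
+them (the exchange name is illustrative):
+[source,ruby]
+    rabbitmq {
+      host => "localhost"
+      exchange => "logstash-events"
+      exchange_type => "direct"
+      durable => true
+      persistent => true
+    }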
+ +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `5672` + +RabbitMQ port to connect on + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * There is no default value for this setting. + +Enable or disable SSL. +Note that by default remote certificate verification is off. +Specify ssl_certificate_path and ssl_certificate_password if you need +certificate verification + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"] +===== `ssl_certificate_password` + + * Value type is <> + * There is no default value for this setting. + +Password for the encrypted PKCS12 (.p12) certificate file specified in ssl_certificate_path + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"] +===== `ssl_certificate_path` + + * Value type is <> + * There is no default value for this setting. + +Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host + +[id="{version}-plugins-{type}s-{plugin}-ssl_version"] +===== `ssl_version` + + * Value type is <> + * Default value is `"TLSv1.2"` + +Version of the SSL protocol to use. + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * Default value is `"guest"` + +RabbitMQ username + +[id="{version}-plugins-{type}s-{plugin}-vhost"] +===== `vhost` + + * Value type is <> + * Default value is `"/"` + +The vhost (virtual host) to use. If you don't know what this +is, leave the default. With the exception of the default +vhost ("/"), names of vhosts should not begin with a forward +slash. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/rabbitmq-v5.0.2.asciidoc b/docs/versioned-plugins/outputs/rabbitmq-v5.0.2.asciidoc new file mode 100644 index 000000000..f2f5a5adf --- /dev/null +++ b/docs/versioned-plugins/outputs/rabbitmq-v5.0.2.asciidoc @@ -0,0 +1,266 @@ +:plugin: rabbitmq +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v5.0.2 +:release_date: 2017-09-20 +:changelog_url: https://github.com/logstash-plugins/logstash-output-rabbitmq/blob/v5.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Rabbitmq output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Push events to a RabbitMQ exchange. Requires RabbitMQ 2.x +or later version (3.x is recommended). + +Relevant links: + +* http://www.rabbitmq.com/[RabbitMQ] +* http://rubymarchhare.info[March Hare] + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Rabbitmq Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>, one of `["fanout", "direct", "topic", "x-consistent-hash", "x-modulus-hash"]`|Yes
+| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-message_properties>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-persistent>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-arguments"]
+===== `arguments`
+
+ * Value type is <>
+ * Default value is `{}`
+
+Extra queue arguments, specified as a hash.
+To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}`
+
+[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"]
+===== `automatic_recovery`
+
+ * Value type is <>
+ * Default value is `true`
+
+Set this to automatically recover from a broken connection. You almost certainly don't want to override this.
+
+[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"]
+===== `connect_retry_interval`
+
+ * Value type is <>
+ * Default value is `1`
+
+Time in seconds to wait before retrying a connection.
+
+[id="{version}-plugins-{type}s-{plugin}-connection_timeout"]
+===== `connection_timeout`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The connection timeout in milliseconds. If not specified, the timeout is infinite.
+
+[id="{version}-plugins-{type}s-{plugin}-durable"]
+===== `durable`
+
+ * Value type is <>
+ * Default value is `true`
+
+Is this exchange durable? (That is, should it survive a broker restart?)
+
+[id="{version}-plugins-{type}s-{plugin}-exchange"]
+===== `exchange`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The name of the exchange.
+
+[id="{version}-plugins-{type}s-{plugin}-exchange_type"]
+===== `exchange_type`
+
+ * This is a required setting.
+ * Value can be any of: `fanout`, `direct`, `topic`, `x-consistent-hash`, `x-modulus-hash`
+ * There is no default value for this setting.
+
+The exchange type (for example, `fanout`, `topic`, or `direct`).
+
+[id="{version}-plugins-{type}s-{plugin}-heartbeat"]
+===== `heartbeat`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Heartbeat delay in seconds. If unspecified, no heartbeats will be sent.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+RabbitMQ server address(es). The value can either be a single host
+or a list of hosts, for example:
+
+    host => "localhost"
+
+or
+
+    host => ["host01", "host02"]
+
+If multiple hosts are provided, one host is chosen at random for the
+initial connection and for any subsequent recovery attempts.
+Note that only one host connection is active at a time.
+
+[id="{version}-plugins-{type}s-{plugin}-key"]
+===== `key`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+The routing key to route to by default. Defaults to `logstash`.
+The default codec for this plugin is JSON; you can override it to suit
+your particular needs.
+
+* Routing keys are ignored on fanout exchanges.
+
+[id="{version}-plugins-{type}s-{plugin}-message_properties"]
+===== `message_properties`
+
+ * Value type is <>
+ * Default value is `{}`
+
+Add properties to be set per-message here, such as 'Content-Type' or 'Priority'.
+
+Example:
+[source,ruby]
+    message_properties => { "priority" => "1" }
+
+
+[id="{version}-plugins-{type}s-{plugin}-passive"]
+===== `passive`
+
+ * Value type is <>
+ * Default value is `false`
+
+Passive queue creation? Useful for checking queue existence without modifying server state.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <>
+ * Default value is `"guest"`
+
+RabbitMQ password
+
+[id="{version}-plugins-{type}s-{plugin}-persistent"]
+===== `persistent`
+
+ * Value type is <>
+ * Default value is `true`
+
+Should RabbitMQ persist messages to disk?
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <>
+ * Default value is `5672`
+
+RabbitMQ port to connect on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Enable or disable SSL.
+Note that by default remote certificate verification is off.
+Specify `ssl_certificate_path` and `ssl_certificate_password` if you need
+certificate verification.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"]
+===== `ssl_certificate_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Password for the encrypted PKCS12 (.p12) certificate file specified in `ssl_certificate_path`.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"]
+===== `ssl_certificate_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path to an SSL certificate in PKCS12 (.p12) format, used for verifying the remote host.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_version"]
+===== `ssl_version`
+
+ * Value type is <>
+ * Default value is `"TLSv1.2"`
+
+Version of the SSL protocol to use.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <>
+ * Default value is `"guest"`
+
+RabbitMQ username
+
+[id="{version}-plugins-{type}s-{plugin}-vhost"]
+===== `vhost`
+
+ * Value type is <>
+ * Default value is `"/"`
+
+The vhost (virtual host) to use. If you don't know what this
+is, leave the default. With the exception of the default
+vhost ("/"), names of vhosts should not begin with a forward
+slash.
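+
+For illustration, the following sketch enables SSL with certificate
+verification. The host, exchange, certificate path, and password
+reference are placeholders, not values taken from this document:
+
+[source,ruby]
+----
+output {
+  rabbitmq {
+    host => "rmq.example.com"                      # placeholder host
+    exchange => "logstash"                         # placeholder exchange name
+    exchange_type => "fanout"
+    ssl => true
+    ssl_certificate_path => "/etc/ssl/client.p12"  # hypothetical PKCS12 path
+    ssl_certificate_password => "changeme"         # hypothetical password
+    ssl_version => "TLSv1.2"
+  }
+}
+----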
+ + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/rabbitmq-v5.0.3.asciidoc b/docs/versioned-plugins/outputs/rabbitmq-v5.0.3.asciidoc new file mode 100644 index 000000000..780baefdb --- /dev/null +++ b/docs/versioned-plugins/outputs/rabbitmq-v5.0.3.asciidoc @@ -0,0 +1,266 @@ +:plugin: rabbitmq +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v5.0.3 +:release_date: 2017-11-13 +:changelog_url: https://github.com/logstash-plugins/logstash-output-rabbitmq/blob/v5.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Rabbitmq output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Push events to a RabbitMQ exchange. Requires RabbitMQ 2.x +or later version (3.x is recommended). + +Relevant links: + +* http://www.rabbitmq.com/[RabbitMQ] +* http://rubymarchhare.info[March Hare] + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Rabbitmq Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>, one of `["fanout", "direct", "topic", "x-consistent-hash", "x-modulus-hash"]`|Yes +| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-message_properties>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-persistent>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-arguments"] +===== `arguments` + + * Value type is <> + * Default value is `{}` + +Extra queue arguments as an array. 
+To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}`
+
+[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"]
+===== `automatic_recovery`
+
+ * Value type is <>
+ * Default value is `true`
+
+Set this to automatically recover from a broken connection. You almost certainly don't want to override this.
+
+[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"]
+===== `connect_retry_interval`
+
+ * Value type is <>
+ * Default value is `1`
+
+Time in seconds to wait before retrying a connection.
+
+[id="{version}-plugins-{type}s-{plugin}-connection_timeout"]
+===== `connection_timeout`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The connection timeout in milliseconds. If not specified, the timeout is infinite.
+
+[id="{version}-plugins-{type}s-{plugin}-durable"]
+===== `durable`
+
+ * Value type is <>
+ * Default value is `true`
+
+Is this exchange durable? (That is, should it survive a broker restart?)
+
+[id="{version}-plugins-{type}s-{plugin}-exchange"]
+===== `exchange`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The name of the exchange.
+
+[id="{version}-plugins-{type}s-{plugin}-exchange_type"]
+===== `exchange_type`
+
+ * This is a required setting.
+ * Value can be any of: `fanout`, `direct`, `topic`, `x-consistent-hash`, `x-modulus-hash`
+ * There is no default value for this setting.
+
+The exchange type (for example, `fanout`, `topic`, or `direct`).
+
+[id="{version}-plugins-{type}s-{plugin}-heartbeat"]
+===== `heartbeat`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Heartbeat delay in seconds. If unspecified, no heartbeats will be sent.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+RabbitMQ server address(es). The value can either be a single host
+or a list of hosts, for example:
+
+    host => "localhost"
+
+or
+
+    host => ["host01", "host02"]
+
+If multiple hosts are provided, one host is chosen at random for the
+initial connection and for any subsequent recovery attempts.
+Note that only one host connection is active at a time.
+
+[id="{version}-plugins-{type}s-{plugin}-key"]
+===== `key`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+The routing key to route to by default. Defaults to `logstash`.
+The default codec for this plugin is JSON; you can override it to suit
+your particular needs.
+
+* Routing keys are ignored on fanout exchanges.
+
+[id="{version}-plugins-{type}s-{plugin}-message_properties"]
+===== `message_properties`
+
+ * Value type is <>
+ * Default value is `{}`
+
+Add properties to be set per-message here, such as 'Content-Type' or 'Priority'.
+
+Example:
+[source,ruby]
+    message_properties => { "priority" => "1" }
+
+
+[id="{version}-plugins-{type}s-{plugin}-passive"]
+===== `passive`
+
+ * Value type is <>
+ * Default value is `false`
+
+Passive queue creation? Useful for checking queue existence without modifying server state.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <>
+ * Default value is `"guest"`
+
+RabbitMQ password
+
+[id="{version}-plugins-{type}s-{plugin}-persistent"]
+===== `persistent`
+
+ * Value type is <>
+ * Default value is `true`
+
+Should RabbitMQ persist messages to disk?
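+
+For illustration, a sketch that publishes to a topic exchange. The host,
+exchange name, and routing key are placeholders, not values taken from
+this document:
+
+[source,ruby]
+----
+output {
+  rabbitmq {
+    host => "localhost"
+    exchange => "logs"         # placeholder topic exchange
+    exchange_type => "topic"
+    key => "logstash.events"   # routing key that consumers can bind against
+  }
+}
+----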
+ +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `5672` + +RabbitMQ port to connect on + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * There is no default value for this setting. + +Enable or disable SSL. +Note that by default remote certificate verification is off. +Specify ssl_certificate_path and ssl_certificate_password if you need +certificate verification + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"] +===== `ssl_certificate_password` + + * Value type is <> + * There is no default value for this setting. + +Password for the encrypted PKCS12 (.p12) certificate file specified in ssl_certificate_path + +[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"] +===== `ssl_certificate_path` + + * Value type is <> + * There is no default value for this setting. + +Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host + +[id="{version}-plugins-{type}s-{plugin}-ssl_version"] +===== `ssl_version` + + * Value type is <> + * Default value is `"TLSv1.2"` + +Version of the SSL protocol to use. + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * Default value is `"guest"` + +RabbitMQ username + +[id="{version}-plugins-{type}s-{plugin}-vhost"] +===== `vhost` + + * Value type is <> + * Default value is `"/"` + +The vhost (virtual host) to use. If you don't know what this +is, leave the default. With the exception of the default +vhost ("/"), names of vhosts should not begin with a forward +slash. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/rabbitmq-v5.1.0.asciidoc b/docs/versioned-plugins/outputs/rabbitmq-v5.1.0.asciidoc new file mode 100644 index 000000000..96681e495 --- /dev/null +++ b/docs/versioned-plugins/outputs/rabbitmq-v5.1.0.asciidoc @@ -0,0 +1,266 @@ +:plugin: rabbitmq +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v5.1.0 +:release_date: 2018-01-09 +:changelog_url: https://github.com/logstash-plugins/logstash-output-rabbitmq/blob/v5.1.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Rabbitmq output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Push events to a RabbitMQ exchange. Requires RabbitMQ 2.x +or later version (3.x is recommended). + +Relevant links: + +* http://www.rabbitmq.com/[RabbitMQ] +* http://rubymarchhare.info[March Hare] + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Rabbitmq Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>, one of `["fanout", "direct", "topic", "x-consistent-hash", "x-modulus-hash"]`|Yes
+| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-message_properties>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-persistent>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-arguments"]
+===== `arguments`
+
+ * Value type is <>
+ * Default value is `{}`
+
+Extra queue arguments, specified as a hash.
+To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}`
+
+[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"]
+===== `automatic_recovery`
+
+ * Value type is <>
+ * Default value is `true`
+
+Set this to automatically recover from a broken connection. You almost certainly don't want to override this.
+
+[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"]
+===== `connect_retry_interval`
+
+ * Value type is <>
+ * Default value is `1`
+
+Time in seconds to wait before retrying a connection.
+
+[id="{version}-plugins-{type}s-{plugin}-connection_timeout"]
+===== `connection_timeout`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The connection timeout in milliseconds. If not specified, the timeout is infinite.
+
+[id="{version}-plugins-{type}s-{plugin}-durable"]
+===== `durable`
+
+ * Value type is <>
+ * Default value is `true`
+
+Is this exchange durable? (That is, should it survive a broker restart?)
+
+[id="{version}-plugins-{type}s-{plugin}-exchange"]
+===== `exchange`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The name of the exchange.
+
+[id="{version}-plugins-{type}s-{plugin}-exchange_type"]
+===== `exchange_type`
+
+ * This is a required setting.
+ * Value can be any of: `fanout`, `direct`, `topic`, `x-consistent-hash`, `x-modulus-hash`
+ * There is no default value for this setting.
+
+The exchange type (for example, `fanout`, `topic`, or `direct`).
+
+[id="{version}-plugins-{type}s-{plugin}-heartbeat"]
+===== `heartbeat`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Heartbeat delay in seconds. If unspecified, no heartbeats will be sent.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+RabbitMQ server address(es). The value can either be a single host
+or a list of hosts, for example:
+
+    host => "localhost"
+
+or
+
+    host => ["host01", "host02"]
+
+If multiple hosts are provided, one host is chosen at random for the
+initial connection and for any subsequent recovery attempts.
+Note that only one host connection is active at a time.
+
+[id="{version}-plugins-{type}s-{plugin}-key"]
+===== `key`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+The routing key to route to by default. Defaults to `logstash`.
+The default codec for this plugin is JSON; you can override it to suit
+your particular needs.
+
+* Routing keys are ignored on fanout exchanges.
+
+[id="{version}-plugins-{type}s-{plugin}-message_properties"]
+===== `message_properties`
+
+ * Value type is <>
+ * Default value is `{}`
+
+Add properties to be set per-message here, such as 'Content-Type' or 'Priority'.
+
+Example:
+[source,ruby]
+    message_properties => { "priority" => "1" }
+
+
+[id="{version}-plugins-{type}s-{plugin}-passive"]
+===== `passive`
+
+ * Value type is <>
+ * Default value is `false`
+
+Passive queue creation? Useful for checking queue existence without modifying server state.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <>
+ * Default value is `"guest"`
+
+RabbitMQ password
+
+[id="{version}-plugins-{type}s-{plugin}-persistent"]
+===== `persistent`
+
+ * Value type is <>
+ * Default value is `true`
+
+Should RabbitMQ persist messages to disk?
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <>
+ * Default value is `5672`
+
+RabbitMQ port to connect on.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl"]
+===== `ssl`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Enable or disable SSL.
+Note that by default remote certificate verification is off.
+Specify `ssl_certificate_path` and `ssl_certificate_password` if you need
+certificate verification.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"]
+===== `ssl_certificate_password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Password for the encrypted PKCS12 (.p12) certificate file specified in `ssl_certificate_path`.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"]
+===== `ssl_certificate_path`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Path to an SSL certificate in PKCS12 (.p12) format, used for verifying the remote host.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_version"]
+===== `ssl_version`
+
+ * Value type is <>
+ * Default value is `"TLSv1.2"`
+
+Version of the SSL protocol to use.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * Value type is <>
+ * Default value is `"guest"`
+
+RabbitMQ username
+
+[id="{version}-plugins-{type}s-{plugin}-vhost"]
+===== `vhost`
+
+ * Value type is <>
+ * Default value is `"/"`
+
+The vhost (virtual host) to use. If you don't know what this
+is, leave the default. With the exception of the default
+vhost ("/"), names of vhosts should not begin with a forward
+slash.
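+
+For illustration, a sketch that tunes connection recovery across multiple
+brokers. The host names and values are placeholders, not recommendations
+from this document:
+
+[source,ruby]
+----
+output {
+  rabbitmq {
+    host => ["rmq01", "rmq02"]     # placeholder hosts; only one is active at a time
+    exchange => "logstash"         # placeholder exchange name
+    exchange_type => "direct"
+    automatic_recovery => true     # the default; recover broken connections
+    connect_retry_interval => 5    # wait 5 seconds between reconnect attempts
+    heartbeat => 30                # send heartbeats every 30 seconds
+  }
+}
+----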
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/rackspace-index.asciidoc b/docs/versioned-plugins/outputs/rackspace-index.asciidoc
new file mode 100644
index 000000000..025a83289
--- /dev/null
+++ b/docs/versioned-plugins/outputs/rackspace-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: rackspace
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::rackspace-v2.0.7.asciidoc[]
+include::rackspace-v2.0.5.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/rackspace-v2.0.5.asciidoc b/docs/versioned-plugins/outputs/rackspace-v2.0.5.asciidoc
new file mode 100644
index 000000000..ab1500c42
--- /dev/null
+++ b/docs/versioned-plugins/outputs/rackspace-v2.0.5.asciidoc
@@ -0,0 +1,91 @@
+:plugin: rackspace
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.5
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-rackspace/blob/v2.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Rackspace output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Sends events to a Rackspace Cloud Queue service.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Rackspace Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-api_key>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-region>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ttl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-username>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-api_key"]
+===== `api_key`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Rackspace Cloud API Key
+
+[id="{version}-plugins-{type}s-{plugin}-queue"]
+===== `queue`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+Rackspace Queue Name
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+ * Value type is <>
+ * Default value is `"dfw"`
+
+Rackspace region, for example: `ord`, `dfw`, `lon`, or `syd`.
+
+[id="{version}-plugins-{type}s-{plugin}-ttl"]
+===== `ttl`
+
+ * Value type is <>
+ * Default value is `360`
+
+Time for the item to live in the queue.
+
+[id="{version}-plugins-{type}s-{plugin}-username"]
+===== `username`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Rackspace Cloud Username
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/rackspace-v2.0.7.asciidoc b/docs/versioned-plugins/outputs/rackspace-v2.0.7.asciidoc
new file mode 100644
index 000000000..923b9d7ee
--- /dev/null
+++ b/docs/versioned-plugins/outputs/rackspace-v2.0.7.asciidoc
@@ -0,0 +1,91 @@
+:plugin: rackspace
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.7
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-output-rackspace/blob/v2.0.7/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Rackspace output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Sends events to a Rackspace Cloud Queue service.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Rackspace Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-api_key>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-region>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ttl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-username>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-api_key"]
+===== `api_key`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Rackspace Cloud API Key
+
+[id="{version}-plugins-{type}s-{plugin}-queue"]
+===== `queue`
+
+ * Value type is <>
+ * Default value is `"logstash"`
+
+Rackspace Queue Name
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+ * Value type is <>
+ * Default value is `"dfw"`
+
+Rackspace region, for example: `ord`, `dfw`, `lon`, or `syd`.
+
+[id="{version}-plugins-{type}s-{plugin}-ttl"]
+===== `ttl`
+
+ * Value type is <>
+ * Default value is `360`
+
+Time for the item to live in the queue.
+
+[id="{version}-plugins-{type}s-{plugin}-username"]
+===== `username`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+Rackspace Cloud Username
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/rados-index.asciidoc b/docs/versioned-plugins/outputs/rados-index.asciidoc
new file mode 100644
index 000000000..5bef240c6
--- /dev/null
+++ b/docs/versioned-plugins/outputs/rados-index.asciidoc
@@ -0,0 +1,10 @@
+:plugin: rados
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+|=======================================================================
+
+
diff --git a/docs/versioned-plugins/outputs/redis-index.asciidoc b/docs/versioned-plugins/outputs/redis-index.asciidoc
new file mode 100644
index 000000000..57e4d7c7a
--- /dev/null
+++ b/docs/versioned-plugins/outputs/redis-index.asciidoc
@@ -0,0 +1,22 @@
+:plugin: redis
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-13
+| <> | 2017-09-12
+| <> | 2017-08-16
+| <> | 2017-07-27
+| <> | 2017-08-18
+| <> | 2017-06-23
+|=======================================================================
+
+include::redis-v4.0.3.asciidoc[]
+include::redis-v4.0.2.asciidoc[]
+include::redis-v4.0.1.asciidoc[]
+include::redis-v4.0.0.asciidoc[]
+include::redis-v3.0.5.asciidoc[]
+include::redis-v3.0.4.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/redis-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/redis-v3.0.4.asciidoc
new file mode 100644
index 000000000..2f61c8777
--- /dev/null
+++ b/docs/versioned-plugins/outputs/redis-v3.0.4.asciidoc
@@ -0,0 +1,221 @@
+:plugin: redis
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-redis/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Redis output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output will send events to a Redis queue using RPUSH.
+The RPUSH command is supported in Redis v0.0.7+. Using
+PUBLISH to a channel requires at least v1.3.8.
+While you may be able to make these Redis versions work,
+the best performance and stability will be found in more
+recent stable versions. Versions 2.6.0+ are recommended.
+
+For more information, see http://redis.io/[the Redis homepage].
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Redis Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-batch>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-batch_events>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-batch_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-congestion_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-congestion_threshold>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-data_type>> |<>, one of `["list", "channel"]`|No
+| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-shuffle_hosts>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-batch"]
+===== `batch`
+
+ * Value type is <>
+ * Default value is `false`
+
+Set to true if you want Redis to batch up values and send one RPUSH command
+instead of one command per value to push on the list. Note that this only
+works with `data_type="list"` mode right now.
+
+If true, we send an RPUSH every "batch_events" events or
+"batch_timeout" seconds (whichever comes first).
+Only supported when `data_type` is "list".
+
+[id="{version}-plugins-{type}s-{plugin}-batch_events"]
+===== `batch_events`
+
+ * Value type is <>
+ * Default value is `50`
+
+If batch is set to true, the number of events we queue up for an RPUSH.
+
+[id="{version}-plugins-{type}s-{plugin}-batch_timeout"]
+===== `batch_timeout`
+
+ * Value type is <>
+ * Default value is `5`
+
+If batch is set to true, the maximum amount of time between RPUSH commands
+when there are pending events to flush.
+
+[id="{version}-plugins-{type}s-{plugin}-congestion_interval"]
+===== `congestion_interval`
+
+ * Value type is <>
+ * Default value is `1`
+
+How often to check for congestion. Default is one second.
+Zero means to check on every event.
+
+[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"]
+===== `congestion_threshold`
+
+ * Value type is <>
+ * Default value is `0`
+
+If the Redis `data_type` is `list` and the list has more than
+`@congestion_threshold` items, block until someone consumes them and
+reduces congestion; otherwise, if there are no consumers, Redis will run
+out of memory (unless it was configured with OOM protection). Even with
+OOM protection, a single Redis list can block all other users of Redis
+until Redis memory consumption reaches the maximum allowed size.
+The default value of 0 disables this limit.
+Only supported when the Redis `data_type` is `list`.
+
+[id="{version}-plugins-{type}s-{plugin}-data_type"]
+===== `data_type`
+
+ * Value can be any of: `list`, `channel`
+ * There is no default value for this setting.
+
+Either list or channel. If `data_type` is list, then we will RPUSH to
+`key`. If `data_type` is channel, then we will PUBLISH to `key`.
+
+[id="{version}-plugins-{type}s-{plugin}-db"]
+===== `db`
+
+ * Value type is <>
+ * Default value is `0`
+
+The Redis database number.
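+
+For illustration, a minimal sketch of the list mode with batching, as
+described above. The host and key values are placeholders, not values
+taken from this document:
+
+[source,ruby]
+----
+output {
+  redis {
+    host => ["127.0.0.1:6380", "127.0.0.1"]  # placeholder hosts; one picked at random
+    data_type => "list"
+    key => "logstash"      # placeholder list name
+    batch => true          # send one RPUSH per batch instead of per event
+    batch_events => 50
+    batch_timeout => 5
+  }
+}
+----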
+ +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `["127.0.0.1"]` + +The hostname(s) of your Redis server(s). Ports may be specified on any +hostname, which will override the global port config. +If the hosts list is an array, Logstash will pick one random host to connect to, +if that host is disconnected it will then pick another. + +For example: +[source,ruby] + "127.0.0.1" + ["127.0.0.1", "127.0.0.2"] + ["127.0.0.1:6380", "127.0.0.1"] + +[id="{version}-plugins-{type}s-{plugin}-key"] +===== `key` + + * Value type is <> + * There is no default value for this setting. + +The name of a Redis list or channel. Dynamic names are +valid here, for example `logstash-%{type}`. + +[id="{version}-plugins-{type}s-{plugin}-name"] +===== `name` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `"default"` + +Name is used for logging in case there are multiple instances. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to authenticate with. There is no authentication by default. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `6379` + +The default port to connect on. Can be overridden on any hostname. + +[id="{version}-plugins-{type}s-{plugin}-queue"] +===== `queue` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + +The name of the Redis queue (we'll use RPUSH on this). Dynamic names are +valid here, for example `logstash-%{type}` + +[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] +===== `reconnect_interval` + + * Value type is <> + * Default value is `1` + +Interval for reconnecting to failed Redis connections + +[id="{version}-plugins-{type}s-{plugin}-shuffle_hosts"] +===== `shuffle_hosts` + + * Value type is <> + * Default value is `true` + +Shuffle the host list during Logstash startup. + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `5` + +Redis initial connection timeout in seconds. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/redis-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/redis-v3.0.5.asciidoc new file mode 100644 index 000000000..8f3b1939a --- /dev/null +++ b/docs/versioned-plugins/outputs/redis-v3.0.5.asciidoc @@ -0,0 +1,221 @@ +:plugin: redis +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-08-18 +:changelog_url: https://github.com/logstash-plugins/logstash-output-redis/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Redis output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output will send events to a Redis queue using RPUSH. +The RPUSH command is supported in Redis v0.0.7+. 
Using +PUBLISH to a channel requires at least v1.3.8+. +While you may be able to make these Redis versions work, +the best performance and stability will be found in more +recent stable versions. Versions 2.6.0+ are recommended. + +For more information, see http://redis.io/[the Redis homepage] + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Redis Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-batch>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-batch_events>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-batch_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-congestion_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-congestion_threshold>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-data_type>> |<>, one of `["list", "channel"]`|No +| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-shuffle_hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-batch"] +===== `batch` + + * Value type is <> + * Default value is `false` + +Set to true if you want Redis to batch up values and send 1 RPUSH command +instead of one command per value to push on the list. Note that this only +works with `data_type="list"` mode right now. + +If true, we send an RPUSH every "batch_events" events or +"batch_timeout" seconds (whichever comes first). +Only supported for `data_type` is "list". + +[id="{version}-plugins-{type}s-{plugin}-batch_events"] +===== `batch_events` + + * Value type is <> + * Default value is `50` + +If batch is set to true, the number of events we queue up for an RPUSH. + +[id="{version}-plugins-{type}s-{plugin}-batch_timeout"] +===== `batch_timeout` + + * Value type is <> + * Default value is `5` + +If batch is set to true, the maximum amount of time between RPUSH commands +when there are pending events to flush. + +[id="{version}-plugins-{type}s-{plugin}-congestion_interval"] +===== `congestion_interval` + + * Value type is <> + * Default value is `1` + +How often to check for congestion. Default is one second. +Zero means to check on every event. + +[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"] +===== `congestion_threshold` + + * Value type is <> + * Default value is `0` + +In case Redis `data_type` is `list` and has more than `@congestion_threshold` items, +block until someone consumes them and reduces congestion, otherwise if there are +no consumers Redis will run out of memory, unless it was configured with OOM protection. +But even with OOM protection, a single Redis list can block all other users of Redis, +until Redis CPU consumption reaches the max allowed RAM size. +A default value of 0 means that this limit is disabled. 
+Only supported when the Redis `data_type` is `list`.
+
+[id="{version}-plugins-{type}s-{plugin}-data_type"]
+===== `data_type`
+
+ * Value can be any of: `list`, `channel`
+ * There is no default value for this setting.
+
+Either list or channel. If `data_type` is list, then we will RPUSH to
+`key`. If `data_type` is channel, then we will PUBLISH to `key`.
+
+[id="{version}-plugins-{type}s-{plugin}-db"]
+===== `db`
+
+ * Value type is <>
+ * Default value is `0`
+
+The Redis database number.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * Default value is `["127.0.0.1"]`
+
+The hostname(s) of your Redis server(s). Ports may be specified on any
+hostname, which will override the global port config.
+If the hosts list is an array, Logstash will pick one random host to connect to;
+if that host is disconnected, it will then pick another.
+
+For example:
+[source,ruby]
+    "127.0.0.1"
+    ["127.0.0.1", "127.0.0.2"]
+    ["127.0.0.1:6380", "127.0.0.1"]
+
+[id="{version}-plugins-{type}s-{plugin}-key"]
+===== `key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The name of a Redis list or channel. Dynamic names are
+valid here, for example `logstash-%{type}`.
+
+[id="{version}-plugins-{type}s-{plugin}-name"]
+===== `name` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * Default value is `"default"`
+
+Name is used for logging in case there are multiple instances.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Password to authenticate with. There is no authentication by default.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <>
+ * Default value is `6379`
+
+The default port to connect on. Can be overridden on any hostname.
+
+[id="{version}-plugins-{type}s-{plugin}-queue"]
+===== `queue` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The name of the Redis queue (we'll use RPUSH on this). Dynamic names are
+valid here, for example `logstash-%{type}`.
+
+[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"]
+===== `reconnect_interval`
+
+ * Value type is <>
+ * Default value is `1`
+
+Interval for reconnecting to failed Redis connections.
+
+[id="{version}-plugins-{type}s-{plugin}-shuffle_hosts"]
+===== `shuffle_hosts`
+
+ * Value type is <>
+ * Default value is `true`
+
+Shuffle the host list during Logstash startup.
+
+[id="{version}-plugins-{type}s-{plugin}-timeout"]
+===== `timeout`
+
+ * Value type is <>
+ * Default value is `5`
+
+Redis initial connection timeout in seconds.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/redis-v4.0.0.asciidoc b/docs/versioned-plugins/outputs/redis-v4.0.0.asciidoc
new file mode 100644
index 000000000..494445566
--- /dev/null
+++ b/docs/versioned-plugins/outputs/redis-v4.0.0.asciidoc
@@ -0,0 +1,202 @@
+:plugin: redis
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+/////////////////////////////////////////// +:version: v4.0.0 +:release_date: 2017-07-27 +:changelog_url: https://github.com/logstash-plugins/logstash-output-redis/blob/v4.0.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Redis output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output will send events to a Redis queue using RPUSH. +The RPUSH command is supported in Redis v0.0.7+. Using +PUBLISH to a channel requires at least v1.3.8+. +While you may be able to make these Redis versions work, +the best performance and stability will be found in more +recent stable versions. Versions 2.6.0+ are recommended. + +For more information, see http://redis.io/[the Redis homepage] + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Redis Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-batch>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-batch_events>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-batch_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-congestion_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-congestion_threshold>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-data_type>> |<>, one of `["list", "channel"]`|No +| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-shuffle_hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-batch"] +===== `batch` + + * Value type is <> + * Default value is `false` + +Set to true if you want Redis to batch up values and send 1 RPUSH command +instead of one command per value to push on the list. Note that this only +works with `data_type="list"` mode right now. + +If true, we send an RPUSH every "batch_events" events or +"batch_timeout" seconds (whichever comes first). +Only supported for `data_type` is "list". + +[id="{version}-plugins-{type}s-{plugin}-batch_events"] +===== `batch_events` + + * Value type is <> + * Default value is `50` + +If batch is set to true, the number of events we queue up for an RPUSH. + +[id="{version}-plugins-{type}s-{plugin}-batch_timeout"] +===== `batch_timeout` + + * Value type is <> + * Default value is `5` + +If batch is set to true, the maximum amount of time between RPUSH commands +when there are pending events to flush. + +[id="{version}-plugins-{type}s-{plugin}-congestion_interval"] +===== `congestion_interval` + + * Value type is <> + * Default value is `1` + +How often to check for congestion. Default is one second. +Zero means to check on every event. 
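+
+For illustration, a sketch of the channel mode, where events are sent with
+PUBLISH instead of RPUSH. The host and channel name are placeholders, and
+the dynamic name follows the `key` docs below:
+
+[source,ruby]
+----
+output {
+  redis {
+    host => "127.0.0.1"
+    data_type => "channel"       # PUBLISH to a channel instead of RPUSH to a list
+    key => "logstash-%{type}"    # placeholder dynamic channel name
+  }
+}
+----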
+ +[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"] +===== `congestion_threshold` + + * Value type is <> + * Default value is `0` + +In case Redis `data_type` is `list` and has more than `@congestion_threshold` items, +block until someone consumes them and reduces congestion, otherwise if there are +no consumers Redis will run out of memory, unless it was configured with OOM protection. +But even with OOM protection, a single Redis list can block all other users of Redis, +until Redis CPU consumption reaches the max allowed RAM size. +A default value of 0 means that this limit is disabled. +Only supported for `list` Redis `data_type`. + +[id="{version}-plugins-{type}s-{plugin}-data_type"] +===== `data_type` + + * Value can be any of: `list`, `channel` + * There is no default value for this setting. + +Either list or channel. If `data_type` is list, then we will set +RPUSH to key. If `data_type` is channel, then we will PUBLISH to `key`. + +[id="{version}-plugins-{type}s-{plugin}-db"] +===== `db` + + * Value type is <> + * Default value is `0` + +The Redis database number. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `["127.0.0.1"]` + +The hostname(s) of your Redis server(s). Ports may be specified on any +hostname, which will override the global port config. +If the hosts list is an array, Logstash will pick one random host to connect to, +if that host is disconnected it will then pick another. + +For example: +[source,ruby] + "127.0.0.1" + ["127.0.0.1", "127.0.0.2"] + ["127.0.0.1:6380", "127.0.0.1"] + +[id="{version}-plugins-{type}s-{plugin}-key"] +===== `key` + + * Value type is <> + * There is no default value for this setting. + +The name of a Redis list or channel. Dynamic names are +valid here, for example `logstash-%{type}`. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to authenticate with. There is no authentication by default. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `6379` + +The default port to connect on. Can be overridden on any hostname. + +[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] +===== `reconnect_interval` + + * Value type is <> + * Default value is `1` + +Interval for reconnecting to failed Redis connections + +[id="{version}-plugins-{type}s-{plugin}-shuffle_hosts"] +===== `shuffle_hosts` + + * Value type is <> + * Default value is `true` + +Shuffle the host list during Logstash startup. + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `5` + +Redis initial connection timeout in seconds. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/redis-v4.0.1.asciidoc b/docs/versioned-plugins/outputs/redis-v4.0.1.asciidoc new file mode 100644 index 000000000..e89cebc75 --- /dev/null +++ b/docs/versioned-plugins/outputs/redis-v4.0.1.asciidoc @@ -0,0 +1,202 @@ +:plugin: redis +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// +:version: v4.0.1 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-redis/blob/v4.0.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Redis output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output will send events to a Redis queue using RPUSH. +The RPUSH command is supported in Redis v0.0.7+. Using +PUBLISH to a channel requires at least v1.3.8+. +While you may be able to make these Redis versions work, +the best performance and stability will be found in more +recent stable versions. Versions 2.6.0+ are recommended. + +For more information, see http://redis.io/[the Redis homepage] + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Redis Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-batch>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-batch_events>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-batch_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-congestion_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-congestion_threshold>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-data_type>> |<>, one of `["list", "channel"]`|No +| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-shuffle_hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-batch"] +===== `batch` + + * Value type is <> + * Default value is `false` + +Set to true if you want Redis to batch up values and send 1 RPUSH command +instead of one command per value to push on the list. Note that this only +works with `data_type="list"` mode right now. + +If true, we send an RPUSH every "batch_events" events or +"batch_timeout" seconds (whichever comes first). +Only supported for `data_type` is "list". + +[id="{version}-plugins-{type}s-{plugin}-batch_events"] +===== `batch_events` + + * Value type is <> + * Default value is `50` + +If batch is set to true, the number of events we queue up for an RPUSH. + +[id="{version}-plugins-{type}s-{plugin}-batch_timeout"] +===== `batch_timeout` + + * Value type is <> + * Default value is `5` + +If batch is set to true, the maximum amount of time between RPUSH commands +when there are pending events to flush. + +[id="{version}-plugins-{type}s-{plugin}-congestion_interval"] +===== `congestion_interval` + + * Value type is <> + * Default value is `1` + +How often to check for congestion. Default is one second. +Zero means to check on every event. 
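+
+As an illustrative sketch of the connection options described below (the
+hostnames are placeholders), Logstash picks one of these hosts at random and,
+if it disconnects, retries another after `reconnect_interval` seconds:
+
+[source,ruby]
+    output {
+      redis {
+        host => ["redis1.example.com", "redis2.example.com:6380"]  # placeholders
+        shuffle_hosts => true
+        reconnect_interval => 5
+        data_type => "list"
+        key => "logstash"    # placeholder list name
+      }
+    }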
+ +[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"] +===== `congestion_threshold` + + * Value type is <> + * Default value is `0` + +In case Redis `data_type` is `list` and has more than `@congestion_threshold` items, +block until someone consumes them and reduces congestion, otherwise if there are +no consumers Redis will run out of memory, unless it was configured with OOM protection. +But even with OOM protection, a single Redis list can block all other users of Redis, +until Redis CPU consumption reaches the max allowed RAM size. +A default value of 0 means that this limit is disabled. +Only supported for `list` Redis `data_type`. + +[id="{version}-plugins-{type}s-{plugin}-data_type"] +===== `data_type` + + * Value can be any of: `list`, `channel` + * There is no default value for this setting. + +Either list or channel. If `data_type` is list, then we will set +RPUSH to key. If `data_type` is channel, then we will PUBLISH to `key`. + +[id="{version}-plugins-{type}s-{plugin}-db"] +===== `db` + + * Value type is <> + * Default value is `0` + +The Redis database number. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `["127.0.0.1"]` + +The hostname(s) of your Redis server(s). Ports may be specified on any +hostname, which will override the global port config. +If the hosts list is an array, Logstash will pick one random host to connect to, +if that host is disconnected it will then pick another. + +For example: +[source,ruby] + "127.0.0.1" + ["127.0.0.1", "127.0.0.2"] + ["127.0.0.1:6380", "127.0.0.1"] + +[id="{version}-plugins-{type}s-{plugin}-key"] +===== `key` + + * Value type is <> + * There is no default value for this setting. + +The name of a Redis list or channel. Dynamic names are +valid here, for example `logstash-%{type}`. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to authenticate with. There is no authentication by default. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `6379` + +The default port to connect on. Can be overridden on any hostname. + +[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] +===== `reconnect_interval` + + * Value type is <> + * Default value is `1` + +Interval for reconnecting to failed Redis connections + +[id="{version}-plugins-{type}s-{plugin}-shuffle_hosts"] +===== `shuffle_hosts` + + * Value type is <> + * Default value is `true` + +Shuffle the host list during Logstash startup. + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `5` + +Redis initial connection timeout in seconds. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/redis-v4.0.2.asciidoc b/docs/versioned-plugins/outputs/redis-v4.0.2.asciidoc new file mode 100644 index 000000000..9c65d3678 --- /dev/null +++ b/docs/versioned-plugins/outputs/redis-v4.0.2.asciidoc @@ -0,0 +1,202 @@ +:plugin: redis +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// +:version: v4.0.2 +:release_date: 2017-09-12 +:changelog_url: https://github.com/logstash-plugins/logstash-output-redis/blob/v4.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Redis output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output will send events to a Redis queue using RPUSH. +The RPUSH command is supported in Redis v0.0.7+. Using +PUBLISH to a channel requires at least v1.3.8+. +While you may be able to make these Redis versions work, +the best performance and stability will be found in more +recent stable versions. Versions 2.6.0+ are recommended. + +For more information, see http://redis.io/[the Redis homepage] + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Redis Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-batch>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-batch_events>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-batch_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-congestion_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-congestion_threshold>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-data_type>> |<>, one of `["list", "channel"]`|No +| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-shuffle_hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-batch"] +===== `batch` + + * Value type is <> + * Default value is `false` + +Set to true if you want Redis to batch up values and send 1 RPUSH command +instead of one command per value to push on the list. Note that this only +works with `data_type="list"` mode right now. + +If true, we send an RPUSH every "batch_events" events or +"batch_timeout" seconds (whichever comes first). +Only supported for `data_type` is "list". + +[id="{version}-plugins-{type}s-{plugin}-batch_events"] +===== `batch_events` + + * Value type is <> + * Default value is `50` + +If batch is set to true, the number of events we queue up for an RPUSH. + +[id="{version}-plugins-{type}s-{plugin}-batch_timeout"] +===== `batch_timeout` + + * Value type is <> + * Default value is `5` + +If batch is set to true, the maximum amount of time between RPUSH commands +when there are pending events to flush. + +[id="{version}-plugins-{type}s-{plugin}-congestion_interval"] +===== `congestion_interval` + + * Value type is <> + * Default value is `1` + +How often to check for congestion. Default is one second. +Zero means to check on every event. 
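+
+For the `channel` mode of `data_type` (described below), a minimal sketch
+(the channel name is illustrative) that PUBLISHes each event to a per-type
+channel using sprintf expansion in `key`:
+
+[source,ruby]
+    output {
+      redis {
+        data_type => "channel"
+        key => "logstash-%{type}"    # channel name, expanded per event
+      }
+    }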
+ +[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"] +===== `congestion_threshold` + + * Value type is <> + * Default value is `0` + +In case Redis `data_type` is `list` and has more than `@congestion_threshold` items, +block until someone consumes them and reduces congestion, otherwise if there are +no consumers Redis will run out of memory, unless it was configured with OOM protection. +But even with OOM protection, a single Redis list can block all other users of Redis, +until Redis CPU consumption reaches the max allowed RAM size. +A default value of 0 means that this limit is disabled. +Only supported for `list` Redis `data_type`. + +[id="{version}-plugins-{type}s-{plugin}-data_type"] +===== `data_type` + + * Value can be any of: `list`, `channel` + * There is no default value for this setting. + +Either list or channel. If `data_type` is list, then we will set +RPUSH to key. If `data_type` is channel, then we will PUBLISH to `key`. + +[id="{version}-plugins-{type}s-{plugin}-db"] +===== `db` + + * Value type is <> + * Default value is `0` + +The Redis database number. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `["127.0.0.1"]` + +The hostname(s) of your Redis server(s). Ports may be specified on any +hostname, which will override the global port config. +If the hosts list is an array, Logstash will pick one random host to connect to, +if that host is disconnected it will then pick another. + +For example: +[source,ruby] + "127.0.0.1" + ["127.0.0.1", "127.0.0.2"] + ["127.0.0.1:6380", "127.0.0.1"] + +[id="{version}-plugins-{type}s-{plugin}-key"] +===== `key` + + * Value type is <> + * There is no default value for this setting. + +The name of a Redis list or channel. Dynamic names are +valid here, for example `logstash-%{type}`. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to authenticate with. There is no authentication by default. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `6379` + +The default port to connect on. Can be overridden on any hostname. + +[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] +===== `reconnect_interval` + + * Value type is <> + * Default value is `1` + +Interval for reconnecting to failed Redis connections + +[id="{version}-plugins-{type}s-{plugin}-shuffle_hosts"] +===== `shuffle_hosts` + + * Value type is <> + * Default value is `true` + +Shuffle the host list during Logstash startup. + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `5` + +Redis initial connection timeout in seconds. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/redis-v4.0.3.asciidoc b/docs/versioned-plugins/outputs/redis-v4.0.3.asciidoc new file mode 100644 index 000000000..acf8fbbb3 --- /dev/null +++ b/docs/versioned-plugins/outputs/redis-v4.0.3.asciidoc @@ -0,0 +1,202 @@ +:plugin: redis +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// +:version: v4.0.3 +:release_date: 2017-11-13 +:changelog_url: https://github.com/logstash-plugins/logstash-output-redis/blob/v4.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Redis output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output will send events to a Redis queue using RPUSH. +The RPUSH command is supported in Redis v0.0.7+. Using +PUBLISH to a channel requires at least v1.3.8+. +While you may be able to make these Redis versions work, +the best performance and stability will be found in more +recent stable versions. Versions 2.6.0+ are recommended. + +For more information, see http://redis.io/[the Redis homepage] + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Redis Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-batch>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-batch_events>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-batch_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-congestion_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-congestion_threshold>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-data_type>> |<>, one of `["list", "channel"]`|No +| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-shuffle_hosts>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-batch"] +===== `batch` + + * Value type is <> + * Default value is `false` + +Set to true if you want Redis to batch up values and send 1 RPUSH command +instead of one command per value to push on the list. Note that this only +works with `data_type="list"` mode right now. + +If true, we send an RPUSH every "batch_events" events or +"batch_timeout" seconds (whichever comes first). +Only supported for `data_type` is "list". + +[id="{version}-plugins-{type}s-{plugin}-batch_events"] +===== `batch_events` + + * Value type is <> + * Default value is `50` + +If batch is set to true, the number of events we queue up for an RPUSH. + +[id="{version}-plugins-{type}s-{plugin}-batch_timeout"] +===== `batch_timeout` + + * Value type is <> + * Default value is `5` + +If batch is set to true, the maximum amount of time between RPUSH commands +when there are pending events to flush. + +[id="{version}-plugins-{type}s-{plugin}-congestion_interval"] +===== `congestion_interval` + + * Value type is <> + * Default value is `1` + +How often to check for congestion. Default is one second. +Zero means to check on every event. 
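+
+As a hedged sketch of the congestion controls (the key name is a placeholder
+and the threshold is arbitrary), this checks the list length before every
+event and blocks while more than 5000 entries are pending;
+`congestion_threshold` is described next:
+
+[source,ruby]
+    output {
+      redis {
+        data_type => "list"
+        key => "logstash"              # placeholder list name
+        congestion_interval => 0       # check before every event
+        congestion_threshold => 5000   # block RPUSH above this list length
+      }
+    }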
+ +[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"] +===== `congestion_threshold` + + * Value type is <> + * Default value is `0` + +In case Redis `data_type` is `list` and has more than `@congestion_threshold` items, +block until someone consumes them and reduces congestion, otherwise if there are +no consumers Redis will run out of memory, unless it was configured with OOM protection. +But even with OOM protection, a single Redis list can block all other users of Redis, +until Redis CPU consumption reaches the max allowed RAM size. +A default value of 0 means that this limit is disabled. +Only supported for `list` Redis `data_type`. + +[id="{version}-plugins-{type}s-{plugin}-data_type"] +===== `data_type` + + * Value can be any of: `list`, `channel` + * There is no default value for this setting. + +Either list or channel. If `data_type` is list, then we will set +RPUSH to key. If `data_type` is channel, then we will PUBLISH to `key`. + +[id="{version}-plugins-{type}s-{plugin}-db"] +===== `db` + + * Value type is <> + * Default value is `0` + +The Redis database number. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `["127.0.0.1"]` + +The hostname(s) of your Redis server(s). Ports may be specified on any +hostname, which will override the global port config. +If the hosts list is an array, Logstash will pick one random host to connect to, +if that host is disconnected it will then pick another. + +For example: +[source,ruby] + "127.0.0.1" + ["127.0.0.1", "127.0.0.2"] + ["127.0.0.1:6380", "127.0.0.1"] + +[id="{version}-plugins-{type}s-{plugin}-key"] +===== `key` + + * Value type is <> + * There is no default value for this setting. + +The name of a Redis list or channel. Dynamic names are +valid here, for example `logstash-%{type}`. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * There is no default value for this setting. + +Password to authenticate with. There is no authentication by default. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `6379` + +The default port to connect on. Can be overridden on any hostname. + +[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] +===== `reconnect_interval` + + * Value type is <> + * Default value is `1` + +Interval for reconnecting to failed Redis connections + +[id="{version}-plugins-{type}s-{plugin}-shuffle_hosts"] +===== `shuffle_hosts` + + * Value type is <> + * Default value is `true` + +Shuffle the host list during Logstash startup. + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `5` + +Redis initial connection timeout in seconds. 
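+
+Putting several of these options together, a minimal end-to-end sketch
+(host, password, and key values are placeholders):
+
+[source,ruby]
+    output {
+      redis {
+        host => ["127.0.0.1"]
+        port => 6379
+        db => 1
+        password => "secret"         # placeholder
+        timeout => 10
+        data_type => "list"
+        key => "logstash-%{type}"    # placeholder, expanded per event
+      }
+    }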
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/redmine-index.asciidoc b/docs/versioned-plugins/outputs/redmine-index.asciidoc
new file mode 100644
index 000000000..e97f2498a
--- /dev/null
+++ b/docs/versioned-plugins/outputs/redmine-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: redmine
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-13
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::redmine-v3.0.3.asciidoc[]
+include::redmine-v3.0.2.asciidoc[]
+include::redmine-v3.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/redmine-v3.0.1.asciidoc b/docs/versioned-plugins/outputs/redmine-v3.0.1.asciidoc
new file mode 100644
index 000000000..63c054c02
--- /dev/null
+++ b/docs/versioned-plugins/outputs/redmine-v3.0.1.asciidoc
@@ -0,0 +1,192 @@
+:plugin: redmine
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-redmine/blob/v3.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Redmine output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+The redmine output is used to create a ticket via the Redmine API.
+
+It sends a POST request in JSON format and uses token-based authentication.
+
+
+Example of use:
+[source,ruby]
+    output {
+      redmine {
+        url => "http://redmineserver.tld"
+        token => 'token'
+        project_id => 200
+        tracker_id => 1
+        status_id => 3
+        priority_id => 2
+        subject => "Error ... detected"
+      }
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Redmine Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-assigned_to_id>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-categorie_id>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-description>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-fixed_version_id>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-parent_issue_id>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-priority_id>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-project_id>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-status_id>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-subject>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-token>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-tracker_id>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+ +  + +[id="{version}-plugins-{type}s-{plugin}-assigned_to_id"] +===== `assigned_to_id` + + * Value type is <> + * Default value is `nil` + +redmine issue assigned_to +not required for post_issue + +[id="{version}-plugins-{type}s-{plugin}-categorie_id"] +===== `categorie_id` + + * Value type is <> + * Default value is `nil` + +not required for post_issue + +[id="{version}-plugins-{type}s-{plugin}-description"] +===== `description` + + * Value type is <> + * Default value is `"%{message}"` + +redmine issue description +required + +[id="{version}-plugins-{type}s-{plugin}-fixed_version_id"] +===== `fixed_version_id` + + * Value type is <> + * Default value is `nil` + +redmine issue fixed_version_id + +[id="{version}-plugins-{type}s-{plugin}-parent_issue_id"] +===== `parent_issue_id` + + * Value type is <> + * Default value is `nil` + +redmine issue parent_issue_id +not required for post_issue + +[id="{version}-plugins-{type}s-{plugin}-priority_id"] +===== `priority_id` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +redmine issue priority_id +required + +[id="{version}-plugins-{type}s-{plugin}-project_id"] +===== `project_id` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +redmine issue projet_id +required + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * Default value is `false` + + + +[id="{version}-plugins-{type}s-{plugin}-status_id"] +===== `status_id` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +redmine issue status_id +required + +[id="{version}-plugins-{type}s-{plugin}-subject"] +===== `subject` + + * Value type is <> + * Default value is `"%{host}"` + +redmine issue subject +required + +[id="{version}-plugins-{type}s-{plugin}-token"] +===== `token` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +redmine token user used for authentication + +[id="{version}-plugins-{type}s-{plugin}-tracker_id"] +===== `tracker_id` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +redmine issue tracker_id +required + +[id="{version}-plugins-{type}s-{plugin}-url"] +===== `url` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +host of redmine app +value format : 'http://urlofredmine.tld' - Not add '/issues' at end + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/redmine-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/redmine-v3.0.2.asciidoc new file mode 100644 index 000000000..029c4dbfb --- /dev/null +++ b/docs/versioned-plugins/outputs/redmine-v3.0.2.asciidoc @@ -0,0 +1,192 @@ +:plugin: redmine +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.2 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-redmine/blob/v3.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Redmine output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The redmine output is used to create a ticket via the API redmine. + +It send a POST request in a JSON format and use TOKEN authentication + + +-- Exemple of use -- +[source,ruby] + output { + redmine { + url => "http://redmineserver.tld" + token => 'token' + project_id => 200 + tracker_id => 1 + status_id => 3 + priority_id => 2 + subject => "Error ... detected" + } + } + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Redmine Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-assigned_to_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-categorie_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-description>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fixed_version_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-parent_issue_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-priority_id>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-project_id>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-status_id>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-subject>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-token>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-tracker_id>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-assigned_to_id"] +===== `assigned_to_id` + + * Value type is <> + * Default value is `nil` + +redmine issue assigned_to +not required for post_issue + +[id="{version}-plugins-{type}s-{plugin}-categorie_id"] +===== `categorie_id` + + * Value type is <> + * Default value is `nil` + +not required for post_issue + +[id="{version}-plugins-{type}s-{plugin}-description"] +===== `description` + + * Value type is <> + * Default value is `"%{message}"` + +redmine issue description +required + +[id="{version}-plugins-{type}s-{plugin}-fixed_version_id"] +===== `fixed_version_id` + + * Value type is <> + * Default value is `nil` + +redmine issue fixed_version_id + +[id="{version}-plugins-{type}s-{plugin}-parent_issue_id"] +===== `parent_issue_id` + + * Value type is <> + * Default value is `nil` + +redmine issue parent_issue_id +not required for post_issue + +[id="{version}-plugins-{type}s-{plugin}-priority_id"] +===== `priority_id` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +redmine issue priority_id +required + +[id="{version}-plugins-{type}s-{plugin}-project_id"] +===== `project_id` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +redmine issue projet_id +required + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * Default value is `false` + + + +[id="{version}-plugins-{type}s-{plugin}-status_id"] +===== `status_id` + + * This is a required setting. 
+ * Value type is <> + * There is no default value for this setting. + +redmine issue status_id +required + +[id="{version}-plugins-{type}s-{plugin}-subject"] +===== `subject` + + * Value type is <> + * Default value is `"%{host}"` + +redmine issue subject +required + +[id="{version}-plugins-{type}s-{plugin}-token"] +===== `token` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +redmine token user used for authentication + +[id="{version}-plugins-{type}s-{plugin}-tracker_id"] +===== `tracker_id` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +redmine issue tracker_id +required + +[id="{version}-plugins-{type}s-{plugin}-url"] +===== `url` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +host of redmine app +value format : 'http://urlofredmine.tld' - Not add '/issues' at end + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/redmine-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/redmine-v3.0.3.asciidoc new file mode 100644 index 000000000..474b2b8c8 --- /dev/null +++ b/docs/versioned-plugins/outputs/redmine-v3.0.3.asciidoc @@ -0,0 +1,192 @@ +:plugin: redmine +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-11-13 +:changelog_url: https://github.com/logstash-plugins/logstash-output-redmine/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Redmine output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The redmine output is used to create a ticket via the API redmine. + +It send a POST request in a JSON format and use TOKEN authentication + + +-- Exemple of use -- +[source,ruby] + output { + redmine { + url => "http://redmineserver.tld" + token => 'token' + project_id => 200 + tracker_id => 1 + status_id => 3 + priority_id => 2 + subject => "Error ... detected" + } + } + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Redmine Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-assigned_to_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-categorie_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-description>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-fixed_version_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-parent_issue_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-priority_id>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-project_id>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-status_id>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-subject>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-token>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-tracker_id>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-assigned_to_id"] +===== `assigned_to_id` + + * Value type is <> + * Default value is `nil` + +redmine issue assigned_to +not required for post_issue + +[id="{version}-plugins-{type}s-{plugin}-categorie_id"] +===== `categorie_id` + + * Value type is <> + * Default value is `nil` + +not required for post_issue + +[id="{version}-plugins-{type}s-{plugin}-description"] +===== `description` + + * Value type is <> + * Default value is `"%{message}"` + +redmine issue description +required + +[id="{version}-plugins-{type}s-{plugin}-fixed_version_id"] +===== `fixed_version_id` + + * Value type is <> + * Default value is `nil` + +redmine issue fixed_version_id + +[id="{version}-plugins-{type}s-{plugin}-parent_issue_id"] +===== `parent_issue_id` + + * Value type is <> + * Default value is `nil` + +redmine issue parent_issue_id +not required for post_issue + +[id="{version}-plugins-{type}s-{plugin}-priority_id"] +===== `priority_id` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +redmine issue priority_id +required + +[id="{version}-plugins-{type}s-{plugin}-project_id"] +===== `project_id` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +redmine issue projet_id +required + +[id="{version}-plugins-{type}s-{plugin}-ssl"] +===== `ssl` + + * Value type is <> + * Default value is `false` + + + +[id="{version}-plugins-{type}s-{plugin}-status_id"] +===== `status_id` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +redmine issue status_id +required + +[id="{version}-plugins-{type}s-{plugin}-subject"] +===== `subject` + + * Value type is <> + * Default value is `"%{host}"` + +redmine issue subject +required + +[id="{version}-plugins-{type}s-{plugin}-token"] +===== `token` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +redmine token user used for authentication + +[id="{version}-plugins-{type}s-{plugin}-tracker_id"] +===== `tracker_id` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +redmine issue tracker_id +required + +[id="{version}-plugins-{type}s-{plugin}-url"] +===== `url` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. 
+
+Host of the Redmine app.
+Value format: 'http://urlofredmine.tld'. Do not append '/issues' to the end.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/riak-index.asciidoc b/docs/versioned-plugins/outputs/riak-index.asciidoc
new file mode 100644
index 000000000..e0af7b4c2
--- /dev/null
+++ b/docs/versioned-plugins/outputs/riak-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: riak
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-13
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::riak-v3.0.3.asciidoc[]
+include::riak-v3.0.2.asciidoc[]
+include::riak-v3.0.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/riak-v3.0.1.asciidoc b/docs/versioned-plugins/outputs/riak-v3.0.1.asciidoc
new file mode 100644
index 000000000..c1c739f52
--- /dev/null
+++ b/docs/versioned-plugins/outputs/riak-v3.0.1.asciidoc
@@ -0,0 +1,177 @@
+:plugin: riak
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-riak/blob/v3.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Riak output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Riak is a distributed k/v store from Basho.
+It's based on the Dynamo model.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Riak Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-bucket>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-bucket_props>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-enable_search>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-enable_ssl>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-indices>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-key_name>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-nodes>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-proto>> |<>, one of `["http", "pb"]`|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_opts>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-bucket"]
+===== `bucket`
+
+ * Value type is <>
+ * Default value is `["logstash-%{+YYYY.MM.dd}"]`
+
+The bucket name to write events to.
+Expansion is supported here, as values are
+passed through event.sprintf.
+Multiple buckets can be specified here,
+but any bucket-specific settings defined
+apply to ALL the buckets.
+
+[id="{version}-plugins-{type}s-{plugin}-bucket_props"]
+===== `bucket_props`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Bucket properties (NYI):
+a Logstash hash of properties for the bucket,
+e.g.
+[source,ruby]
+    bucket_props => {
+      "r" => "one"
+      "w" => "one"
+      "dw" => "one"
+    }
+or
+[source,ruby]
+    bucket_props => { "n_val" => "3" }
+Properties will be passed as-is.
+
+[id="{version}-plugins-{type}s-{plugin}-enable_search"]
+===== `enable_search`
+
+ * Value type is <>
+ * Default value is `false`
+
+Search:
+Enable search on the bucket defined above.
+
+[id="{version}-plugins-{type}s-{plugin}-enable_ssl"]
+===== `enable_ssl`
+
+ * Value type is <>
+ * Default value is `false`
+
+SSL:
+Enable SSL.
+
+[id="{version}-plugins-{type}s-{plugin}-indices"]
+===== `indices`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Indices:
+an array of fields to add secondary indexes (2i) on,
+e.g.
+[source,ruby]
+    indices => ["source_host", "type"]
+Off by default, as not everyone runs eleveldb.
+
+[id="{version}-plugins-{type}s-{plugin}-key_name"]
+===== `key_name`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The event key name.
+Variables are valid here.
+
+Choose this carefully. It is best to let Riak decide.
+
+[id="{version}-plugins-{type}s-{plugin}-nodes"]
+===== `nodes`
+
+ * Value type is <>
+ * Default value is `{"localhost"=>"8098"}`
+
+The nodes of your Riak cluster.
+This can be a single host or
+a Logstash hash of node/port pairs,
+e.g.
+[source,ruby]
+    {
+      "node1" => "8098"
+      "node2" => "8098"
+    }
+
+[id="{version}-plugins-{type}s-{plugin}-proto"]
+===== `proto`
+
+ * Value can be any of: `http`, `pb`
+ * Default value is `"http"`
+
+The protocol to use:
+HTTP or ProtoBuf.
+Applies to ALL backends listed above;
+no mix and match.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_opts"]
+===== `ssl_opts`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+SSL options:
+options for SSL connections.
+Only applied if SSL is enabled.
+A Logstash hash that maps to the riak-client options
+here: https://github.com/basho/riak-ruby-client/wiki/Connecting-to-Riak
+You'll likely want something like this:
+
+[source,ruby]
+    ssl_opts => {
+      "pem" => "/etc/riak.pem"
+      "ca_path" => "/usr/share/certificates"
+    }
+
+Per the riak client docs, the above sample options
+will turn on SSL `VERIFY_PEER`.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/riak-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/riak-v3.0.2.asciidoc
new file mode 100644
index 000000000..6ad882972
--- /dev/null
+++ b/docs/versioned-plugins/outputs/riak-v3.0.2.asciidoc
@@ -0,0 +1,177 @@
+:plugin: riak
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.2
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-output-riak/blob/v3.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Riak output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Riak is a distributed k/v store from Basho.
+It's based on the Dynamo model.
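+
+A minimal configuration sketch (the node address is a placeholder) that
+writes events to a daily bucket over HTTP:
+
+[source,ruby]
+    output {
+      riak {
+        nodes => { "riak1.example.com" => "8098" }    # placeholder node
+        bucket => ["logstash-%{+YYYY.MM.dd}"]
+        proto => "http"
+      }
+    }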
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Riak Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-bucket>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bucket_props>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-enable_search>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-enable_ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-indices>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-nodes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proto>> |<>, one of `["http", "pb"]`|No +| <<{version}-plugins-{type}s-{plugin}-ssl_opts>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-bucket"] +===== `bucket` + + * Value type is <> + * Default value is `["logstash-%{+YYYY.MM.dd}"]` + +The bucket name to write events to +Expansion is supported here as values are +passed through event.sprintf +Multiple buckets can be specified here +but any bucket-specific settings defined +apply to ALL the buckets. + +[id="{version}-plugins-{type}s-{plugin}-bucket_props"] +===== `bucket_props` + + * Value type is <> + * There is no default value for this setting. + +Bucket properties (NYI) +Logstash hash of properties for the bucket +i.e. +[source,ruby] + bucket_props => { + "r" => "one" + "w" => "one" + "dw", "one + } +or +[source,ruby] + bucket_props => { "n_val" => "3" } +Properties will be passed as-is + +[id="{version}-plugins-{type}s-{plugin}-enable_search"] +===== `enable_search` + + * Value type is <> + * Default value is `false` + +Search +Enable search on the bucket defined above + +[id="{version}-plugins-{type}s-{plugin}-enable_ssl"] +===== `enable_ssl` + + * Value type is <> + * Default value is `false` + +SSL +Enable SSL + +[id="{version}-plugins-{type}s-{plugin}-indices"] +===== `indices` + + * Value type is <> + * There is no default value for this setting. + +Indices +Array of fields to add 2i on +e.g. +[source,ruby] + `indices => ["source_host", "type"] +Off by default as not everyone runs eleveldb + +[id="{version}-plugins-{type}s-{plugin}-key_name"] +===== `key_name` + + * Value type is <> + * There is no default value for this setting. + +The event key name +variables are valid here. + +Choose this carefully. Best to let riak decide. + +[id="{version}-plugins-{type}s-{plugin}-nodes"] +===== `nodes` + + * Value type is <> + * Default value is `{"localhost"=>"8098"}` + +The nodes of your Riak cluster +This can be a single host or +a Logstash hash of node/port pairs +e.g +[source,ruby] + { + "node1" => "8098" + "node2" => "8098" + } + +[id="{version}-plugins-{type}s-{plugin}-proto"] +===== `proto` + + * Value can be any of: `http`, `pb` + * Default value is `"http"` + +The protocol to use +HTTP or ProtoBuf +Applies to ALL backends listed above +No mix and match + +[id="{version}-plugins-{type}s-{plugin}-ssl_opts"] +===== `ssl_opts` + + * Value type is <> + * There is no default value for this setting. 
+ +SSL Options +Options for SSL connections +Only applied if SSL is enabled +Logstash hash that maps to the riak-client options +here: https://github.com/basho/riak-ruby-client/wiki/Connecting-to-Riak +You'll likely want something like this: + +[source, ruby] + ssl_opts => { + "pem" => "/etc/riak.pem" + "ca_path" => "/usr/share/certificates" + } + +Per the riak client docs, the above sample options +will turn on SSL `VERIFY_PEER` + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/riak-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/riak-v3.0.3.asciidoc new file mode 100644 index 000000000..08762aeed --- /dev/null +++ b/docs/versioned-plugins/outputs/riak-v3.0.3.asciidoc @@ -0,0 +1,177 @@ +:plugin: riak +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-11-13 +:changelog_url: https://github.com/logstash-plugins/logstash-output-riak/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Riak output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Riak is a distributed k/v store from Basho. +It's based on the Dynamo model. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Riak Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-bucket>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bucket_props>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-enable_search>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-enable_ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-indices>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-key_name>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-nodes>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proto>> |<>, one of `["http", "pb"]`|No +| <<{version}-plugins-{type}s-{plugin}-ssl_opts>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-bucket"] +===== `bucket` + + * Value type is <> + * Default value is `["logstash-%{+YYYY.MM.dd}"]` + +The bucket name to write events to +Expansion is supported here as values are +passed through event.sprintf +Multiple buckets can be specified here +but any bucket-specific settings defined +apply to ALL the buckets. + +[id="{version}-plugins-{type}s-{plugin}-bucket_props"] +===== `bucket_props` + + * Value type is <> + * There is no default value for this setting. + +Bucket properties (NYI) +Logstash hash of properties for the bucket +i.e. 
+[source,ruby] + bucket_props => { + "r" => "one" + "w" => "one" + "dw", "one + } +or +[source,ruby] + bucket_props => { "n_val" => "3" } +Properties will be passed as-is + +[id="{version}-plugins-{type}s-{plugin}-enable_search"] +===== `enable_search` + + * Value type is <> + * Default value is `false` + +Search +Enable search on the bucket defined above + +[id="{version}-plugins-{type}s-{plugin}-enable_ssl"] +===== `enable_ssl` + + * Value type is <> + * Default value is `false` + +SSL +Enable SSL + +[id="{version}-plugins-{type}s-{plugin}-indices"] +===== `indices` + + * Value type is <> + * There is no default value for this setting. + +Indices +Array of fields to add 2i on +e.g. +[source,ruby] + `indices => ["source_host", "type"] +Off by default as not everyone runs eleveldb + +[id="{version}-plugins-{type}s-{plugin}-key_name"] +===== `key_name` + + * Value type is <> + * There is no default value for this setting. + +The event key name +variables are valid here. + +Choose this carefully. Best to let riak decide. + +[id="{version}-plugins-{type}s-{plugin}-nodes"] +===== `nodes` + + * Value type is <> + * Default value is `{"localhost"=>"8098"}` + +The nodes of your Riak cluster +This can be a single host or +a Logstash hash of node/port pairs +e.g +[source,ruby] + { + "node1" => "8098" + "node2" => "8098" + } + +[id="{version}-plugins-{type}s-{plugin}-proto"] +===== `proto` + + * Value can be any of: `http`, `pb` + * Default value is `"http"` + +The protocol to use +HTTP or ProtoBuf +Applies to ALL backends listed above +No mix and match + +[id="{version}-plugins-{type}s-{plugin}-ssl_opts"] +===== `ssl_opts` + + * Value type is <> + * There is no default value for this setting. + +SSL Options +Options for SSL connections +Only applied if SSL is enabled +Logstash hash that maps to the riak-client options +here: https://github.com/basho/riak-ruby-client/wiki/Connecting-to-Riak +You'll likely want something like this: + +[source, ruby] + ssl_opts => { + "pem" => "/etc/riak.pem" + "ca_path" => "/usr/share/certificates" + } + +Per the riak client docs, the above sample options +will turn on SSL `VERIFY_PEER` + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/riemann-index.asciidoc b/docs/versioned-plugins/outputs/riemann-index.asciidoc new file mode 100644 index 000000000..059555469 --- /dev/null +++ b/docs/versioned-plugins/outputs/riemann-index.asciidoc @@ -0,0 +1,16 @@ +:plugin: riemann +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-13 +| <> | 2017-08-16 +| <> | 2017-06-23 +|======================================================================= + +include::riemann-v3.0.3.asciidoc[] +include::riemann-v3.0.2.asciidoc[] +include::riemann-v3.0.1.asciidoc[] + diff --git a/docs/versioned-plugins/outputs/riemann-v3.0.1.asciidoc b/docs/versioned-plugins/outputs/riemann-v3.0.1.asciidoc new file mode 100644 index 000000000..511a4763f --- /dev/null +++ b/docs/versioned-plugins/outputs/riemann-v3.0.1.asciidoc @@ -0,0 +1,178 @@ +:plugin: riemann +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! 
+///////////////////////////////////////////
+:version: v3.0.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-riemann/blob/v3.0.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Riemann output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Riemann is a network event stream processing system.
+
+While Riemann is conceptually very similar to Logstash, it offers
+much more as a replacement for a monitoring system.
+
+Riemann is used in Logstash much like statsd or other metric-related
+outputs.
+
+You can learn about Riemann here:
+
+* http://riemann.io/
+You can see the author talk about it here:
+* http://vimeo.com/38377415
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Riemann Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-debug>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-map_fields>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-protocol>> |<>, one of `["tcp", "udp"]`|No
+| <<{version}-plugins-{type}s-{plugin}-riemann_event>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sender>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-debug"]
+===== `debug`
+
+ * Value type is <>
+ * Default value is `false`
+
+
+Enable debugging output?
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * Default value is `"localhost"`
+
+The address of the Riemann server.
+
+[id="{version}-plugins-{type}s-{plugin}-map_fields"]
+===== `map_fields`
+
+ * Value type is <>
+ * Default value is `false`
+
+If set to true, automatically map all Logstash-defined fields to Riemann event fields.
+All nested Logstash fields will be mapped to Riemann fields containing all parent keys
+separated by dots and the deepest value.
+
+As an example, the Logstash event:
+[source,ruby]
+    {
+      "@timestamp":"2013-12-10T14:36:26.151+0000",
+      "@version": 1,
+      "message":"log message",
+      "host": "host.domain.com",
+      "nested_field": {
+        "key": "value"
+      }
+    }
+is mapped to this Riemann event:
+[source,ruby]
+    {
+      :time 1386686186,
+      :host host.domain.com,
+      :message log message,
+      :nested_field.key value
+    }
+
+It can be used in conjunction with, or independent of, the `riemann_event` option.
+When used with `riemann_event`, any duplicate keys receive their value from
+`riemann_event` instead of the Logstash event itself.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <>
+ * Default value is `5555`
+
+The port to connect to on your Riemann server.
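+
+A minimal sketch tying the options above together (the server address is a
+placeholder):
+
+[source,ruby]
+    output {
+      riemann {
+        host => "riemann.example.com"    # placeholder
+        port => 5555
+        map_fields => true
+      }
+    }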
+ +[id="{version}-plugins-{type}s-{plugin}-protocol"] +===== `protocol` + + * Value can be any of: `tcp`, `udp` + * Default value is `"tcp"` + +The protocol to use +UDP is non-blocking +TCP is blocking + +Logstash's default output behaviour +is to never lose events +As such, we use tcp as default here + +[id="{version}-plugins-{type}s-{plugin}-riemann_event"] +===== `riemann_event` + + * Value type is <> + * There is no default value for this setting. + +A Hash to set Riemann event fields +(http://riemann.io/concepts.html). + +The following event fields are supported: +`description`, `state`, `metric`, `ttl`, `service` + +Tags found on the Logstash event will automatically be added to the +Riemann event. + +Any other field set here will be passed to Riemann as an event attribute. + +Example: +[source,ruby] + riemann { + riemann_event => { + "metric" => "%{metric}" + "service" => "%{service}" + } + } + +`metric` and `ttl` values will be coerced to a floating point value. +Values which cannot be coerced will zero (0.0). + +`description`, by default, will be set to the event message +but can be overridden here. + +[id="{version}-plugins-{type}s-{plugin}-sender"] +===== `sender` + + * Value type is <> + * Default value is `"%{host}"` + +The name of the sender. +This sets the `host` value +in the Riemann event + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/riemann-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/riemann-v3.0.2.asciidoc new file mode 100644 index 000000000..05127d6f2 --- /dev/null +++ b/docs/versioned-plugins/outputs/riemann-v3.0.2.asciidoc @@ -0,0 +1,178 @@ +:plugin: riemann +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.2 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-riemann/blob/v3.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Riemann output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Riemann is a network event stream processing system. + +While Riemann is very similar conceptually to Logstash, it has +much more in terms of being a monitoring system replacement. + +Riemann is used in Logstash much like statsd or other metric-related +outputs + +You can learn about Riemann here: + +* http://riemann.io/ +You can see the author talk about it here: +* http://vimeo.com/38377415 + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Riemann Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <>
+ * Default value is `5555`
+
+The port to connect to on your Riemann server.
+
+[id="{version}-plugins-{type}s-{plugin}-protocol"]
+===== `protocol`
+
+ * Value can be any of: `tcp`, `udp`
+ * Default value is `"tcp"`
+
+The protocol to use.
+UDP is non-blocking; TCP is blocking.
+
+Logstash's default output behaviour
+is to never lose events.
+As such, we use TCP as the default here.
+
+[id="{version}-plugins-{type}s-{plugin}-riemann_event"]
+===== `riemann_event`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A Hash to set Riemann event fields
+(http://riemann.io/concepts.html).
+
+The following event fields are supported:
+`description`, `state`, `metric`, `ttl`, `service`
+
+Tags found on the Logstash event will automatically be added to the
+Riemann event.
+
+Any other field set here will be passed to Riemann as an event attribute.
+
+Example:
+[source,ruby]
+    riemann {
+        riemann_event => {
+            "metric" => "%{metric}"
+            "service" => "%{service}"
+        }
+    }
+
+`metric` and `ttl` values will be coerced to a floating point value.
+Values which cannot be coerced will be set to zero (0.0).
+
+`description`, by default, will be set to the event message
+but can be overridden here.
+
+[id="{version}-plugins-{type}s-{plugin}-sender"]
+===== `sender`
+
+ * Value type is <>
+ * Default value is `"%{host}"`
+
+The name of the sender.
+This sets the `host` value
+in the Riemann event.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/riemann-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/riemann-v3.0.3.asciidoc
new file mode 100644
index 000000000..68538cf4f
--- /dev/null
+++ b/docs/versioned-plugins/outputs/riemann-v3.0.3.asciidoc
@@ -0,0 +1,178 @@
+:plugin: riemann
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-11-13
+:changelog_url: https://github.com/logstash-plugins/logstash-output-riemann/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Riemann output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Riemann is a network event stream processing system.
+
+While Riemann is conceptually very similar to Logstash, it offers
+much more as a monitoring system replacement.
+
+Riemann is used in Logstash much like statsd or other metric-related
+outputs.
+
+You can learn about Riemann here:
+
+* http://riemann.io/
+
+You can see the author talk about it here:
+
+* http://vimeo.com/38377415
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Riemann Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-debug>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-map_fields>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-protocol>> |<>, one of `["tcp", "udp"]`|No
+| <<{version}-plugins-{type}s-{plugin}-riemann_event>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-sender>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-debug"]
+===== `debug`
+
+ * Value type is <>
+ * Default value is `false`
+
+
+Enable debugging output?
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * Default value is `"localhost"`
+
+The address of the Riemann server.
+
+[id="{version}-plugins-{type}s-{plugin}-map_fields"]
+===== `map_fields`
+
+ * Value type is <>
+ * Default value is `false`
+
+If set to true, automatically map all Logstash-defined fields to Riemann event fields.
+All nested Logstash fields will be mapped to Riemann fields containing all parent keys
+separated by dots and the deepest value.
+
+As an example, the Logstash event:
+[source,ruby]
+    {
+      "@timestamp":"2013-12-10T14:36:26.151+0000",
+      "@version": 1,
+      "message":"log message",
+      "host": "host.domain.com",
+      "nested_field": {
+        "key": "value"
+      }
+    }
+is mapped to this Riemann event:
+[source,ruby]
+    {
+      :time 1386686186,
+      :host host.domain.com,
+      :message log message,
+      :nested_field.key value
+    }
+
+This option can be used in conjunction with or independently of the `riemann_event` option.
+When used with `riemann_event`, any duplicate keys receive their value from
+`riemann_event` rather than from the Logstash event itself.
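+
+For example, a minimal sketch that turns on the mapping described above;
+with an event containing the nested field from the example, the Riemann
+event would carry a `nested_field.key` attribute:
+
+[source,ruby]
+    output {
+      riemann {
+        host => "localhost"
+        map_fields => true  # flatten and forward all event fields
+      }
+    }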
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <>
+ * Default value is `5555`
+
+The port to connect to on your Riemann server.
+
+[id="{version}-plugins-{type}s-{plugin}-protocol"]
+===== `protocol`
+
+ * Value can be any of: `tcp`, `udp`
+ * Default value is `"tcp"`
+
+The protocol to use.
+UDP is non-blocking; TCP is blocking.
+
+Logstash's default output behaviour
+is to never lose events.
+As such, we use TCP as the default here.
+
+[id="{version}-plugins-{type}s-{plugin}-riemann_event"]
+===== `riemann_event`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+A Hash to set Riemann event fields
+(http://riemann.io/concepts.html).
+
+The following event fields are supported:
+`description`, `state`, `metric`, `ttl`, `service`
+
+Tags found on the Logstash event will automatically be added to the
+Riemann event.
+
+Any other field set here will be passed to Riemann as an event attribute.
+
+Example:
+[source,ruby]
+    riemann {
+        riemann_event => {
+            "metric" => "%{metric}"
+            "service" => "%{service}"
+        }
+    }
+
+`metric` and `ttl` values will be coerced to a floating point value.
+Values which cannot be coerced will be set to zero (0.0).
+
+`description`, by default, will be set to the event message
+but can be overridden here.
+
+[id="{version}-plugins-{type}s-{plugin}-sender"]
+===== `sender`
+
+ * Value type is <>
+ * Default value is `"%{host}"`
+
+The name of the sender.
+This sets the `host` value
+in the Riemann event.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/s3-index.asciidoc b/docs/versioned-plugins/outputs/s3-index.asciidoc
new file mode 100644
index 000000000..d4b4e8220
--- /dev/null
+++ b/docs/versioned-plugins/outputs/s3-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: s3
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-07-19
+| <> | 2017-06-23
+|=======================================================================
+
+include::s3-v4.0.9.asciidoc[]
+include::s3-v4.0.8.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/s3-v4.0.8.asciidoc b/docs/versioned-plugins/outputs/s3-v4.0.8.asciidoc
new file mode 100644
index 000000000..81c3425ce
--- /dev/null
+++ b/docs/versioned-plugins/outputs/s3-v4.0.8.asciidoc
@@ -0,0 +1,327 @@
+:plugin: s3
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.8
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-s3/blob/v4.0.8/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== S3 output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This plugin batches and uploads Logstash events to Amazon Simple Storage Service (Amazon S3).
+
+Requirements:
+
+* Amazon S3 bucket and S3 access permissions (typically `access_key_id` and `secret_access_key`)
+* S3 PutObject permission
+
+S3 outputs create temporary files in the OS temporary directory. You can specify where to save them using the `temporary_directory` option.
+
+S3 output files have the following format:
+
+ls.s3.312bc026-2f5d-49bc-ae9f-5940cf4ad9a6.2013-04-18T10.00.tag_hello.part0.txt
+
+
+|=======
+| ls.s3 | indicates the Logstash S3 plugin |
+| 312bc026-2f5d-49bc-ae9f-5940cf4ad9a6 | a new, random UUID per file |
+| 2013-04-18T10.00 | the timestamp, at the granularity you specify with `time_file` |
+| tag_hello | the event's tag |
+| part0 | if you set `size_file`, additional parts are generated whenever the file size exceeds `size_file`. When a file is full, it is pushed to the bucket and then deleted from the temporary directory. If a file is empty, it is simply deleted; empty files are never pushed |
+|=======
+
+Crash recovery:
+
+* This plugin will recover and upload temporary log files after a crash or abnormal termination when `restore` is set to `true`.
+
+==== Usage
+
+This is an example of a Logstash config:
+[source,ruby]
+output {
+   s3 {
+     access_key_id => "crazy_key"             (required)
+     secret_access_key => "monkey_access_key" (required)
+     region => "eu-west-1"                    (optional, default = "us-east-1")
+     bucket => "your_bucket"                  (required)
+     size_file => 2048                        (optional) - Bytes
+     time_file => 5                           (optional) - Minutes
+     codec => "plain"                         (optional)
+     canned_acl => "private"                  (optional; one of "private", "public-read", "public-read-write", "authenticated-read"; defaults to "private")
+   }
+}
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== S3 Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bucket>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-canned_acl>> |<>, one of `["private", "public-read", "public-read-write", "authenticated-read"]`|No +| <<{version}-plugins-{type}s-{plugin}-encoding>> |<>, one of `["none", "gzip"]`|No +| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-restore>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-rotation_strategy>> |<>, one of `["size_and_time", "size", "time"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-server_side_encryption>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-server_side_encryption_algorithm>> |<>, one of `["AES256", "aws:kms"]`|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-signature_version>> |<>, one of `["v2", "v4"]`|No +| <<{version}-plugins-{type}s-{plugin}-size_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssekms_key_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-storage_class>> |<>, one of `["STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA"]`|No +| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-time_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-upload_queue_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-upload_workers_count>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-validate_credentials_on_root_bucket>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-access_key_id"] +===== `access_key_id` + + * Value type is <> + * There is no default value for this setting. + +This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: + +1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config +2. External credentials file specified by `aws_credentials_file` +3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` +4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` +5. IAM Instance Profile (available when running inside EC2) + +[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] +===== `aws_credentials_file` + + * Value type is <> + * There is no default value for this setting. + +Path to YAML file containing a hash of AWS credentials. +This file will only be loaded if `access_key_id` and +`secret_access_key` aren't set. 
The contents of the
+file should look like this:
+
+[source,ruby]
+----------------------------------
+    :access_key_id: "12345"
+    :secret_access_key: "54321"
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-bucket"]
+===== `bucket`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The S3 bucket name.
+
+[id="{version}-plugins-{type}s-{plugin}-canned_acl"]
+===== `canned_acl`
+
+ * Value can be any of: `private`, `public-read`, `public-read-write`, `authenticated-read`
+ * Default value is `"private"`
+
+The S3 canned ACL to use when putting the file. Defaults to "private".
+
+[id="{version}-plugins-{type}s-{plugin}-encoding"]
+===== `encoding`
+
+ * Value can be any of: `none`, `gzip`
+ * Default value is `"none"`
+
+Specify the content encoding. Only "gzip" is supported. Defaults to "none".
+
+[id="{version}-plugins-{type}s-{plugin}-prefix"]
+===== `prefix`
+
+ * Value type is <>
+ * Default value is `""`
+
+Specify a prefix for the uploaded filenames; this can be used to simulate directories on S3. The prefix does not require a leading slash.
+This option supports string interpolation; be warned that this can create a lot of temporary local files.
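+
+As an illustrative sketch (the bucket name is hypothetical), event `sprintf`
+date references can be interpolated into the prefix to simulate date-based
+directories; remember that each distinct prefix produces its own temporary
+local file:
+
+[source,ruby]
+    output {
+      s3 {
+        bucket => "my-logs"                       # hypothetical bucket
+        prefix => "logs/%{+YYYY}/%{+MM}/%{+dd}/"  # date-based "directories"
+      }
+    }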
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_uri"]
+===== `proxy_uri`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+URI of the proxy server, if required
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+ * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
+ * Default value is `"us-east-1"`
+
+The AWS Region
+
+[id="{version}-plugins-{type}s-{plugin}-restore"]
+===== `restore`
+
+ * Value type is <>
+ * Default value is `true`
+
+Enable crash recovery. When set to `true`, temporary log files left behind after a crash or abnormal termination are uploaded on startup.
+
+[id="{version}-plugins-{type}s-{plugin}-rotation_strategy"]
+===== `rotation_strategy`
+
+ * Value can be any of: `size_and_time`, `size`, `time`
+ * Default value is `"size_and_time"`
+
+Define the strategy used to decide when to rotate the file and push it to S3.
+The default strategy checks both size and time; the first one to match rotates the file.
+
+[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
+===== `secret_access_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The AWS Secret Access Key
+
+[id="{version}-plugins-{type}s-{plugin}-server_side_encryption"]
+===== `server_side_encryption`
+
+ * Value type is <>
+ * Default value is `false`
+
+Specifies whether or not to use S3's server-side encryption. Defaults to no encryption.
+
+[id="{version}-plugins-{type}s-{plugin}-server_side_encryption_algorithm"]
+===== `server_side_encryption_algorithm`
+
+ * Value can be any of: `AES256`, `aws:kms`
+ * Default value is `"AES256"`
+
+Specifies what type of encryption to use when SSE is enabled.
+
+[id="{version}-plugins-{type}s-{plugin}-session_token"]
+===== `session_token`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The AWS Session token for temporary credentials
+
+[id="{version}-plugins-{type}s-{plugin}-signature_version"]
+===== `signature_version`
+
+ * Value can be any of: `v2`, `v4`
+ * There is no default value for this setting.
+
+The version of the S3 signature hash to use. Normally uses the internal client default; it can be explicitly
+specified here.
+
+[id="{version}-plugins-{type}s-{plugin}-size_file"]
+===== `size_file`
+
+ * Value type is <>
+ * Default value is `5242880`
+
+Set the file size in bytes. When a file grows beyond this size, it is stored on the bucket as two or more files.
+If you have tags, a separate file of up to this size is generated for each tag.
+
+[id="{version}-plugins-{type}s-{plugin}-ssekms_key_id"]
+===== `ssekms_key_id`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The key to use when `server_side_encryption => aws:kms` is specified.
+If `server_side_encryption => aws:kms` is set but this option is not, the default KMS key is used.
+http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html
+
+[id="{version}-plugins-{type}s-{plugin}-storage_class"]
+===== `storage_class`
+
+ * Value can be any of: `STANDARD`, `REDUCED_REDUNDANCY`, `STANDARD_IA`
+ * Default value is `"STANDARD"`
+
+Specifies what S3 storage class to use when uploading the file.
+More information about the different storage classes can be found here:
+http://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
+Defaults to STANDARD.
+
+[id="{version}-plugins-{type}s-{plugin}-temporary_directory"]
+===== `temporary_directory`
+
+ * Value type is <>
+ * Default value is `"/tmp/logstash"`
+
+Set the directory where Logstash will store temporary files before sending them to S3.
+Defaults to the current OS temporary directory, e.g. /tmp/logstash on Linux.
+
+[id="{version}-plugins-{type}s-{plugin}-time_file"]
+===== `time_file`
+
+ * Value type is <>
+ * Default value is `15`
+
+Set the time, in minutes, after which the current time section of the bucket is closed.
+If you also define `size_file`, you will get a number of files determined by the time section and the current tag.
+A value of `0` keeps the current file open indefinitely. Beware: if you set both `time_file` and `size_file` to `0`,
+the file is never pushed to the bucket; for now, the only thing this plugin can do in that case is push the file when Logstash restarts.
+
+[id="{version}-plugins-{type}s-{plugin}-upload_queue_size"]
+===== `upload_queue_size`
+
+ * Value type is <>
+ * Default value is `4`
+
+Number of items we can keep in the local queue before uploading them
+
+[id="{version}-plugins-{type}s-{plugin}-upload_workers_count"]
+===== `upload_workers_count`
+
+ * Value type is <>
+ * Default value is `4`
+
+Specify how many workers to use to upload the files to S3
+
+[id="{version}-plugins-{type}s-{plugin}-validate_credentials_on_root_bucket"]
+===== `validate_credentials_on_root_bucket`
+
+ * Value type is <>
+ * Default value is `true`
+
+The common use case is to define permissions on the root bucket and give Logstash full access to write its logs.
+In some circumstances, you need finer-grained permissions on a subfolder; this option allows you to disable the credentials check at startup.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/s3-v4.0.9.asciidoc b/docs/versioned-plugins/outputs/s3-v4.0.9.asciidoc
new file mode 100644
index 000000000..7ef2b23cc
--- /dev/null
+++ b/docs/versioned-plugins/outputs/s3-v4.0.9.asciidoc
@@ -0,0 +1,327 @@
+:plugin: s3
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.9
+:release_date: 2017-07-19
+:changelog_url: https://github.com/logstash-plugins/logstash-output-s3/blob/v4.0.9/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== S3 output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This plugin batches and uploads Logstash events to Amazon Simple Storage Service (Amazon S3).
+
+Requirements:
+
+* Amazon S3 bucket and S3 access permissions (typically `access_key_id` and `secret_access_key`)
+* S3 PutObject permission
+
+S3 outputs create temporary files in the OS temporary directory. You can specify where to save them using the `temporary_directory` option.
+
+S3 output files have the following format:
+
+ls.s3.312bc026-2f5d-49bc-ae9f-5940cf4ad9a6.2013-04-18T10.00.tag_hello.part0.txt
+
+
+|=======
+| ls.s3 | indicates the Logstash S3 plugin |
+| 312bc026-2f5d-49bc-ae9f-5940cf4ad9a6 | a new, random UUID per file |
+| 2013-04-18T10.00 | the timestamp, at the granularity you specify with `time_file` |
+| tag_hello | the event's tag |
+| part0 | if you set `size_file`, additional parts are generated whenever the file size exceeds `size_file`. When a file is full, it is pushed to the bucket and then deleted from the temporary directory. If a file is empty, it is simply deleted; empty files are never pushed |
+|=======
+
+Crash recovery:
+
+* This plugin will recover and upload temporary log files after a crash or abnormal termination when `restore` is set to `true`.
+
+==== Usage
+
+This is an example of a Logstash config:
+[source,ruby]
+output {
+   s3 {
+     access_key_id => "crazy_key"             (required)
+     secret_access_key => "monkey_access_key" (required)
+     region => "eu-west-1"                    (optional, default = "us-east-1")
+     bucket => "your_bucket"                  (required)
+     size_file => 2048                        (optional) - Bytes
+     time_file => 5                           (optional) - Minutes
+     codec => "plain"                         (optional)
+     canned_acl => "private"                  (optional; one of "private", "public-read", "public-read-write", "authenticated-read"; defaults to "private")
+   }
+}
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== S3 Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-bucket>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-canned_acl>> |<>, one of `["private", "public-read", "public-read-write", "authenticated-read"]`|No +| <<{version}-plugins-{type}s-{plugin}-encoding>> |<>, one of `["none", "gzip"]`|No +| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-restore>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-rotation_strategy>> |<>, one of `["size_and_time", "size", "time"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-server_side_encryption>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-server_side_encryption_algorithm>> |<>, one of `["AES256", "aws:kms"]`|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-signature_version>> |<>, one of `["v2", "v4"]`|No +| <<{version}-plugins-{type}s-{plugin}-size_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssekms_key_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-storage_class>> |<>, one of `["STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA"]`|No +| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-time_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-upload_queue_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-upload_workers_count>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-validate_credentials_on_root_bucket>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-access_key_id"] +===== `access_key_id` + + * Value type is <> + * There is no default value for this setting. + +This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: + +1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config +2. External credentials file specified by `aws_credentials_file` +3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` +4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` +5. IAM Instance Profile (available when running inside EC2) + +[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] +===== `aws_credentials_file` + + * Value type is <> + * There is no default value for this setting. + +Path to YAML file containing a hash of AWS credentials. +This file will only be loaded if `access_key_id` and +`secret_access_key` aren't set. 
The contents of the
+file should look like this:
+
+[source,ruby]
+----------------------------------
+    :access_key_id: "12345"
+    :secret_access_key: "54321"
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-bucket"]
+===== `bucket`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The S3 bucket name.
+
+[id="{version}-plugins-{type}s-{plugin}-canned_acl"]
+===== `canned_acl`
+
+ * Value can be any of: `private`, `public-read`, `public-read-write`, `authenticated-read`
+ * Default value is `"private"`
+
+The S3 canned ACL to use when putting the file. Defaults to "private".
+
+[id="{version}-plugins-{type}s-{plugin}-encoding"]
+===== `encoding`
+
+ * Value can be any of: `none`, `gzip`
+ * Default value is `"none"`
+
+Specify the content encoding. Only "gzip" is supported. Defaults to "none".
+
+[id="{version}-plugins-{type}s-{plugin}-prefix"]
+===== `prefix`
+
+ * Value type is <>
+ * Default value is `""`
+
+Specify a prefix for the uploaded filenames; this can be used to simulate directories on S3. The prefix does not require a leading slash.
+This option supports string interpolation; be warned that this can create a lot of temporary local files.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_uri"]
+===== `proxy_uri`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+URI of the proxy server, if required
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+ * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
+ * Default value is `"us-east-1"`
+
+The AWS Region
+
+[id="{version}-plugins-{type}s-{plugin}-restore"]
+===== `restore`
+
+ * Value type is <>
+ * Default value is `true`
+
+Enable crash recovery. When set to `true`, temporary log files left behind after a crash or abnormal termination are uploaded on startup.
+
+[id="{version}-plugins-{type}s-{plugin}-rotation_strategy"]
+===== `rotation_strategy`
+
+ * Value can be any of: `size_and_time`, `size`, `time`
+ * Default value is `"size_and_time"`
+
+Define the strategy used to decide when to rotate the file and push it to S3.
+The default strategy checks both size and time; the first one to match rotates the file.
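+
+For instance, here is a sketch (the bucket name is hypothetical) that rotates
+on whichever limit is hit first, using the size and time options documented
+below:
+
+[source,ruby]
+    output {
+      s3 {
+        bucket => "my-logs"                   # hypothetical bucket
+        rotation_strategy => "size_and_time"
+        size_file => 10485760                 # rotate at 10 MB...
+        time_file => 5                        # ...or after 5 minutes
+      }
+    }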
+
+[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
+===== `secret_access_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The AWS Secret Access Key
+
+[id="{version}-plugins-{type}s-{plugin}-server_side_encryption"]
+===== `server_side_encryption`
+
+ * Value type is <>
+ * Default value is `false`
+
+Specifies whether or not to use S3's server-side encryption. Defaults to no encryption.
+
+[id="{version}-plugins-{type}s-{plugin}-server_side_encryption_algorithm"]
+===== `server_side_encryption_algorithm`
+
+ * Value can be any of: `AES256`, `aws:kms`
+ * Default value is `"AES256"`
+
+Specifies what type of encryption to use when SSE is enabled.
+
+[id="{version}-plugins-{type}s-{plugin}-session_token"]
+===== `session_token`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The AWS Session token for temporary credentials
+
+[id="{version}-plugins-{type}s-{plugin}-signature_version"]
+===== `signature_version`
+
+ * Value can be any of: `v2`, `v4`
+ * There is no default value for this setting.
+
+The version of the S3 signature hash to use. Normally uses the internal client default; it can be explicitly
+specified here.
+
+[id="{version}-plugins-{type}s-{plugin}-size_file"]
+===== `size_file`
+
+ * Value type is <>
+ * Default value is `5242880`
+
+Set the file size in bytes. When a file grows beyond this size, it is stored on the bucket as two or more files.
+If you have tags, a separate file of up to this size is generated for each tag.
+
+[id="{version}-plugins-{type}s-{plugin}-ssekms_key_id"]
+===== `ssekms_key_id`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The key to use when `server_side_encryption => aws:kms` is specified.
+If `server_side_encryption => aws:kms` is set but this option is not, the default KMS key is used.
+http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html
+
+[id="{version}-plugins-{type}s-{plugin}-storage_class"]
+===== `storage_class`
+
+ * Value can be any of: `STANDARD`, `REDUCED_REDUNDANCY`, `STANDARD_IA`
+ * Default value is `"STANDARD"`
+
+Specifies what S3 storage class to use when uploading the file.
+More information about the different storage classes can be found here:
+http://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
+Defaults to STANDARD.
+
+[id="{version}-plugins-{type}s-{plugin}-temporary_directory"]
+===== `temporary_directory`
+
+ * Value type is <>
+ * Default value is `"/tmp/logstash"`
+
+Set the directory where Logstash will store temporary files before sending them to S3.
+Defaults to the current OS temporary directory, e.g. /tmp/logstash on Linux.
+
+[id="{version}-plugins-{type}s-{plugin}-time_file"]
+===== `time_file`
+
+ * Value type is <>
+ * Default value is `15`
+
+Set the time, in minutes, after which the current time section of the bucket is closed.
+If you also define `size_file`, you will get a number of files determined by the time section and the current tag.
+A value of `0` keeps the current file open indefinitely. Beware: if you set both `time_file` and `size_file` to `0`,
+the file is never pushed to the bucket; for now, the only thing this plugin can do in that case is push the file when Logstash restarts.
+
+[id="{version}-plugins-{type}s-{plugin}-upload_queue_size"]
+===== `upload_queue_size`
+
+ * Value type is <>
+ * Default value is `4`
+
+Number of items we can keep in the local queue before uploading them
+
+[id="{version}-plugins-{type}s-{plugin}-upload_workers_count"]
+===== `upload_workers_count`
+
+ * Value type is <>
+ * Default value is `4`
+
+Specify how many workers to use to upload the files to S3
+
+[id="{version}-plugins-{type}s-{plugin}-validate_credentials_on_root_bucket"]
+===== `validate_credentials_on_root_bucket`
+
+ * Value type is <>
+ * Default value is `true`
+
+The common use case is to define permissions on the root bucket and give Logstash full access to write its logs.
+In some circumstances, you need finer-grained permissions on a subfolder; this option allows you to disable the credentials check at startup.
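+
+For example, when Logstash may only write under a subfolder, a sketch (bucket
+and prefix names hypothetical) would scope uploads with `prefix` and skip the
+root-bucket check:
+
+[source,ruby]
+    output {
+      s3 {
+        bucket => "shared-bucket"                     # hypothetical bucket
+        prefix => "logstash/"                         # only this subfolder is writable
+        validate_credentials_on_root_bucket => false  # skip the startup check
+      }
+    }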
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/slack-index.asciidoc b/docs/versioned-plugins/outputs/slack-index.asciidoc
new file mode 100644
index 000000000..c0a342b76
--- /dev/null
+++ b/docs/versioned-plugins/outputs/slack-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: slack
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-12-22
+| <> | 2017-06-23
+|=======================================================================
+
+include::slack-v2.1.0.asciidoc[]
+include::slack-v2.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/slack-v2.0.3.asciidoc b/docs/versioned-plugins/outputs/slack-v2.0.3.asciidoc
new file mode 100644
index 000000000..9417316ed
--- /dev/null
+++ b/docs/versioned-plugins/outputs/slack-v2.0.3.asciidoc
@@ -0,0 +1,107 @@
+:plugin: slack
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-slack/blob/v2.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Slack output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output posts events to a Slack channel via an incoming webhook.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Slack Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-attachments>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-channel>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-icon_emoji>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-icon_url>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-username>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-attachments"]
+===== `attachments`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Attachments array, as described at https://api.slack.com/docs/attachments
+
+[id="{version}-plugins-{type}s-{plugin}-channel"]
+===== `channel`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The channel to post to
+
+[id="{version}-plugins-{type}s-{plugin}-format"]
+===== `format`
+
+ * Value type is <>
+ * Default value is `"%{message}"`
+
+The text to post in Slack
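+
+Putting the required and common options together, here is a sketch that
+posts each event's message to a channel; the webhook URL and channel name
+are placeholders:
+
+[source,ruby]
+    output {
+      slack {
+        url => "https://hooks.slack.com/services/XXX/YYY/ZZZ"  # placeholder webhook
+        channel => "#logstash"
+        format => "%{host}: %{message}"
+      }
+    }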
+
+[id="{version}-plugins-{type}s-{plugin}-icon_emoji"]
+===== `icon_emoji`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Emoji icon to use
+
+[id="{version}-plugins-{type}s-{plugin}-icon_url"]
+===== `icon_url`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Icon URL to use
+
+[id="{version}-plugins-{type}s-{plugin}-url"]
+===== `url`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The incoming webhook URI needed to post a message
+
+[id="{version}-plugins-{type}s-{plugin}-username"]
+===== `username`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The username to use for posting
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/slack-v2.1.0.asciidoc b/docs/versioned-plugins/outputs/slack-v2.1.0.asciidoc
new file mode 100644
index 000000000..c012cd548
--- /dev/null
+++ b/docs/versioned-plugins/outputs/slack-v2.1.0.asciidoc
@@ -0,0 +1,107 @@
+:plugin: slack
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v2.1.0
+:release_date: 2017-12-22
+:changelog_url: https://github.com/logstash-plugins/logstash-output-slack/blob/v2.1.0/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Slack output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output posts events to a Slack channel via an incoming webhook.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Slack Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-attachments>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-channel>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-icon_emoji>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-icon_url>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-username>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-attachments"]
+===== `attachments`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Attachments array, as described at https://api.slack.com/docs/attachments
+
+[id="{version}-plugins-{type}s-{plugin}-channel"]
+===== `channel`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The channel to post to
+
+[id="{version}-plugins-{type}s-{plugin}-format"]
+===== `format`
+
+ * Value type is <>
+ * Default value is `"%{message}"`
+
+The text to post in Slack
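+
+Similarly for this version, a sketch (placeholder webhook) that customizes
+the bot's display name and icon using the options documented below:
+
+[source,ruby]
+    output {
+      slack {
+        url => "https://hooks.slack.com/services/XXX/YYY/ZZZ"  # placeholder webhook
+        username => "logstash-bot"
+        icon_emoji => ":robot_face:"
+      }
+    }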
+
+[id="{version}-plugins-{type}s-{plugin}-icon_emoji"]
+===== `icon_emoji`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Emoji icon to use
+
+[id="{version}-plugins-{type}s-{plugin}-icon_url"]
+===== `icon_url`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Icon URL to use
+
+[id="{version}-plugins-{type}s-{plugin}-url"]
+===== `url`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The incoming webhook URI needed to post a message
+
+[id="{version}-plugins-{type}s-{plugin}-username"]
+===== `username`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The username to use for posting
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/sns-index.asciidoc b/docs/versioned-plugins/outputs/sns-index.asciidoc
new file mode 100644
index 000000000..a0ad6b3e3
--- /dev/null
+++ b/docs/versioned-plugins/outputs/sns-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: sns
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-13
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::sns-v4.0.6.asciidoc[]
+include::sns-v4.0.5.asciidoc[]
+include::sns-v4.0.4.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/sns-v4.0.4.asciidoc b/docs/versioned-plugins/outputs/sns-v4.0.4.asciidoc
new file mode 100644
index 000000000..54b021ffe
--- /dev/null
+++ b/docs/versioned-plugins/outputs/sns-v4.0.4.asciidoc
@@ -0,0 +1,162 @@
+:plugin: sns
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.4
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-sns/blob/v4.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Sns output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+SNS output.
+
+Send events to Amazon's Simple Notification Service, a hosted pub/sub
+framework. It supports various subscription types, including email, HTTP/S, SMS, and SQS.
+
+For further documentation about the service see:
+
+  http://docs.amazonwebservices.com/sns/latest/api/
+
+This plugin looks for the following fields on events it receives:
+
+ * `sns` - If no ARN is found in the configuration file, this will be used as
+   the ARN to publish.
+ * `sns_subject` - The subject line that should be used.
+   Optional. "%{host}" will be used if `sns_subject` is not present. The subject
+   will be truncated to 100 characters. If `sns_subject` is set to a non-string value, a JSON version of that value will be saved.
+ * `sns_message` - Optional string of message to be sent. If this is set to a non-string value, it will be encoded with the specified `codec`. If this is not set, the entire event will be encoded with the codec,
+   with the @message truncated so that the length of the JSON fits in
+   `32768` bytes.
+
+==== Upgrading to 2.0.0
+
+This plugin used to have a `format` option for controlling the encoding of messages prior to being sent to SNS.
+This plugin now uses the Logstash standard <> option for encoding instead.
+If you want the same 'plain' format as the v0/1 codec (`format => "plain"`), use `codec => "s3_plain"`.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Sns Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-arn>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-publish_boot_message_arn>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-access_key_id"] +===== `access_key_id` + + * Value type is <> + * There is no default value for this setting. + +This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: + +1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config +2. External credentials file specified by `aws_credentials_file` +3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` +4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` +5. IAM Instance Profile (available when running inside EC2) + +[id="{version}-plugins-{type}s-{plugin}-arn"] +===== `arn` + + * Value type is <> + * There is no default value for this setting. + +Optional ARN to send messages to. If you do not set this you must +include the `sns` field in your events to set the ARN on a per-message basis! + +[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] +===== `aws_credentials_file` + + * Value type is <> + * There is no default value for this setting. + +Path to YAML file containing a hash of AWS credentials. +This file will only be loaded if `access_key_id` and +`secret_access_key` aren't set. The contents of the +file should look like this: + +[source,ruby] +---------------------------------- + :access_key_id: "12345" + :secret_access_key: "54321" +---------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] +===== `proxy_uri` + + * Value type is <> + * There is no default value for this setting. + +URI to proxy server if required + +[id="{version}-plugins-{type}s-{plugin}-publish_boot_message_arn"] +===== `publish_boot_message_arn` + + * Value type is <> + * There is no default value for this setting. + +When an ARN for an SNS topic is specified here, the message +"Logstash successfully booted" will be sent to it when this plugin +is registered. 
+
+Example: arn:aws:sns:us-east-1:770975001275:logstash-testing
+
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+ * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
+ * Default value is `"us-east-1"`
+
+The AWS Region
+
+[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
+===== `secret_access_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The AWS Secret Access Key
+
+[id="{version}-plugins-{type}s-{plugin}-session_token"]
+===== `session_token`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The AWS Session token for temporary credentials
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/sns-v4.0.5.asciidoc b/docs/versioned-plugins/outputs/sns-v4.0.5.asciidoc
new file mode 100644
index 000000000..ebc9fd852
--- /dev/null
+++ b/docs/versioned-plugins/outputs/sns-v4.0.5.asciidoc
@@ -0,0 +1,162 @@
+:plugin: sns
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.5
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-output-sns/blob/v4.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Sns output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+SNS output.
+
+Send events to Amazon's Simple Notification Service, a hosted pub/sub
+framework. It supports various subscription types, including email, HTTP/S, SMS, and SQS.
+
+For further documentation about the service see:
+
+  http://docs.amazonwebservices.com/sns/latest/api/
+
+This plugin looks for the following fields on events it receives:
+
+ * `sns` - If no ARN is found in the configuration file, this will be used as
+   the ARN to publish.
+ * `sns_subject` - The subject line that should be used.
+   Optional. "%{host}" will be used if `sns_subject` is not present. The subject
+   will be truncated to 100 characters. If `sns_subject` is set to a non-string value, a JSON version of that value will be saved.
+ * `sns_message` - Optional string of message to be sent. If this is set to a non-string value, it will be encoded with the specified `codec`. If this is not set, the entire event will be encoded with the codec,
+   with the @message truncated so that the length of the JSON fits in
+   `32768` bytes.
+
+==== Upgrading to 2.0.0
+
+This plugin used to have a `format` option for controlling the encoding of messages prior to being sent to SNS.
+This plugin now uses the Logstash standard <> option for encoding instead.
+If you want the same 'plain' format as the v0/1 codec (`format => "plain"`), use `codec => "s3_plain"`.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Sns Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-arn>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-publish_boot_message_arn>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-access_key_id"] +===== `access_key_id` + + * Value type is <> + * There is no default value for this setting. + +This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: + +1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config +2. External credentials file specified by `aws_credentials_file` +3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` +4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` +5. IAM Instance Profile (available when running inside EC2) + +[id="{version}-plugins-{type}s-{plugin}-arn"] +===== `arn` + + * Value type is <> + * There is no default value for this setting. + +Optional ARN to send messages to. If you do not set this you must +include the `sns` field in your events to set the ARN on a per-message basis! + +[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] +===== `aws_credentials_file` + + * Value type is <> + * There is no default value for this setting. + +Path to YAML file containing a hash of AWS credentials. +This file will only be loaded if `access_key_id` and +`secret_access_key` aren't set. The contents of the +file should look like this: + +[source,ruby] +---------------------------------- + :access_key_id: "12345" + :secret_access_key: "54321" +---------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] +===== `proxy_uri` + + * Value type is <> + * There is no default value for this setting. + +URI to proxy server if required + +[id="{version}-plugins-{type}s-{plugin}-publish_boot_message_arn"] +===== `publish_boot_message_arn` + + * Value type is <> + * There is no default value for this setting. + +When an ARN for an SNS topic is specified here, the message +"Logstash successfully booted" will be sent to it when this plugin +is registered. 
+
+Example: arn:aws:sns:us-east-1:770975001275:logstash-testing
+
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+ * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
+ * Default value is `"us-east-1"`
+
+The AWS Region
+
+[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
+===== `secret_access_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The AWS Secret Access Key
+
+[id="{version}-plugins-{type}s-{plugin}-session_token"]
+===== `session_token`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The AWS Session token for temporary credentials
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/sns-v4.0.6.asciidoc b/docs/versioned-plugins/outputs/sns-v4.0.6.asciidoc
new file mode 100644
index 000000000..c65ed531f
--- /dev/null
+++ b/docs/versioned-plugins/outputs/sns-v4.0.6.asciidoc
@@ -0,0 +1,162 @@
+:plugin: sns
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.6
+:release_date: 2017-11-13
+:changelog_url: https://github.com/logstash-plugins/logstash-output-sns/blob/v4.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Sns output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+SNS output.
+
+Send events to Amazon's Simple Notification Service, a hosted pub/sub
+framework. It supports various subscription types, including email, HTTP/S, SMS, and SQS.
+
+For further documentation about the service see:
+
+  http://docs.amazonwebservices.com/sns/latest/api/
+
+This plugin looks for the following fields on events it receives:
+
+ * `sns` - If no ARN is found in the configuration file, this will be used as
+   the ARN to publish.
+ * `sns_subject` - The subject line that should be used.
+   Optional. "%{host}" will be used if `sns_subject` is not present. The subject
+   will be truncated to 100 characters. If `sns_subject` is set to a non-string value, a JSON version of that value will be saved.
+ * `sns_message` - Optional string of message to be sent. If this is set to a non-string value, it will be encoded with the specified `codec`. If this is not set, the entire event will be encoded with the codec,
+   with the @message truncated so that the length of the JSON fits in
+   `32768` bytes.
+
+==== Upgrading to 2.0.0
+
+This plugin used to have a `format` option for controlling the encoding of messages prior to being sent to SNS.
+This plugin now uses the Logstash standard <> option for encoding instead.
+If you want the same 'plain' format as the v0/1 codec (`format => "plain"`), use `codec => "s3_plain"`.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Sns Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-arn>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-publish_boot_message_arn>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No
+| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-access_key_id"]
+===== `access_key_id`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
+
+1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config
+2. External credentials file specified by `aws_credentials_file`
+3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
+4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
+5. IAM Instance Profile (available when running inside EC2)
+
+[id="{version}-plugins-{type}s-{plugin}-arn"]
+===== `arn`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Optional ARN to send messages to. If you do not set this, you must
+include the `sns` field in your events to set the ARN on a per-message basis!
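+
+For example, here is a sketch (the topic ARN is hypothetical) that publishes
+every event to a fixed topic instead of relying on a per-event `sns` field:
+
+[source,ruby]
+    output {
+      sns {
+        arn => "arn:aws:sns:us-east-1:123456789012:logstash-events"  # hypothetical topic ARN
+        region => "us-east-1"
+      }
+    }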
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-arn>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-publish_boot_message_arn>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-region>> |<<string,string>>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No
+| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-session_token>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-access_key_id"]
+===== `access_key_id`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
+
+1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config
+2. External credentials file specified by `aws_credentials_file`
+3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
+4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
+5. IAM Instance Profile (available when running inside EC2)
+
+[id="{version}-plugins-{type}s-{plugin}-arn"]
+===== `arn`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Optional ARN to send messages to. If you do not set this, you must
+include the `sns` field in your events to set the ARN on a per-message basis.
+
+[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"]
+===== `aws_credentials_file`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Path to a YAML file containing a hash of AWS credentials.
+This file will only be loaded if `access_key_id` and
+`secret_access_key` aren't set. The contents of the
+file should look like this:
+
+[source,ruby]
+----------------------------------
+    :access_key_id: "12345"
+    :secret_access_key: "54321"
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_uri"]
+===== `proxy_uri`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+URI of a proxy server, if required.
+
+[id="{version}-plugins-{type}s-{plugin}-publish_boot_message_arn"]
+===== `publish_boot_message_arn`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+When an ARN for an SNS topic is specified here, the message
+"Logstash successfully booted" will be sent to it when this plugin
+is registered. 
+
+Example: `arn:aws:sns:us-east-1:770975001275:logstash-testing`
+
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+ * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
+ * Default value is `"us-east-1"`
+
+The AWS Region
+
+[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
+===== `secret_access_key`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The AWS Secret Access Key
+
+[id="{version}-plugins-{type}s-{plugin}-session_token"]
+===== `session_token`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The AWS Session token for temporary credentials
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/solr_http-index.asciidoc b/docs/versioned-plugins/outputs/solr_http-index.asciidoc
new file mode 100644
index 000000000..db7f717f1
--- /dev/null
+++ b/docs/versioned-plugins/outputs/solr_http-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: solr_http
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <<v3.0.4-plugins-outputs-solr_http,v3.0.4>> | 2017-11-13
+| <<v3.0.3-plugins-outputs-solr_http,v3.0.3>> | 2017-08-16
+| <<v3.0.2-plugins-outputs-solr_http,v3.0.2>> | 2017-06-23
+|=======================================================================
+
+include::solr_http-v3.0.4.asciidoc[]
+include::solr_http-v3.0.3.asciidoc[]
+include::solr_http-v3.0.2.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/solr_http-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/solr_http-v3.0.2.asciidoc
new file mode 100644
index 000000000..5320f43c9
--- /dev/null
+++ b/docs/versioned-plugins/outputs/solr_http-v3.0.2.asciidoc
@@ -0,0 +1,92 @@
+:plugin: solr_http
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-solr_http/blob/v3.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Solr_http output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output lets you index and store your logs in Solr. If you want to get
+started quickly, you should use Solr 4.4 or above in schemaless mode,
+which will try to guess your fields automatically. To turn that on,
+you can use the example included in the Solr archive:
+
+[source,shell]
+  tar zxf solr-4.4.0.tgz
+  cd example
+  mv solr solr_                      # back up the existing sample conf
+  cp -r example-schemaless/solr/ .   # put the schemaless conf in place
+  java -jar start.jar                # start Solr
+
+You can learn more at https://lucene.apache.org/solr/[the Solr home page].
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Solr_http Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-document_id>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-flush_size>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-idle_flush_time>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-solr_url>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is <<string,string>>
+ * Default value is `nil`
+
+Solr document ID for events. You'd typically use a field reference here, such
+as `%{foo}`, so you can assign your own IDs.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_size"]
+===== `flush_size`
+
+ * Value type is <<number,number>>
+ * Default value is `100`
+
+Number of events to queue up before writing to Solr.
+
+[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"]
+===== `idle_flush_time`
+
+ * Value type is <<number,number>>
+ * Default value is `1`
+
+Amount of time (in seconds) since the last flush before a flush is done, even
+if the number of buffered events is smaller than `flush_size`.
+
+[id="{version}-plugins-{type}s-{plugin}-solr_url"]
+===== `solr_url`
+
+ * Value type is <<string,string>>
+ * Default value is `"http://localhost:8983/solr"`
+
+URL used to connect to Solr.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/solr_http-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/solr_http-v3.0.3.asciidoc
new file mode 100644
index 000000000..f72395de1
--- /dev/null
+++ b/docs/versioned-plugins/outputs/solr_http-v3.0.3.asciidoc
@@ -0,0 +1,92 @@
+:plugin: solr_http
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-output-solr_http/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Solr_http output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output lets you index and store your logs in Solr. If you want to get
+started quickly, you should use Solr 4.4 or above in schemaless mode,
+which will try to guess your fields automatically. To turn that on,
+you can use the example included in the Solr archive:
+
+[source,shell]
+  tar zxf solr-4.4.0.tgz
+  cd example
+  mv solr solr_                      # back up the existing sample conf
+  cp -r example-schemaless/solr/ .   # put the schemaless conf in place
+  java -jar start.jar                # start Solr
+
+You can learn more at https://lucene.apache.org/solr/[the Solr home page].
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Solr_http Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-document_id>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-flush_size>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-idle_flush_time>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-solr_url>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is <<string,string>>
+ * Default value is `nil`
+
+Solr document ID for events. You'd typically use a field reference here, such
+as `%{foo}`, so you can assign your own IDs.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_size"]
+===== `flush_size`
+
+ * Value type is <<number,number>>
+ * Default value is `100`
+
+Number of events to queue up before writing to Solr.
+
+[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"]
+===== `idle_flush_time`
+
+ * Value type is <<number,number>>
+ * Default value is `1`
+
+Amount of time (in seconds) since the last flush before a flush is done, even
+if the number of buffered events is smaller than `flush_size`.
+
+[id="{version}-plugins-{type}s-{plugin}-solr_url"]
+===== `solr_url`
+
+ * Value type is <<string,string>>
+ * Default value is `"http://localhost:8983/solr"`
+
+URL used to connect to Solr.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/solr_http-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/solr_http-v3.0.4.asciidoc
new file mode 100644
index 000000000..fed11012c
--- /dev/null
+++ b/docs/versioned-plugins/outputs/solr_http-v3.0.4.asciidoc
@@ -0,0 +1,92 @@
+:plugin: solr_http
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-11-13
+:changelog_url: https://github.com/logstash-plugins/logstash-output-solr_http/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Solr_http output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output lets you index and store your logs in Solr. If you want to get
+started quickly, you should use Solr 4.4 or above in schemaless mode,
+which will try to guess your fields automatically. To turn that on,
+you can use the example included in the Solr archive:
+
+[source,shell]
+  tar zxf solr-4.4.0.tgz
+  cd example
+  mv solr solr_                      # back up the existing sample conf
+  cp -r example-schemaless/solr/ .   # put the schemaless conf in place
+  java -jar start.jar                # start Solr
+
+You can learn more at https://lucene.apache.org/solr/[the Solr home page].
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Solr_http Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-document_id>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-flush_size>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-idle_flush_time>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-solr_url>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-document_id"]
+===== `document_id`
+
+ * Value type is <<string,string>>
+ * Default value is `nil`
+
+Solr document ID for events. You'd typically use a field reference here, such
+as `%{foo}`, so you can assign your own IDs.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_size"]
+===== `flush_size`
+
+ * Value type is <<number,number>>
+ * Default value is `100`
+
+Number of events to queue up before writing to Solr.
+
+[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"]
+===== `idle_flush_time`
+
+ * Value type is <<number,number>>
+ * Default value is `1`
+
+Amount of time (in seconds) since the last flush before a flush is done, even
+if the number of buffered events is smaller than `flush_size`.
+
+[id="{version}-plugins-{type}s-{plugin}-solr_url"]
+===== `solr_url`
+
+ * Value type is <<string,string>>
+ * Default value is `"http://localhost:8983/solr"`
+
+URL used to connect to Solr.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/sqs-index.asciidoc b/docs/versioned-plugins/outputs/sqs-index.asciidoc
new file mode 100644
index 000000000..2dc06d1de
--- /dev/null
+++ b/docs/versioned-plugins/outputs/sqs-index.asciidoc
@@ -0,0 +1,20 @@
+:plugin: sqs
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <<v5.0.2-plugins-outputs-sqs,v5.0.2>> | 2017-11-13
+| <<v5.0.1-plugins-outputs-sqs,v5.0.1>> | 2017-08-16
+| <<v5.0.0-plugins-outputs-sqs,v5.0.0>> | 2017-08-01
+| <<v4.0.3-plugins-outputs-sqs,v4.0.3>> | 2017-08-18
+| <<v4.0.2-plugins-outputs-sqs,v4.0.2>> | 2017-06-23
+|=======================================================================
+
+include::sqs-v5.0.2.asciidoc[]
+include::sqs-v5.0.1.asciidoc[]
+include::sqs-v5.0.0.asciidoc[]
+include::sqs-v4.0.3.asciidoc[]
+include::sqs-v4.0.2.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/sqs-v4.0.2.asciidoc b/docs/versioned-plugins/outputs/sqs-v4.0.2.asciidoc
new file mode 100644
index 000000000..206a23be9
--- /dev/null
+++ b/docs/versioned-plugins/outputs/sqs-v4.0.2.asciidoc
@@ -0,0 +1,218 @@
+:plugin: sqs
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-sqs/blob/v4.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Sqs output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Push events to an Amazon Web Services (AWS) Simple Queue Service (SQS) queue.
+
+SQS is a simple, scalable queue system that is part of the Amazon Web
+Services suite of tools. 
Although SQS is similar to other queuing systems
+such as Advanced Message Queuing Protocol (AMQP), it uses a custom API and
+requires that you have an AWS account. See http://aws.amazon.com/sqs/ for
+more details on how SQS works, what the pricing schedule looks like, and how
+to set up a queue.
+
+The "consumer" identity must have the following permissions on the queue:
+
+ * `sqs:GetQueueUrl`
+ * `sqs:SendMessage`
+ * `sqs:SendMessageBatch`
+
+Typically, you should set up an IAM policy, create a user, and apply the IAM
+policy to the user. See http://aws.amazon.com/iam/ for more details on
+setting up AWS identities. A sample policy is as follows:
+
+[source,json]
+{
+   "Version": "2012-10-17",
+   "Statement": [
+      {
+         "Effect": "Allow",
+         "Action": [
+            "sqs:GetQueueUrl",
+            "sqs:SendMessage",
+            "sqs:SendMessageBatch"
+         ],
+         "Resource": "arn:aws:sqs:us-east-1:123456789012:my-sqs-queue"
+      }
+   ]
+}
+
+==== Batch Publishing
+This output publishes messages to SQS in batches in order to optimize event
+throughput and increase performance. This is done using the
+http://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessageBatch.html[`SendMessageBatch`]
+API. When publishing messages to SQS in batches, the following service limits
+must be respected (see
+http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html[Limits in Amazon SQS]):
+
+ * The maximum allowed individual message size is 256KiB.
+ * The maximum total payload size (i.e. the sum of the sizes of all
+   individual messages within a batch) is also 256KiB.
+
+This plugin will dynamically adjust the size of the batch published to SQS in
+order to ensure that the total payload size does not exceed 256KiB.
+
+WARNING: This output cannot currently handle messages larger than 256KiB. Any
+single message exceeding this size will be dropped.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Sqs Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-batch_events>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-message_max_size>> |<<bytes,bytes>>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-queue>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-region>> |<<string,string>>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No
+| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-session_token>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-access_key_id"]
+===== `access_key_id`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
+
+1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config
+2. External credentials file specified by `aws_credentials_file`
+3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
+4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
+5. IAM Instance Profile (available when running inside EC2)
+
+[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"]
+===== `aws_credentials_file`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+Path to a YAML file containing a hash of AWS credentials.
+This file will only be loaded if `access_key_id` and
+`secret_access_key` aren't set. The contents of the
+file should look like this:
+
+[source,ruby]
+----------------------------------
+    :access_key_id: "12345"
+    :secret_access_key: "54321"
+----------------------------------
+
+
+[id="{version}-plugins-{type}s-{plugin}-batch"]
+===== `batch` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <<boolean,boolean>>
+ * Default value is `true`
+
+Set to `true` to send messages to SQS in batches (with the
+`SendMessageBatch` API) or `false` to send messages to SQS individually
+(with the `SendMessage` API). The size of the batch is configurable via
+`batch_events`.
+
+[id="{version}-plugins-{type}s-{plugin}-batch_events"]
+===== `batch_events`
+
+ * Value type is <<number,number>>
+ * Default value is `10`
+
+The number of events to be sent in each batch. Set this to `1` to disable
+the batch sending of messages.
+
+[id="{version}-plugins-{type}s-{plugin}-batch_timeout"]
+===== `batch_timeout` (DEPRECATED)
+
+ * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+ * Value type is <<number,number>>
+ * There is no default value for this setting.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-message_max_size"]
+===== `message_max_size`
+
+ * Value type is <<bytes,bytes>>
+ * Default value is `"256KiB"`
+
+The maximum number of bytes for any message sent to SQS. Messages exceeding
+this size will be dropped. See
+http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html.
+
+[id="{version}-plugins-{type}s-{plugin}-proxy_uri"]
+===== `proxy_uri`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+URI of a proxy server, if required.
+
+[id="{version}-plugins-{type}s-{plugin}-queue"]
+===== `queue`
+
+ * This is a required setting.
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The name of the target SQS queue. Note that this is just the name of the
+queue, not the URL or ARN.
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+ * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
+ * Default value is `"us-east-1"`
+
+The AWS Region
+
+[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
+===== `secret_access_key`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The AWS Secret Access Key
+
+[id="{version}-plugins-{type}s-{plugin}-session_token"]
+===== `session_token`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting. 
+
+The AWS Session token for temporary credentials
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/sqs-v4.0.3.asciidoc b/docs/versioned-plugins/outputs/sqs-v4.0.3.asciidoc
new file mode 100644
index 000000000..87fe0e069
--- /dev/null
+++ b/docs/versioned-plugins/outputs/sqs-v4.0.3.asciidoc
@@ -0,0 +1,218 @@
+:plugin: sqs
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v4.0.3
+:release_date: 2017-08-18
+:changelog_url: https://github.com/logstash-plugins/logstash-output-sqs/blob/v4.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Sqs output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Push events to an Amazon Web Services (AWS) Simple Queue Service (SQS) queue.
+
+SQS is a simple, scalable queue system that is part of the Amazon Web
+Services suite of tools. Although SQS is similar to other queuing systems
+such as Advanced Message Queuing Protocol (AMQP), it uses a custom API and
+requires that you have an AWS account. See http://aws.amazon.com/sqs/ for
+more details on how SQS works, what the pricing schedule looks like, and how
+to set up a queue.
+
+The "consumer" identity must have the following permissions on the queue:
+
+ * `sqs:GetQueueUrl`
+ * `sqs:SendMessage`
+ * `sqs:SendMessageBatch`
+
+Typically, you should set up an IAM policy, create a user, and apply the IAM
+policy to the user. See http://aws.amazon.com/iam/ for more details on
+setting up AWS identities. A sample policy is as follows:
+
+[source,json]
+{
+   "Version": "2012-10-17",
+   "Statement": [
+      {
+         "Effect": "Allow",
+         "Action": [
+            "sqs:GetQueueUrl",
+            "sqs:SendMessage",
+            "sqs:SendMessageBatch"
+         ],
+         "Resource": "arn:aws:sqs:us-east-1:123456789012:my-sqs-queue"
+      }
+   ]
+}
+
+==== Batch Publishing
+This output publishes messages to SQS in batches in order to optimize event
+throughput and increase performance. This is done using the
+http://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessageBatch.html[`SendMessageBatch`]
+API. When publishing messages to SQS in batches, the following service limits
+must be respected (see
+http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html[Limits in Amazon SQS]):
+
+ * The maximum allowed individual message size is 256KiB.
+ * The maximum total payload size (i.e. the sum of the sizes of all
+   individual messages within a batch) is also 256KiB.
+
+This plugin will dynamically adjust the size of the batch published to SQS in
+order to ensure that the total payload size does not exceed 256KiB.
+
+WARNING: This output cannot currently handle messages larger than 256KiB. Any
+single message exceeding this size will be dropped.
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Sqs Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-batch_events>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-message_max_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-access_key_id"] +===== `access_key_id` + + * Value type is <> + * There is no default value for this setting. + +This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: + +1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config +2. External credentials file specified by `aws_credentials_file` +3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` +4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` +5. IAM Instance Profile (available when running inside EC2) + +[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] +===== `aws_credentials_file` + + * Value type is <> + * There is no default value for this setting. + +Path to YAML file containing a hash of AWS credentials. +This file will only be loaded if `access_key_id` and +`secret_access_key` aren't set. The contents of the +file should look like this: + +[source,ruby] +---------------------------------- + :access_key_id: "12345" + :secret_access_key: "54321" +---------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-batch"] +===== `batch` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * Default value is `true` + +Set to `true` to send messages to SQS in batches (with the +`SendMessageBatch` API) or `false` to send messages to SQS individually +(with the `SendMessage` API). The size of the batch is configurable via +`batch_events`. + +[id="{version}-plugins-{type}s-{plugin}-batch_events"] +===== `batch_events` + + * Value type is <> + * Default value is `10` + +The number of events to be sent in each batch. Set this to `1` to disable +the batch sending of messages. + +[id="{version}-plugins-{type}s-{plugin}-batch_timeout"] +===== `batch_timeout` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + + + +[id="{version}-plugins-{type}s-{plugin}-message_max_size"] +===== `message_max_size` + + * Value type is <> + * Default value is `"256KiB"` + +The maximum number of bytes for any message sent to SQS. Messages exceeding +this size will be dropped. 
See +http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html. + +[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] +===== `proxy_uri` + + * Value type is <> + * There is no default value for this setting. + +URI to proxy server if required + +[id="{version}-plugins-{type}s-{plugin}-queue"] +===== `queue` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The name of the target SQS queue. Note that this is just the name of the +queue, not the URL or ARN. + +[id="{version}-plugins-{type}s-{plugin}-region"] +===== `region` + + * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1` + * Default value is `"us-east-1"` + +The AWS Region + +[id="{version}-plugins-{type}s-{plugin}-secret_access_key"] +===== `secret_access_key` + + * Value type is <> + * There is no default value for this setting. + +The AWS Secret Access Key + +[id="{version}-plugins-{type}s-{plugin}-session_token"] +===== `session_token` + + * Value type is <> + * There is no default value for this setting. + +The AWS Session token for temporary credential + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/sqs-v5.0.0.asciidoc b/docs/versioned-plugins/outputs/sqs-v5.0.0.asciidoc new file mode 100644 index 000000000..9904473b7 --- /dev/null +++ b/docs/versioned-plugins/outputs/sqs-v5.0.0.asciidoc @@ -0,0 +1,197 @@ +:plugin: sqs +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v5.0.0 +:release_date: 2017-08-01 +:changelog_url: https://github.com/logstash-plugins/logstash-output-sqs/blob/v5.0.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Sqs output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Push events to an Amazon Web Services (AWS) Simple Queue Service (SQS) queue. + +SQS is a simple, scalable queue system that is part of the Amazon Web +Services suite of tools. Although SQS is similar to other queuing systems +such as Advanced Message Queuing Protocol (AMQP), it uses a custom API and +requires that you have an AWS account. See http://aws.amazon.com/sqs/ for +more details on how SQS works, what the pricing schedule looks like and how +to setup a queue. + +The "consumer" identity must have the following permissions on the queue: + + * `sqs:GetQueueUrl` + * `sqs:SendMessage` + * `sqs:SendMessageBatch` + +Typically, you should setup an IAM policy, create a user and apply the IAM +policy to the user. See http://aws.amazon.com/iam/ for more details on +setting up AWS identities. 
A sample policy is as follows: + +[source,json] +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "sqs:GetQueueUrl", + "sqs:SendMessage", + "sqs:SendMessageBatch" + ], + "Resource": "arn:aws:sqs:us-east-1:123456789012:my-sqs-queue" + } + ] +} + +==== Batch Publishing +This output publishes messages to SQS in batches in order to optimize event +throughput and increase performance. This is done using the +[`SendMessageBatch`](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessageBatch.html) +API. When publishing messages to SQS in batches, the following service limits +must be respected (see +[Limits in Amazon SQS](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html)): + + * The maximum allowed individual message size is 256KiB. + * The maximum total payload size (i.e. the sum of the sizes of all + individual messages within a batch) is also 256KiB. + +This plugin will dynamically adjust the size of the batch published to SQS in +order to ensure that the total payload size does not exceed 256KiB. + +WARNING: This output cannot currently handle messages larger than 256KiB. Any +single message exceeding this size will be dropped. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Sqs Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-batch_events>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-message_max_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-access_key_id"] +===== `access_key_id` + + * Value type is <> + * There is no default value for this setting. + +This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: + +1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config +2. External credentials file specified by `aws_credentials_file` +3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` +4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` +5. IAM Instance Profile (available when running inside EC2) + +[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] +===== `aws_credentials_file` + + * Value type is <> + * There is no default value for this setting. + +Path to YAML file containing a hash of AWS credentials. 
+This file will only be loaded if `access_key_id` and +`secret_access_key` aren't set. The contents of the +file should look like this: + +[source,ruby] +---------------------------------- + :access_key_id: "12345" + :secret_access_key: "54321" +---------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-batch_events"] +===== `batch_events` + + * Value type is <> + * Default value is `10` + +The number of events to be sent in each batch. Set this to `1` to disable +the batch sending of messages. + +[id="{version}-plugins-{type}s-{plugin}-message_max_size"] +===== `message_max_size` + + * Value type is <> + * Default value is `"256KiB"` + +The maximum number of bytes for any message sent to SQS. Messages exceeding +this size will be dropped. See +http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html. + +[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] +===== `proxy_uri` + + * Value type is <> + * There is no default value for this setting. + +URI to proxy server if required + +[id="{version}-plugins-{type}s-{plugin}-queue"] +===== `queue` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The name of the target SQS queue. Note that this is just the name of the +queue, not the URL or ARN. + +[id="{version}-plugins-{type}s-{plugin}-region"] +===== `region` + + * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1` + * Default value is `"us-east-1"` + +The AWS Region + +[id="{version}-plugins-{type}s-{plugin}-secret_access_key"] +===== `secret_access_key` + + * Value type is <> + * There is no default value for this setting. + +The AWS Secret Access Key + +[id="{version}-plugins-{type}s-{plugin}-session_token"] +===== `session_token` + + * Value type is <> + * There is no default value for this setting. + +The AWS Session token for temporary credential + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/sqs-v5.0.1.asciidoc b/docs/versioned-plugins/outputs/sqs-v5.0.1.asciidoc new file mode 100644 index 000000000..0faef1ff6 --- /dev/null +++ b/docs/versioned-plugins/outputs/sqs-v5.0.1.asciidoc @@ -0,0 +1,197 @@ +:plugin: sqs +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v5.0.1 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-sqs/blob/v5.0.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Sqs output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Push events to an Amazon Web Services (AWS) Simple Queue Service (SQS) queue. + +SQS is a simple, scalable queue system that is part of the Amazon Web +Services suite of tools. Although SQS is similar to other queuing systems +such as Advanced Message Queuing Protocol (AMQP), it uses a custom API and +requires that you have an AWS account. 
See http://aws.amazon.com/sqs/ for +more details on how SQS works, what the pricing schedule looks like and how +to setup a queue. + +The "consumer" identity must have the following permissions on the queue: + + * `sqs:GetQueueUrl` + * `sqs:SendMessage` + * `sqs:SendMessageBatch` + +Typically, you should setup an IAM policy, create a user and apply the IAM +policy to the user. See http://aws.amazon.com/iam/ for more details on +setting up AWS identities. A sample policy is as follows: + +[source,json] +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "sqs:GetQueueUrl", + "sqs:SendMessage", + "sqs:SendMessageBatch" + ], + "Resource": "arn:aws:sqs:us-east-1:123456789012:my-sqs-queue" + } + ] +} + +==== Batch Publishing +This output publishes messages to SQS in batches in order to optimize event +throughput and increase performance. This is done using the +[`SendMessageBatch`](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessageBatch.html) +API. When publishing messages to SQS in batches, the following service limits +must be respected (see +[Limits in Amazon SQS](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html)): + + * The maximum allowed individual message size is 256KiB. + * The maximum total payload size (i.e. the sum of the sizes of all + individual messages within a batch) is also 256KiB. + +This plugin will dynamically adjust the size of the batch published to SQS in +order to ensure that the total payload size does not exceed 256KiB. + +WARNING: This output cannot currently handle messages larger than 256KiB. Any +single message exceeding this size will be dropped. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Sqs Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-batch_events>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-message_max_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-access_key_id"] +===== `access_key_id` + + * Value type is <> + * There is no default value for this setting. + +This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: + +1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config +2. External credentials file specified by `aws_credentials_file` +3. 
Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` +4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` +5. IAM Instance Profile (available when running inside EC2) + +[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] +===== `aws_credentials_file` + + * Value type is <> + * There is no default value for this setting. + +Path to YAML file containing a hash of AWS credentials. +This file will only be loaded if `access_key_id` and +`secret_access_key` aren't set. The contents of the +file should look like this: + +[source,ruby] +---------------------------------- + :access_key_id: "12345" + :secret_access_key: "54321" +---------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-batch_events"] +===== `batch_events` + + * Value type is <> + * Default value is `10` + +The number of events to be sent in each batch. Set this to `1` to disable +the batch sending of messages. + +[id="{version}-plugins-{type}s-{plugin}-message_max_size"] +===== `message_max_size` + + * Value type is <> + * Default value is `"256KiB"` + +The maximum number of bytes for any message sent to SQS. Messages exceeding +this size will be dropped. See +http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html. + +[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] +===== `proxy_uri` + + * Value type is <> + * There is no default value for this setting. + +URI to proxy server if required + +[id="{version}-plugins-{type}s-{plugin}-queue"] +===== `queue` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The name of the target SQS queue. Note that this is just the name of the +queue, not the URL or ARN. + +[id="{version}-plugins-{type}s-{plugin}-region"] +===== `region` + + * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1` + * Default value is `"us-east-1"` + +The AWS Region + +[id="{version}-plugins-{type}s-{plugin}-secret_access_key"] +===== `secret_access_key` + + * Value type is <> + * There is no default value for this setting. + +The AWS Secret Access Key + +[id="{version}-plugins-{type}s-{plugin}-session_token"] +===== `session_token` + + * Value type is <> + * There is no default value for this setting. + +The AWS Session token for temporary credential + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/sqs-v5.0.2.asciidoc b/docs/versioned-plugins/outputs/sqs-v5.0.2.asciidoc new file mode 100644 index 000000000..bbf41b62b --- /dev/null +++ b/docs/versioned-plugins/outputs/sqs-v5.0.2.asciidoc @@ -0,0 +1,197 @@ +:plugin: sqs +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v5.0.2 +:release_date: 2017-11-13 +:changelog_url: https://github.com/logstash-plugins/logstash-output-sqs/blob/v5.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Sqs output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Push events to an Amazon Web Services (AWS) Simple Queue Service (SQS) queue. + +SQS is a simple, scalable queue system that is part of the Amazon Web +Services suite of tools. Although SQS is similar to other queuing systems +such as Advanced Message Queuing Protocol (AMQP), it uses a custom API and +requires that you have an AWS account. See http://aws.amazon.com/sqs/ for +more details on how SQS works, what the pricing schedule looks like and how +to setup a queue. + +The "consumer" identity must have the following permissions on the queue: + + * `sqs:GetQueueUrl` + * `sqs:SendMessage` + * `sqs:SendMessageBatch` + +Typically, you should setup an IAM policy, create a user and apply the IAM +policy to the user. See http://aws.amazon.com/iam/ for more details on +setting up AWS identities. A sample policy is as follows: + +[source,json] +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "sqs:GetQueueUrl", + "sqs:SendMessage", + "sqs:SendMessageBatch" + ], + "Resource": "arn:aws:sqs:us-east-1:123456789012:my-sqs-queue" + } + ] +} + +==== Batch Publishing +This output publishes messages to SQS in batches in order to optimize event +throughput and increase performance. This is done using the +[`SendMessageBatch`](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessageBatch.html) +API. When publishing messages to SQS in batches, the following service limits +must be respected (see +[Limits in Amazon SQS](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html)): + + * The maximum allowed individual message size is 256KiB. + * The maximum total payload size (i.e. the sum of the sizes of all + individual messages within a batch) is also 256KiB. + +This plugin will dynamically adjust the size of the batch published to SQS in +order to ensure that the total payload size does not exceed 256KiB. + +WARNING: This output cannot currently handle messages larger than 256KiB. Any +single message exceeding this size will be dropped. + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Sqs Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-batch_events>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-message_max_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-access_key_id"] +===== `access_key_id` + + * Value type is <> + * There is no default value for this setting. + +This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: + +1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config +2. External credentials file specified by `aws_credentials_file` +3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` +4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` +5. IAM Instance Profile (available when running inside EC2) + +[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] +===== `aws_credentials_file` + + * Value type is <> + * There is no default value for this setting. + +Path to YAML file containing a hash of AWS credentials. +This file will only be loaded if `access_key_id` and +`secret_access_key` aren't set. The contents of the +file should look like this: + +[source,ruby] +---------------------------------- + :access_key_id: "12345" + :secret_access_key: "54321" +---------------------------------- + + +[id="{version}-plugins-{type}s-{plugin}-batch_events"] +===== `batch_events` + + * Value type is <> + * Default value is `10` + +The number of events to be sent in each batch. Set this to `1` to disable +the batch sending of messages. + +[id="{version}-plugins-{type}s-{plugin}-message_max_size"] +===== `message_max_size` + + * Value type is <> + * Default value is `"256KiB"` + +The maximum number of bytes for any message sent to SQS. Messages exceeding +this size will be dropped. See +http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html. + +[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] +===== `proxy_uri` + + * Value type is <> + * There is no default value for this setting. + +URI to proxy server if required + +[id="{version}-plugins-{type}s-{plugin}-queue"] +===== `queue` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The name of the target SQS queue. Note that this is just the name of the +queue, not the URL or ARN. 
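+
+For example, a minimal configuration that sends each event to a queue in the
+default region could look like this (the queue name is a placeholder):
+
+[source,ruby]
+output {
+  sqs {
+    queue => "my-sqs-queue"   # name only, not the URL or ARN
+  }
+}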
+
+[id="{version}-plugins-{type}s-{plugin}-region"]
+===== `region`
+
+ * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
+ * Default value is `"us-east-1"`
+
+The AWS Region
+
+[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
+===== `secret_access_key`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The AWS Secret Access Key
+
+[id="{version}-plugins-{type}s-{plugin}-session_token"]
+===== `session_token`
+
+ * Value type is <<string,string>>
+ * There is no default value for this setting.
+
+The AWS Session token for temporary credentials
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/statsd-index.asciidoc b/docs/versioned-plugins/outputs/statsd-index.asciidoc
new file mode 100644
index 000000000..297baedf6
--- /dev/null
+++ b/docs/versioned-plugins/outputs/statsd-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: statsd
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <<v3.1.4-plugins-outputs-statsd,v3.1.4>> | 2017-11-13
+| <<v3.1.3-plugins-outputs-statsd,v3.1.3>> | 2017-08-16
+| <<v3.1.2-plugins-outputs-statsd,v3.1.2>> | 2017-06-23
+|=======================================================================
+
+include::statsd-v3.1.4.asciidoc[]
+include::statsd-v3.1.3.asciidoc[]
+include::statsd-v3.1.2.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/statsd-v3.1.2.asciidoc b/docs/versioned-plugins/outputs/statsd-v3.1.2.asciidoc
new file mode 100644
index 000000000..25627c427
--- /dev/null
+++ b/docs/versioned-plugins/outputs/statsd-v3.1.2.asciidoc
@@ -0,0 +1,193 @@
+:plugin: statsd
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-statsd/blob/v3.1.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Statsd output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+statsd is a network daemon for aggregating statistics, such as counters and
+timers, and shipping them over UDP to backend services such as Graphite or
+Datadog. The general idea is that you send metrics to statsd and every few
+seconds it will emit the aggregated values to the backend. Example aggregates
+are sums, averages, maximum values, standard deviations, and so on. This
+plugin makes it easy to send such metrics based on data in Logstash events.
+
+You can learn about statsd here:
+
+* https://codeascraft.com/2011/02/15/measure-anything-measure-everything/[Etsy blog post announcing statsd]
+* https://github.com/etsy/statsd[statsd on github]
+
+Typical examples of how this can be used with Logstash include counting HTTP hits
+by response code, summing the total number of bytes of traffic served, and tracking
+the 50th and 95th percentiles of the processing time of requests.
+
+Each metric emitted to statsd has a dot-separated path, a type, and a value. 
The +metric path is built from the `namespace` and `sender` options together with the +metric name that's picked up depending on the type of metric. All in all, the +metric path will follow this pattern: + + namespace.sender.metric + +With regards to this plugin, the default namespace is "logstash", the default +sender is the `host` field, and the metric name depends on what is set as the +metric name in the `increment`, `decrement`, `timing`, `count`, `set` or `gauge` +options. In metric paths, colons (":"), pipes ("|") and at signs ("@") are reserved +and will be replaced by underscores ("_"). + +Example: +[source,ruby] +output { + statsd { + host => "statsd.example.org" + count => { + "http.bytes" => "%{bytes}" + } + } +} + +If run on a host named hal9000 the configuration above will send the following +metric to statsd if the current event has 123 in its `bytes` field: + + logstash.hal9000.http.bytes:123|c + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Statsd Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-count>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-decrement>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-gauge>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-increment>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-namespace>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sample_rate>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sender>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-set>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timing>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-count"] +===== `count` + + * Value type is <> + * Default value is `{}` + +A count metric. `metric_name => count` as hash. `%{fieldname}` substitutions are +allowed in the metric names. + +[id="{version}-plugins-{type}s-{plugin}-decrement"] +===== `decrement` + + * Value type is <> + * Default value is `[]` + +A decrement metric. Metric names as array. `%{fieldname}` substitutions are +allowed in the metric names. + +[id="{version}-plugins-{type}s-{plugin}-gauge"] +===== `gauge` + + * Value type is <> + * Default value is `{}` + +A gauge metric. `metric_name => gauge` as hash. `%{fieldname}` substitutions are +allowed in the metric names. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"localhost"` + +The hostname or IP address of the statsd server. + +[id="{version}-plugins-{type}s-{plugin}-increment"] +===== `increment` + + * Value type is <> + * Default value is `[]` + +An increment metric. Metric names as array. `%{fieldname}` substitutions are +allowed in the metric names. + +[id="{version}-plugins-{type}s-{plugin}-namespace"] +===== `namespace` + + * Value type is <> + * Default value is `"logstash"` + +The statsd namespace to use for this metric. `%{fieldname}` substitutions are +allowed. 
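+
+For instance, an illustrative configuration that counts HTTP responses by
+status code under a custom namespace could look like this (the field names
+are placeholders):
+
+[source,ruby]
+output {
+  statsd {
+    host => "statsd.example.org"
+    namespace => "myapp"                 # overrides the default "logstash"
+    increment => ["http.%{response}"]    # one counter per status code
+  }
+}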
+ +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `8125` + +The port to connect to on your statsd server. + +[id="{version}-plugins-{type}s-{plugin}-sample_rate"] +===== `sample_rate` + + * Value type is <> + * Default value is `1` + +The sample rate for the metric. + +[id="{version}-plugins-{type}s-{plugin}-sender"] +===== `sender` + + * Value type is <> + * Default value is `"%{host}"` + +The name of the sender. Dots will be replaced with underscores. `%{fieldname}` +substitutions are allowed. + +[id="{version}-plugins-{type}s-{plugin}-set"] +===== `set` + + * Value type is <> + * Default value is `{}` + +A set metric. `metric_name => "string"` to append as hash. `%{fieldname}` +substitutions are allowed in the metric names. + +[id="{version}-plugins-{type}s-{plugin}-timing"] +===== `timing` + + * Value type is <> + * Default value is `{}` + +A timing metric. `metric_name => duration` as hash. `%{fieldname}` substitutions +are allowed in the metric names. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/statsd-v3.1.3.asciidoc b/docs/versioned-plugins/outputs/statsd-v3.1.3.asciidoc new file mode 100644 index 000000000..f9129b75a --- /dev/null +++ b/docs/versioned-plugins/outputs/statsd-v3.1.3.asciidoc @@ -0,0 +1,193 @@ +:plugin: statsd +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.3 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-statsd/blob/v3.1.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Statsd output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +statsd is a network daemon for aggregating statistics, such as counters and timers, +and shipping over UDP to backend services, such as Graphite or Datadog. The general +idea is that you send metrics to statsd and every few seconds it will emit the +aggregated values to the backend. Example aggregates are sums, average and maximum +values, their standard deviation, etc. This plugin makes it easy to send such +metrics based on data in Logstash events. + +You can learn about statsd here: + +* https://codeascraft.com/2011/02/15/measure-anything-measure-everything/[Etsy blog post announcing statsd] +* https://github.com/etsy/statsd[statsd on github] + +Typical examples of how this can be used with Logstash include counting HTTP hits +by response code, summing the total number of bytes of traffic served, and tracking +the 50th and 95th percentile of the processing time of requests. + +Each metric emitted to statsd has a dot-separated path, a type, and a value. The +metric path is built from the `namespace` and `sender` options together with the +metric name that's picked up depending on the type of metric. All in all, the +metric path will follow this pattern: + + namespace.sender.metric + +With regards to this plugin, the default namespace is "logstash", the default +sender is the `host` field, and the metric name depends on what is set as the +metric name in the `increment`, `decrement`, `timing`, `count`, `set` or `gauge` +options. 
In metric paths, colons (":"), pipes ("|") and at signs ("@") are reserved +and will be replaced by underscores ("_"). + +Example: +[source,ruby] +output { + statsd { + host => "statsd.example.org" + count => { + "http.bytes" => "%{bytes}" + } + } +} + +If run on a host named hal9000 the configuration above will send the following +metric to statsd if the current event has 123 in its `bytes` field: + + logstash.hal9000.http.bytes:123|c + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Statsd Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-count>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-decrement>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-gauge>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-increment>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-namespace>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sample_rate>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sender>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-set>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timing>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-count"] +===== `count` + + * Value type is <> + * Default value is `{}` + +A count metric. `metric_name => count` as hash. `%{fieldname}` substitutions are +allowed in the metric names. + +[id="{version}-plugins-{type}s-{plugin}-decrement"] +===== `decrement` + + * Value type is <> + * Default value is `[]` + +A decrement metric. Metric names as array. `%{fieldname}` substitutions are +allowed in the metric names. + +[id="{version}-plugins-{type}s-{plugin}-gauge"] +===== `gauge` + + * Value type is <> + * Default value is `{}` + +A gauge metric. `metric_name => gauge` as hash. `%{fieldname}` substitutions are +allowed in the metric names. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"localhost"` + +The hostname or IP address of the statsd server. + +[id="{version}-plugins-{type}s-{plugin}-increment"] +===== `increment` + + * Value type is <> + * Default value is `[]` + +An increment metric. Metric names as array. `%{fieldname}` substitutions are +allowed in the metric names. + +[id="{version}-plugins-{type}s-{plugin}-namespace"] +===== `namespace` + + * Value type is <> + * Default value is `"logstash"` + +The statsd namespace to use for this metric. `%{fieldname}` substitutions are +allowed. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `8125` + +The port to connect to on your statsd server. + +[id="{version}-plugins-{type}s-{plugin}-sample_rate"] +===== `sample_rate` + + * Value type is <> + * Default value is `1` + +The sample rate for the metric. + +[id="{version}-plugins-{type}s-{plugin}-sender"] +===== `sender` + + * Value type is <> + * Default value is `"%{host}"` + +The name of the sender. Dots will be replaced with underscores. `%{fieldname}` +substitutions are allowed. 
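+
+As an illustration only, `sender` can be overridden together with a gauge
+metric; the value `app01` and the `heap_used` field are made-up stand-ins:
+
+[source,ruby]
+output {
+  statsd {
+    host => "statsd.example.org"
+    sender => "app01"   # the metric path becomes logstash.app01.<metric name>
+    gauge => { "jvm.heap_used" => "%{heap_used}" }
+  }
+}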
+ +[id="{version}-plugins-{type}s-{plugin}-set"] +===== `set` + + * Value type is <> + * Default value is `{}` + +A set metric. `metric_name => "string"` to append as hash. `%{fieldname}` +substitutions are allowed in the metric names. + +[id="{version}-plugins-{type}s-{plugin}-timing"] +===== `timing` + + * Value type is <> + * Default value is `{}` + +A timing metric. `metric_name => duration` as hash. `%{fieldname}` substitutions +are allowed in the metric names. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/statsd-v3.1.4.asciidoc b/docs/versioned-plugins/outputs/statsd-v3.1.4.asciidoc new file mode 100644 index 000000000..5ec387217 --- /dev/null +++ b/docs/versioned-plugins/outputs/statsd-v3.1.4.asciidoc @@ -0,0 +1,193 @@ +:plugin: statsd +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.4 +:release_date: 2017-11-13 +:changelog_url: https://github.com/logstash-plugins/logstash-output-statsd/blob/v3.1.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Statsd output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +statsd is a network daemon for aggregating statistics, such as counters and timers, +and shipping over UDP to backend services, such as Graphite or Datadog. The general +idea is that you send metrics to statsd and every few seconds it will emit the +aggregated values to the backend. Example aggregates are sums, average and maximum +values, their standard deviation, etc. This plugin makes it easy to send such +metrics based on data in Logstash events. + +You can learn about statsd here: + +* https://codeascraft.com/2011/02/15/measure-anything-measure-everything/[Etsy blog post announcing statsd] +* https://github.com/etsy/statsd[statsd on github] + +Typical examples of how this can be used with Logstash include counting HTTP hits +by response code, summing the total number of bytes of traffic served, and tracking +the 50th and 95th percentile of the processing time of requests. + +Each metric emitted to statsd has a dot-separated path, a type, and a value. The +metric path is built from the `namespace` and `sender` options together with the +metric name that's picked up depending on the type of metric. All in all, the +metric path will follow this pattern: + + namespace.sender.metric + +With regards to this plugin, the default namespace is "logstash", the default +sender is the `host` field, and the metric name depends on what is set as the +metric name in the `increment`, `decrement`, `timing`, `count`, `set` or `gauge` +options. In metric paths, colons (":"), pipes ("|") and at signs ("@") are reserved +and will be replaced by underscores ("_"). 
+ +Example: +[source,ruby] +output { + statsd { + host => "statsd.example.org" + count => { + "http.bytes" => "%{bytes}" + } + } +} + +If run on a host named hal9000 the configuration above will send the following +metric to statsd if the current event has 123 in its `bytes` field: + + logstash.hal9000.http.bytes:123|c + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Statsd Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-count>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-decrement>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-gauge>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-increment>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-namespace>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sample_rate>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sender>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-set>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timing>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-count"] +===== `count` + + * Value type is <> + * Default value is `{}` + +A count metric. `metric_name => count` as hash. `%{fieldname}` substitutions are +allowed in the metric names. + +[id="{version}-plugins-{type}s-{plugin}-decrement"] +===== `decrement` + + * Value type is <> + * Default value is `[]` + +A decrement metric. Metric names as array. `%{fieldname}` substitutions are +allowed in the metric names. + +[id="{version}-plugins-{type}s-{plugin}-gauge"] +===== `gauge` + + * Value type is <> + * Default value is `{}` + +A gauge metric. `metric_name => gauge` as hash. `%{fieldname}` substitutions are +allowed in the metric names. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * Value type is <> + * Default value is `"localhost"` + +The hostname or IP address of the statsd server. + +[id="{version}-plugins-{type}s-{plugin}-increment"] +===== `increment` + + * Value type is <> + * Default value is `[]` + +An increment metric. Metric names as array. `%{fieldname}` substitutions are +allowed in the metric names. + +[id="{version}-plugins-{type}s-{plugin}-namespace"] +===== `namespace` + + * Value type is <> + * Default value is `"logstash"` + +The statsd namespace to use for this metric. `%{fieldname}` substitutions are +allowed. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `8125` + +The port to connect to on your statsd server. + +[id="{version}-plugins-{type}s-{plugin}-sample_rate"] +===== `sample_rate` + + * Value type is <> + * Default value is `1` + +The sample rate for the metric. + +[id="{version}-plugins-{type}s-{plugin}-sender"] +===== `sender` + + * Value type is <> + * Default value is `"%{host}"` + +The name of the sender. Dots will be replaced with underscores. `%{fieldname}` +substitutions are allowed. + +[id="{version}-plugins-{type}s-{plugin}-set"] +===== `set` + + * Value type is <> + * Default value is `{}` + +A set metric. 
`metric_name => "string"` to append as hash. `%{fieldname}`
+substitutions are allowed in the metric names.
+
+[id="{version}-plugins-{type}s-{plugin}-timing"]
+===== `timing`
+
+ * Value type is <>
+ * Default value is `{}`
+
+A timing metric. `metric_name => duration` as hash. `%{fieldname}` substitutions
+are allowed in the metric names.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/stdout-index.asciidoc b/docs/versioned-plugins/outputs/stdout-index.asciidoc
new file mode 100644
index 000000000..61f66c359
--- /dev/null
+++ b/docs/versioned-plugins/outputs/stdout-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: stdout
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-13
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::stdout-v3.1.3.asciidoc[]
+include::stdout-v3.1.2.asciidoc[]
+include::stdout-v3.1.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/stdout-v3.1.1.asciidoc b/docs/versioned-plugins/outputs/stdout-v3.1.1.asciidoc
new file mode 100644
index 000000000..6fb470bdb
--- /dev/null
+++ b/docs/versioned-plugins/outputs/stdout-v3.1.1.asciidoc
@@ -0,0 +1,64 @@
+:plugin: stdout
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-stdout/blob/v3.1.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Stdout output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+A simple output which prints to the STDOUT of the shell running
+Logstash. This output can be quite convenient when debugging
+plugin configurations, by allowing instant access to the event
+data after it has passed through the inputs and filters.
+
+For example, the following output configuration, in conjunction with the
+Logstash `-e` command-line flag, will allow you to see the results
+of your event pipeline for quick iteration.
+[source,ruby]
+    output {
+      stdout {}
+    }
+
+Useful codecs include:
+
+`rubydebug`: outputs event data using the ruby "awesome_print"
+http://rubygems.org/gems/awesome_print[library]
+
+[source,ruby]
+    output {
+      stdout { codec => rubydebug }
+    }
+
+`json`: outputs event data in structured JSON format
+[source,ruby]
+    output {
+      stdout { codec => json }
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Stdout Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +|======================================================================= + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/stdout-v3.1.2.asciidoc b/docs/versioned-plugins/outputs/stdout-v3.1.2.asciidoc new file mode 100644 index 000000000..b0ca17e6b --- /dev/null +++ b/docs/versioned-plugins/outputs/stdout-v3.1.2.asciidoc @@ -0,0 +1,60 @@ +:plugin: stdout +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.2 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-stdout/blob/v3.1.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Stdout output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +A simple output which prints to the STDOUT of the shell running +Logstash. This output can be quite convenient when debugging +plugin configurations, by allowing instant access to the event +data after it has passed through the inputs and filters. + +For example, the following output configuration, in conjunction with the +Logstash `-e` command-line flag, will allow you to see the results +of your event pipeline for quick iteration. +[source,ruby] + output { + stdout {} + } + +Useful codecs include: + +`rubydebug`: outputs event data using the ruby "awesome_print" +http://rubygems.org/gems/awesome_print[library] + +[source,ruby] + output { + stdout { codec => rubydebug } + } + +`json`: outputs event data in structured JSON format +[source,ruby] + output { + stdout { codec => json } + } + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Stdout Output Configuration Options + +There are no special configuration options for this plugin, +but it does support the <<{version}-plugins-{type}s-{plugin}-common-options>>. + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/stdout-v3.1.3.asciidoc b/docs/versioned-plugins/outputs/stdout-v3.1.3.asciidoc new file mode 100644 index 000000000..c12bf9ca1 --- /dev/null +++ b/docs/versioned-plugins/outputs/stdout-v3.1.3.asciidoc @@ -0,0 +1,60 @@ +:plugin: stdout +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.3 +:release_date: 2017-11-13 +:changelog_url: https://github.com/logstash-plugins/logstash-output-stdout/blob/v3.1.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Stdout output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +A simple output which prints to the STDOUT of the shell running +Logstash. 
This output can be quite convenient when debugging
+plugin configurations, by allowing instant access to the event
+data after it has passed through the inputs and filters.
+
+For example, the following output configuration, in conjunction with the
+Logstash `-e` command-line flag, will allow you to see the results
+of your event pipeline for quick iteration.
+[source,ruby]
+    output {
+      stdout {}
+    }
+
+Useful codecs include:
+
+`rubydebug`: outputs event data using the ruby "awesome_print"
+http://rubygems.org/gems/awesome_print[library]
+
+[source,ruby]
+    output {
+      stdout { codec => rubydebug }
+    }
+
+`json`: outputs event data in structured JSON format
+[source,ruby]
+    output {
+      stdout { codec => json }
+    }
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Stdout Output Configuration Options
+
+There are no special configuration options for this plugin,
+but it does support the <<{version}-plugins-{type}s-{plugin}-common-options>>.
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/stomp-index.asciidoc b/docs/versioned-plugins/outputs/stomp-index.asciidoc
new file mode 100644
index 000000000..500600f51
--- /dev/null
+++ b/docs/versioned-plugins/outputs/stomp-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: stomp
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-13
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::stomp-v3.0.8.asciidoc[]
+include::stomp-v3.0.7.asciidoc[]
+include::stomp-v3.0.5.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/stomp-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/stomp-v3.0.5.asciidoc
new file mode 100644
index 000000000..5e0a364ba
--- /dev/null
+++ b/docs/versioned-plugins/outputs/stomp-v3.0.5.asciidoc
@@ -0,0 +1,123 @@
+:plugin: stomp
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-stomp/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Stomp output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output writes events using the STOMP protocol.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Stomp Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-debug>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-headers>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-debug"] +===== `debug` + + * Value type is <> + * Default value is `false` + +Enable debugging output? + +[id="{version}-plugins-{type}s-{plugin}-destination"] +===== `destination` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The destination to read events from. Supports string expansion, meaning +`%{foo}` values will expand to the field value. + +Example: "/topic/logstash" + +[id="{version}-plugins-{type}s-{plugin}-headers"] +===== `headers` + + * Value type is <> + * There is no default value for this setting. + +Custom headers to send with each message. Supports string expansion, meaning +%{foo} values will expand to the field value. + +Example: headers => ["amq-msg-type", "text", "host", "%{host}"] + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The address of the STOMP server. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * Default value is `""` + +The password to authenticate with. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `61613` + +The port to connect to on your STOMP server. + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * Default value is `""` + +The username to authenticate with. + +[id="{version}-plugins-{type}s-{plugin}-vhost"] +===== `vhost` + + * Value type is <> + * Default value is `nil` + +The vhost to use + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/stomp-v3.0.7.asciidoc b/docs/versioned-plugins/outputs/stomp-v3.0.7.asciidoc new file mode 100644 index 000000000..96be58564 --- /dev/null +++ b/docs/versioned-plugins/outputs/stomp-v3.0.7.asciidoc @@ -0,0 +1,123 @@ +:plugin: stomp +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.7 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-stomp/blob/v3.0.7/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Stomp output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output writes events using the STOMP protocol. 
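+
+For example, a minimal configuration might look like the following sketch,
+where the broker address is a placeholder:
+
+[source,ruby]
+output {
+  stomp {
+    host => "stomp.example.org"       # required: address of the STOMP server
+    destination => "/topic/logstash"  # required: where events are written
+  }
+}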
+ +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Stomp Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-debug>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-headers>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-debug"] +===== `debug` + + * Value type is <> + * Default value is `false` + +Enable debugging output? + +[id="{version}-plugins-{type}s-{plugin}-destination"] +===== `destination` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The destination to read events from. Supports string expansion, meaning +`%{foo}` values will expand to the field value. + +Example: "/topic/logstash" + +[id="{version}-plugins-{type}s-{plugin}-headers"] +===== `headers` + + * Value type is <> + * There is no default value for this setting. + +Custom headers to send with each message. Supports string expansion, meaning +%{foo} values will expand to the field value. + +Example: headers => ["amq-msg-type", "text", "host", "%{host}"] + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The address of the STOMP server. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * Default value is `""` + +The password to authenticate with. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `61613` + +The port to connect to on your STOMP server. + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * Default value is `""` + +The username to authenticate with. + +[id="{version}-plugins-{type}s-{plugin}-vhost"] +===== `vhost` + + * Value type is <> + * Default value is `nil` + +The vhost to use + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/stomp-v3.0.8.asciidoc b/docs/versioned-plugins/outputs/stomp-v3.0.8.asciidoc new file mode 100644 index 000000000..a883b2eb0 --- /dev/null +++ b/docs/versioned-plugins/outputs/stomp-v3.0.8.asciidoc @@ -0,0 +1,123 @@ +:plugin: stomp +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.8 +:release_date: 2017-11-13 +:changelog_url: https://github.com/logstash-plugins/logstash-output-stomp/blob/v3.0.8/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Stomp output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output writes events using the STOMP protocol. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Stomp Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-debug>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-headers>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-debug"] +===== `debug` + + * Value type is <> + * Default value is `false` + +Enable debugging output? + +[id="{version}-plugins-{type}s-{plugin}-destination"] +===== `destination` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The destination to read events from. Supports string expansion, meaning +`%{foo}` values will expand to the field value. + +Example: "/topic/logstash" + +[id="{version}-plugins-{type}s-{plugin}-headers"] +===== `headers` + + * Value type is <> + * There is no default value for this setting. + +Custom headers to send with each message. Supports string expansion, meaning +%{foo} values will expand to the field value. + +Example: headers => ["amq-msg-type", "text", "host", "%{host}"] + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The address of the STOMP server. + +[id="{version}-plugins-{type}s-{plugin}-password"] +===== `password` + + * Value type is <> + * Default value is `""` + +The password to authenticate with. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * Value type is <> + * Default value is `61613` + +The port to connect to on your STOMP server. + +[id="{version}-plugins-{type}s-{plugin}-user"] +===== `user` + + * Value type is <> + * Default value is `""` + +The username to authenticate with. 
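+
+Putting the authentication options together with the `headers` example above
+gives a sketch like the following; the broker address and credentials are
+placeholders:
+
+[source,ruby]
+output {
+  stomp {
+    host => "stomp.example.org"
+    destination => "/topic/logstash"
+    user => "logstash"
+    password => "secret"
+    headers => ["amq-msg-type", "text", "host", "%{host}"]
+  }
+}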
+
+[id="{version}-plugins-{type}s-{plugin}-vhost"]
+===== `vhost`
+
+ * Value type is <>
+ * Default value is `nil`
+
+The vhost to use.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/syslog-index.asciidoc b/docs/versioned-plugins/outputs/syslog-index.asciidoc
new file mode 100644
index 000000000..27a75c337
--- /dev/null
+++ b/docs/versioned-plugins/outputs/syslog-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: syslog
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-13
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::syslog-v3.0.4.asciidoc[]
+include::syslog-v3.0.3.asciidoc[]
+include::syslog-v3.0.2.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/syslog-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/syslog-v3.0.2.asciidoc
new file mode 100644
index 000000000..1bbb6a4e2
--- /dev/null
+++ b/docs/versioned-plugins/outputs/syslog-v3.0.2.asciidoc
@@ -0,0 +1,239 @@
+:plugin: syslog
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-syslog/blob/v3.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Syslog output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Send events to a syslog server.
+
+You can send messages compliant with RFC3164 or RFC5424
+using either UDP or TCP as the transport protocol.
+
+By default the contents of the `message` field will be shipped as
+the free-form message text part of the emitted syslog message. If
+your messages don't have a `message` field or if you for some other
+reason want to change the emitted message, modify the `message`
+configuration option.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Syslog Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-appname>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-facility>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-msgid>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-priority>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-procid>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-protocol>> |<>, one of `["tcp", "udp", "ssl-tcp"]`|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-rfc>> |<>, one of `["rfc3164", "rfc5424"]`|No +| <<{version}-plugins-{type}s-{plugin}-severity>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sourcehost>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-use_labels>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-appname"] +===== `appname` + + * Value type is <> + * Default value is `"LOGSTASH"` + +application name for syslog message. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-facility"] +===== `facility` + + * Value type is <> + * Default value is `"user-level"` + +facility label for syslog message +default fallback to user-level as in rfc3164 +The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +syslog server address to connect to + +[id="{version}-plugins-{type}s-{plugin}-message"] +===== `message` + + * Value type is <> + * Default value is `"%{message}"` + +message text to log. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-msgid"] +===== `msgid` + + * Value type is <> + * Default value is `"-"` + +message id for syslog message. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +syslog server port to connect to + +[id="{version}-plugins-{type}s-{plugin}-priority"] +===== `priority` + + * Value type is <> + * Default value is `"%{syslog_pri}"` + +syslog priority +The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-procid"] +===== `procid` + + * Value type is <> + * Default value is `"-"` + +process id for syslog message. 
The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-protocol"] +===== `protocol` + + * Value can be any of: `tcp`, `udp`, `ssl-tcp` + * Default value is `"udp"` + +syslog server protocol. you can choose between udp, tcp and ssl/tls over tcp + +[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] +===== `reconnect_interval` + + * Value type is <> + * Default value is `1` + +when connection fails, retry interval in sec. + +[id="{version}-plugins-{type}s-{plugin}-rfc"] +===== `rfc` + + * Value can be any of: `rfc3164`, `rfc5424` + * Default value is `"rfc3164"` + +syslog message format: you can choose between rfc3164 or rfc5424 + +[id="{version}-plugins-{type}s-{plugin}-severity"] +===== `severity` + + * Value type is <> + * Default value is `"notice"` + +severity label for syslog message +default fallback to notice as in rfc3164 +The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-sourcehost"] +===== `sourcehost` + + * Value type is <> + * Default value is `"%{host}"` + +source host for syslog message. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"] +===== `ssl_cacert` + + * Value type is <> + * There is no default value for this setting. + +The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. + +[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] +===== `ssl_cert` + + * Value type is <> + * There is no default value for this setting. + +SSL certificate path + +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + + * Value type is <> + * There is no default value for this setting. + +SSL key path + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] +===== `ssl_key_passphrase` + + * Value type is <> + * Default value is `nil` + +SSL key passphrase + +[id="{version}-plugins-{type}s-{plugin}-ssl_verify"] +===== `ssl_verify` + + * Value type is <> + * Default value is `false` + +Verify the identity of the other end of the SSL connection against the CA. + +[id="{version}-plugins-{type}s-{plugin}-use_labels"] +===== `use_labels` + + * Value type is <> + * Default value is `true` + +use label parsing for severity and facility levels +use priority field if set to false + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/syslog-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/syslog-v3.0.3.asciidoc new file mode 100644 index 000000000..4f0986290 --- /dev/null +++ b/docs/versioned-plugins/outputs/syslog-v3.0.3.asciidoc @@ -0,0 +1,239 @@ +:plugin: syslog +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-syslog/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! 
+/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Syslog output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Send events to a syslog server. + +You can send messages compliant with RFC3164 or RFC5424 +using either UDP or TCP as the transport protocol. + +By default the contents of the `message` field will be shipped as +the free-form message text part of the emitted syslog message. If +your messages don't have a `message` field or if you for some other +reason want to change the emitted message, modify the `message` +configuration option. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Syslog Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-appname>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-facility>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-msgid>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-priority>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-procid>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-protocol>> |<>, one of `["tcp", "udp", "ssl-tcp"]`|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-rfc>> |<>, one of `["rfc3164", "rfc5424"]`|No +| <<{version}-plugins-{type}s-{plugin}-severity>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sourcehost>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-use_labels>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-appname"] +===== `appname` + + * Value type is <> + * Default value is `"LOGSTASH"` + +application name for syslog message. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-facility"] +===== `facility` + + * Value type is <> + * Default value is `"user-level"` + +facility label for syslog message +default fallback to user-level as in rfc3164 +The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +syslog server address to connect to + +[id="{version}-plugins-{type}s-{plugin}-message"] +===== `message` + + * Value type is <> + * Default value is `"%{message}"` + +message text to log. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. 
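+
+For example, the `message` option can be templated from event fields. This is
+a sketch only; the server address and the `program` field are assumptions:
+
+[source,ruby]
+output {
+  syslog {
+    host => "syslog.example.com"          # required
+    port => 514                           # required
+    message => "%{program}: %{message}"   # hypothetical field used for templating
+  }
+}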
+ +[id="{version}-plugins-{type}s-{plugin}-msgid"] +===== `msgid` + + * Value type is <> + * Default value is `"-"` + +message id for syslog message. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +syslog server port to connect to + +[id="{version}-plugins-{type}s-{plugin}-priority"] +===== `priority` + + * Value type is <> + * Default value is `"%{syslog_pri}"` + +syslog priority +The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-procid"] +===== `procid` + + * Value type is <> + * Default value is `"-"` + +process id for syslog message. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-protocol"] +===== `protocol` + + * Value can be any of: `tcp`, `udp`, `ssl-tcp` + * Default value is `"udp"` + +syslog server protocol. you can choose between udp, tcp and ssl/tls over tcp + +[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] +===== `reconnect_interval` + + * Value type is <> + * Default value is `1` + +when connection fails, retry interval in sec. + +[id="{version}-plugins-{type}s-{plugin}-rfc"] +===== `rfc` + + * Value can be any of: `rfc3164`, `rfc5424` + * Default value is `"rfc3164"` + +syslog message format: you can choose between rfc3164 or rfc5424 + +[id="{version}-plugins-{type}s-{plugin}-severity"] +===== `severity` + + * Value type is <> + * Default value is `"notice"` + +severity label for syslog message +default fallback to notice as in rfc3164 +The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-sourcehost"] +===== `sourcehost` + + * Value type is <> + * Default value is `"%{host}"` + +source host for syslog message. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"] +===== `ssl_cacert` + + * Value type is <> + * There is no default value for this setting. + +The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. + +[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] +===== `ssl_cert` + + * Value type is <> + * There is no default value for this setting. + +SSL certificate path + +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + + * Value type is <> + * There is no default value for this setting. + +SSL key path + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] +===== `ssl_key_passphrase` + + * Value type is <> + * Default value is `nil` + +SSL key passphrase + +[id="{version}-plugins-{type}s-{plugin}-ssl_verify"] +===== `ssl_verify` + + * Value type is <> + * Default value is `false` + +Verify the identity of the other end of the SSL connection against the CA. 
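+
+Tying the SSL options together, a hedged sketch of shipping over `ssl-tcp`
+with verification enabled; the address, port, and certificate path are
+placeholders:
+
+[source,ruby]
+output {
+  syslog {
+    host => "syslog.example.com"
+    port => 6514                                 # assumed TLS syslog port
+    protocol => "ssl-tcp"
+    ssl_cacert => "/etc/logstash/certs/ca.crt"   # placeholder path
+    ssl_verify => true
+  }
+}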
+ +[id="{version}-plugins-{type}s-{plugin}-use_labels"] +===== `use_labels` + + * Value type is <> + * Default value is `true` + +use label parsing for severity and facility levels +use priority field if set to false + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/syslog-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/syslog-v3.0.4.asciidoc new file mode 100644 index 000000000..c8d6c78c2 --- /dev/null +++ b/docs/versioned-plugins/outputs/syslog-v3.0.4.asciidoc @@ -0,0 +1,239 @@ +:plugin: syslog +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-11-13 +:changelog_url: https://github.com/logstash-plugins/logstash-output-syslog/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Syslog output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Send events to a syslog server. + +You can send messages compliant with RFC3164 or RFC5424 +using either UDP or TCP as the transport protocol. + +By default the contents of the `message` field will be shipped as +the free-form message text part of the emitted syslog message. If +your messages don't have a `message` field or if you for some other +reason want to change the emitted message, modify the `message` +configuration option. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Syslog Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-appname>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-facility>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-msgid>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-priority>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-procid>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-protocol>> |<>, one of `["tcp", "udp", "ssl-tcp"]`|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-rfc>> |<>, one of `["rfc3164", "rfc5424"]`|No +| <<{version}-plugins-{type}s-{plugin}-severity>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-sourcehost>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-use_labels>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. 
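+
+For example, the RFC5424-specific fields might be set as in this sketch; the
+server address is a placeholder and `pid` is a hypothetical event field:
+
+[source,ruby]
+output {
+  syslog {
+    host => "syslog.example.com"
+    port => 514
+    rfc => "rfc5424"
+    appname => "myapp"   # placeholder application name
+    procid => "%{pid}"
+  }
+}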
+ +  + +[id="{version}-plugins-{type}s-{plugin}-appname"] +===== `appname` + + * Value type is <> + * Default value is `"LOGSTASH"` + +application name for syslog message. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-facility"] +===== `facility` + + * Value type is <> + * Default value is `"user-level"` + +facility label for syslog message +default fallback to user-level as in rfc3164 +The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +syslog server address to connect to + +[id="{version}-plugins-{type}s-{plugin}-message"] +===== `message` + + * Value type is <> + * Default value is `"%{message}"` + +message text to log. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-msgid"] +===== `msgid` + + * Value type is <> + * Default value is `"-"` + +message id for syslog message. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +syslog server port to connect to + +[id="{version}-plugins-{type}s-{plugin}-priority"] +===== `priority` + + * Value type is <> + * Default value is `"%{syslog_pri}"` + +syslog priority +The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-procid"] +===== `procid` + + * Value type is <> + * Default value is `"-"` + +process id for syslog message. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-protocol"] +===== `protocol` + + * Value can be any of: `tcp`, `udp`, `ssl-tcp` + * Default value is `"udp"` + +syslog server protocol. you can choose between udp, tcp and ssl/tls over tcp + +[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] +===== `reconnect_interval` + + * Value type is <> + * Default value is `1` + +when connection fails, retry interval in sec. + +[id="{version}-plugins-{type}s-{plugin}-rfc"] +===== `rfc` + + * Value can be any of: `rfc3164`, `rfc5424` + * Default value is `"rfc3164"` + +syslog message format: you can choose between rfc3164 or rfc5424 + +[id="{version}-plugins-{type}s-{plugin}-severity"] +===== `severity` + + * Value type is <> + * Default value is `"notice"` + +severity label for syslog message +default fallback to notice as in rfc3164 +The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-sourcehost"] +===== `sourcehost` + + * Value type is <> + * Default value is `"%{host}"` + +source host for syslog message. The new value can include `%{foo}` strings +to help you build a new value from other parts of the event. + +[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"] +===== `ssl_cacert` + + * Value type is <> + * There is no default value for this setting. + +The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. 
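+
+As a sketch of how this fits together with the certificate and key options
+described below (all paths are placeholders):
+
+[source,ruby]
+output {
+  syslog {
+    host => "syslog.example.com"
+    port => 6514                                   # assumed TLS syslog port
+    protocol => "ssl-tcp"
+    ssl_cacert => "/etc/logstash/certs/ca.crt"
+    ssl_cert => "/etc/logstash/certs/client.crt"
+    ssl_key => "/etc/logstash/certs/client.key"
+  }
+}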
+ +[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] +===== `ssl_cert` + + * Value type is <> + * There is no default value for this setting. + +SSL certificate path + +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + + * Value type is <> + * There is no default value for this setting. + +SSL key path + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] +===== `ssl_key_passphrase` + + * Value type is <> + * Default value is `nil` + +SSL key passphrase + +[id="{version}-plugins-{type}s-{plugin}-ssl_verify"] +===== `ssl_verify` + + * Value type is <> + * Default value is `false` + +Verify the identity of the other end of the SSL connection against the CA. + +[id="{version}-plugins-{type}s-{plugin}-use_labels"] +===== `use_labels` + + * Value type is <> + * Default value is `true` + +use label parsing for severity and facility levels +use priority field if set to false + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/tcp-index.asciidoc b/docs/versioned-plugins/outputs/tcp-index.asciidoc new file mode 100644 index 000000000..5919b7d83 --- /dev/null +++ b/docs/versioned-plugins/outputs/tcp-index.asciidoc @@ -0,0 +1,20 @@ +:plugin: tcp +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-11-13 +| <> | 2017-08-16 +| <> | 2017-08-01 +| <> | 2017-08-18 +| <> | 2017-06-23 +|======================================================================= + +include::tcp-v5.0.2.asciidoc[] +include::tcp-v5.0.1.asciidoc[] +include::tcp-v5.0.0.asciidoc[] +include::tcp-v4.0.2.asciidoc[] +include::tcp-v4.0.1.asciidoc[] + diff --git a/docs/versioned-plugins/outputs/tcp-v4.0.1.asciidoc b/docs/versioned-plugins/outputs/tcp-v4.0.1.asciidoc new file mode 100644 index 000000000..2179f1262 --- /dev/null +++ b/docs/versioned-plugins/outputs/tcp-v4.0.1.asciidoc @@ -0,0 +1,158 @@ +:plugin: tcp +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.0.1 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-output-tcp/blob/v4.0.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Tcp output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Write events over a TCP socket. + +Each event json is separated by a newline. + +Can either accept connections from clients or connect to a server, +depending on `mode`. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Tcp Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-mode>> |<<string,string>>, one of `["server", "client"]`|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<<boolean,boolean>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * This is a required setting.
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+When mode is `server`, the address to listen on.
+When mode is `client`, the address to connect to.
+
+[id="{version}-plugins-{type}s-{plugin}-message_format"]
+===== `message_format` (DEPRECATED)
+
+  * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+The format to use when writing events. This value
+supports any string and can include `%{name}` and other dynamic
+strings.
+
+If this setting is omitted, the full JSON representation of the
+event will be written as a single line.
+
+[id="{version}-plugins-{type}s-{plugin}-mode"]
+===== `mode`
+
+  * Value can be any of: `server`, `client`
+  * Default value is `"client"`
+
+Mode to operate in. `server` listens for client connections,
+`client` connects to a server.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * This is a required setting.
+  * Value type is <<number,number>>
+  * There is no default value for this setting.
+
+When mode is `server`, the port to listen on.
+When mode is `client`, the port to connect to.
+
+[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"]
+===== `reconnect_interval`
+
+  * Value type is <<number,number>>
+  * Default value is `10`
+
+The interval, in seconds, to wait before retrying after a failed connection.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"]
+===== `ssl_cacert`
+
+  * Value type is <<path,path>>
+  * There is no default value for this setting.
+
+The SSL CA certificate, chainfile, or CA path. The system CA path is automatically included.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_cert"]
+===== `ssl_cert`
+
+  * Value type is <<path,path>>
+  * There is no default value for this setting.
+
+The SSL certificate path.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_enable"]
+===== `ssl_enable`
+
+  * Value type is <<boolean,boolean>>
+  * Default value is `false`
+
+Enable SSL. Must be set for the other `ssl_` options to take effect.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+  * Value type is <<path,path>>
+  * There is no default value for this setting.
+ +SSL key path + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] +===== `ssl_key_passphrase` + + * Value type is <> + * Default value is `nil` + +SSL key passphrase + +[id="{version}-plugins-{type}s-{plugin}-ssl_verify"] +===== `ssl_verify` + + * Value type is <> + * Default value is `false` + +Verify the identity of the other end of the SSL connection against the CA. +For input, sets the field `sslsubject` to that of the client certificate. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/tcp-v4.0.2.asciidoc b/docs/versioned-plugins/outputs/tcp-v4.0.2.asciidoc new file mode 100644 index 000000000..8dd59e103 --- /dev/null +++ b/docs/versioned-plugins/outputs/tcp-v4.0.2.asciidoc @@ -0,0 +1,158 @@ +:plugin: tcp +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v4.0.2 +:release_date: 2017-08-18 +:changelog_url: https://github.com/logstash-plugins/logstash-output-tcp/blob/v4.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Tcp output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Write events over a TCP socket. + +Each event json is separated by a newline. + +Can either accept connections from clients or connect to a server, +depending on `mode`. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Tcp Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +When mode is `server`, the address to listen on. +When mode is `client`, the address to connect to. + +[id="{version}-plugins-{type}s-{plugin}-message_format"] +===== `message_format` (DEPRECATED) + + * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. + * Value type is <> + * There is no default value for this setting. + +The format to use when writing events to the file. 
This value +supports any string and can include `%{name}` and other dynamic +strings. + +If this setting is omitted, the full json representation of the +event will be written as a single line. + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `server`, `client` + * Default value is `"client"` + +Mode to operate in. `server` listens for client connections, +`client` connects to a server. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +When mode is `server`, the port to listen on. +When mode is `client`, the port to connect to. + +[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] +===== `reconnect_interval` + + * Value type is <> + * Default value is `10` + +When connect failed,retry interval in sec. + +[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"] +===== `ssl_cacert` + + * Value type is <> + * There is no default value for this setting. + +The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. + +[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] +===== `ssl_cert` + + * Value type is <> + * There is no default value for this setting. + +SSL certificate path + +[id="{version}-plugins-{type}s-{plugin}-ssl_enable"] +===== `ssl_enable` + + * Value type is <> + * Default value is `false` + +Enable SSL (must be set for other `ssl_` options to take effect). + +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + + * Value type is <> + * There is no default value for this setting. + +SSL key path + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] +===== `ssl_key_passphrase` + + * Value type is <> + * Default value is `nil` + +SSL key passphrase + +[id="{version}-plugins-{type}s-{plugin}-ssl_verify"] +===== `ssl_verify` + + * Value type is <> + * Default value is `false` + +Verify the identity of the other end of the SSL connection against the CA. +For input, sets the field `sslsubject` to that of the client certificate. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/tcp-v5.0.0.asciidoc b/docs/versioned-plugins/outputs/tcp-v5.0.0.asciidoc new file mode 100644 index 000000000..c46777d17 --- /dev/null +++ b/docs/versioned-plugins/outputs/tcp-v5.0.0.asciidoc @@ -0,0 +1,144 @@ +:plugin: tcp +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v5.0.0 +:release_date: 2017-08-01 +:changelog_url: https://github.com/logstash-plugins/logstash-output-tcp/blob/v5.0.0/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Tcp output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Write events over a TCP socket. + +Each event json is separated by a newline. + +Can either accept connections from clients or connect to a server, +depending on `mode`. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Tcp Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +When mode is `server`, the address to listen on. +When mode is `client`, the address to connect to. + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `server`, `client` + * Default value is `"client"` + +Mode to operate in. `server` listens for client connections, +`client` connects to a server. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +When mode is `server`, the port to listen on. +When mode is `client`, the port to connect to. + +[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] +===== `reconnect_interval` + + * Value type is <> + * Default value is `10` + +When connect failed,retry interval in sec. + +[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"] +===== `ssl_cacert` + + * Value type is <> + * There is no default value for this setting. + +The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. + +[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] +===== `ssl_cert` + + * Value type is <> + * There is no default value for this setting. + +SSL certificate path + +[id="{version}-plugins-{type}s-{plugin}-ssl_enable"] +===== `ssl_enable` + + * Value type is <> + * Default value is `false` + +Enable SSL (must be set for other `ssl_` options to take effect). + +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + + * Value type is <> + * There is no default value for this setting. + +SSL key path + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] +===== `ssl_key_passphrase` + + * Value type is <> + * Default value is `nil` + +SSL key passphrase + +[id="{version}-plugins-{type}s-{plugin}-ssl_verify"] +===== `ssl_verify` + + * Value type is <> + * Default value is `false` + +Verify the identity of the other end of the SSL connection against the CA. +For input, sets the field `sslsubject` to that of the client certificate. 
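+
+For reference, a minimal client-mode configuration might look like the
+following sketch. The host and port are placeholders, not recommendations:
+
+[source,ruby]
+----------------------------------
+output {
+  tcp {
+    host => "logs.example.com"   # hypothetical receiving server
+    port => 9000
+    mode => "client"             # connect out to the server (the default)
+  }
+}
+----------------------------------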
+ + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/tcp-v5.0.1.asciidoc b/docs/versioned-plugins/outputs/tcp-v5.0.1.asciidoc new file mode 100644 index 000000000..ba365b42d --- /dev/null +++ b/docs/versioned-plugins/outputs/tcp-v5.0.1.asciidoc @@ -0,0 +1,144 @@ +:plugin: tcp +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v5.0.1 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-tcp/blob/v5.0.1/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Tcp output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Write events over a TCP socket. + +Each event json is separated by a newline. + +Can either accept connections from clients or connect to a server, +depending on `mode`. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Tcp Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +When mode is `server`, the address to listen on. +When mode is `client`, the address to connect to. + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `server`, `client` + * Default value is `"client"` + +Mode to operate in. `server` listens for client connections, +`client` connects to a server. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +When mode is `server`, the port to listen on. +When mode is `client`, the port to connect to. + +[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] +===== `reconnect_interval` + + * Value type is <> + * Default value is `10` + +When connect failed,retry interval in sec. 
+ +[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"] +===== `ssl_cacert` + + * Value type is <> + * There is no default value for this setting. + +The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. + +[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] +===== `ssl_cert` + + * Value type is <> + * There is no default value for this setting. + +SSL certificate path + +[id="{version}-plugins-{type}s-{plugin}-ssl_enable"] +===== `ssl_enable` + + * Value type is <> + * Default value is `false` + +Enable SSL (must be set for other `ssl_` options to take effect). + +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + + * Value type is <> + * There is no default value for this setting. + +SSL key path + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] +===== `ssl_key_passphrase` + + * Value type is <> + * Default value is `nil` + +SSL key passphrase + +[id="{version}-plugins-{type}s-{plugin}-ssl_verify"] +===== `ssl_verify` + + * Value type is <> + * Default value is `false` + +Verify the identity of the other end of the SSL connection against the CA. +For input, sets the field `sslsubject` to that of the client certificate. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/tcp-v5.0.2.asciidoc b/docs/versioned-plugins/outputs/tcp-v5.0.2.asciidoc new file mode 100644 index 000000000..e4b0257f0 --- /dev/null +++ b/docs/versioned-plugins/outputs/tcp-v5.0.2.asciidoc @@ -0,0 +1,144 @@ +:plugin: tcp +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v5.0.2 +:release_date: 2017-11-13 +:changelog_url: https://github.com/logstash-plugins/logstash-output-tcp/blob/v5.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Tcp output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Write events over a TCP socket. + +Each event json is separated by a newline. + +Can either accept connections from clients or connect to a server, +depending on `mode`. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Tcp Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cacert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +When mode is `server`, the address to listen on. +When mode is `client`, the address to connect to. + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `server`, `client` + * Default value is `"client"` + +Mode to operate in. `server` listens for client connections, +`client` connects to a server. + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +When mode is `server`, the port to listen on. +When mode is `client`, the port to connect to. + +[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] +===== `reconnect_interval` + + * Value type is <> + * Default value is `10` + +When connect failed,retry interval in sec. + +[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"] +===== `ssl_cacert` + + * Value type is <> + * There is no default value for this setting. + +The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. + +[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] +===== `ssl_cert` + + * Value type is <> + * There is no default value for this setting. + +SSL certificate path + +[id="{version}-plugins-{type}s-{plugin}-ssl_enable"] +===== `ssl_enable` + + * Value type is <> + * Default value is `false` + +Enable SSL (must be set for other `ssl_` options to take effect). + +[id="{version}-plugins-{type}s-{plugin}-ssl_key"] +===== `ssl_key` + + * Value type is <> + * There is no default value for this setting. + +SSL key path + +[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] +===== `ssl_key_passphrase` + + * Value type is <> + * Default value is `nil` + +SSL key passphrase + +[id="{version}-plugins-{type}s-{plugin}-ssl_verify"] +===== `ssl_verify` + + * Value type is <> + * Default value is `false` + +Verify the identity of the other end of the SSL connection against the CA. +For input, sets the field `sslsubject` to that of the client certificate. 
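+
+As an illustration of the SSL options above, a server-mode listener with TLS
+enabled might be configured as in this sketch (all paths and values are
+placeholders):
+
+[source,ruby]
+----------------------------------
+output {
+  tcp {
+    host               => "0.0.0.0"                # listen on all interfaces
+    port               => 9000
+    mode               => "server"                 # accept client connections
+    ssl_enable         => true                     # required for other ssl_* options
+    ssl_cert           => "/etc/ssl/logstash.crt"  # hypothetical certificate path
+    ssl_key            => "/etc/ssl/logstash.key"  # hypothetical key path
+    ssl_key_passphrase => "changeme"
+    ssl_verify         => true                     # verify peers against the CA
+    ssl_cacert         => "/etc/ssl/ca.crt"        # hypothetical CA path
+  }
+}
+----------------------------------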
+ + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/timber-index.asciidoc b/docs/versioned-plugins/outputs/timber-index.asciidoc new file mode 100644 index 000000000..64662fcfb --- /dev/null +++ b/docs/versioned-plugins/outputs/timber-index.asciidoc @@ -0,0 +1,12 @@ +:plugin: timber +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +| <> | 2017-09-02 +|======================================================================= + +include::timber-v1.0.3.asciidoc[] + diff --git a/docs/versioned-plugins/outputs/timber-v1.0.3.asciidoc b/docs/versioned-plugins/outputs/timber-v1.0.3.asciidoc new file mode 100644 index 000000000..fbe37b524 --- /dev/null +++ b/docs/versioned-plugins/outputs/timber-v1.0.3.asciidoc @@ -0,0 +1,228 @@ +:plugin: timber +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v1.0.3 +:release_date: 2017-09-02 +:changelog_url: https://github.com/logstash-plugins/logstash-output-timber/blob/v1.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Timber output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +This output sends structured events to the https://timber.io[Timber.io logging service]. +Timber is a cloud-based logging service designed for developers, providing easy features +out of the box that make you more productive. +https://timber.io/docs/app/console/tail-a-user[Tail users], +https://timber.io/docs/app/console/trace-http-requests[trace requests], +https://timber.io/docs/app/console/inspect-http-requests[inspect HTTP parameters], +and https://timber.io/docs/app/console/searching[search] on rich structured data without +sacrificing readability. + +Internally, it's a highly efficient HTTP transport that uses batching and retries for +fast and reliable delivery. + +This output will execute up to 'pool_max' requests in parallel for performance. +Consider this when tuning this plugin for performance. The default of 50 should +be sufficient for most setups. + +Additionally, note that when parallel execution is used strict ordering of events is not +guaranteed! + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Timber Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-api_key>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<<password,password>>|No
+| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<<string,string>>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-api_key"]
+===== `api_key`
+
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+Your Timber.io API key. You can obtain your API key by creating an app in the
+https://app.timber.io[Timber console].
+
+
+[id="{version}-plugins-{type}s-{plugin}-cacert"]
+===== `cacert`
+
+  * Value type is <<path,path>>
+  * There is no default value for this setting.
+
+If you need to use a custom X.509 CA (.pem certs), specify the path to it here.
+
+
+[id="{version}-plugins-{type}s-{plugin}-client_cert"]
+===== `client_cert`
+
+  * Value type is <<path,path>>
+  * There is no default value for this setting.
+
+If you'd like to use a client certificate (note: most people don't want this),
+set the path to the X.509 cert here.
+
+
+[id="{version}-plugins-{type}s-{plugin}-client_key"]
+===== `client_key`
+
+  * Value type is <<path,path>>
+  * There is no default value for this setting.
+
+If you're using a client certificate, specify the path to the encryption key here.
+
+
+[id="{version}-plugins-{type}s-{plugin}-connect_timeout"]
+===== `connect_timeout`
+
+  * Value type is <<number,number>>
+  * Default value is `10`
+
+Timeout (in seconds) to wait for a connection to be established.
+
+
+[id="{version}-plugins-{type}s-{plugin}-keystore"]
+===== `keystore`
+
+  * Value type is <<path,path>>
+  * There is no default value for this setting.
+
+If you need to use a custom keystore (`.jks`), specify it here. This does not work with .pem keys!
+
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
+===== `keystore_password`
+
+  * Value type is <<password,password>>
+  * There is no default value for this setting.
+
+Specify the keystore password here.
+Note: most .jks files created with keytool require a password!
+
+
+[id="{version}-plugins-{type}s-{plugin}-keystore_type"]
+===== `keystore_type`
+
+  * Value type is <<string,string>>
+  * Default value is `"JKS"`
+
+Specify the keystore type here. One of `JKS` or `PKCS12`.
+
+
+[id="{version}-plugins-{type}s-{plugin}-pool_max"]
+===== `pool_max`
+
+  * Value type is <<number,number>>
+  * Default value is `50`
+
+Max number of concurrent connections.
+
+
+[id="{version}-plugins-{type}s-{plugin}-proxy"]
+===== `proxy`
+
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+Use this if you'd like to send requests through an HTTP proxy. This supports
+multiple configuration syntaxes:
+
+1. Proxy host in form: `http://proxy.org:1234`
+2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}`
+3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}`
+
+
+[id="{version}-plugins-{type}s-{plugin}-request_timeout"]
+===== `request_timeout`
+
+  * Value type is <<number,number>>
+  * Default value is `60`
+
+Timeout (in seconds) for the entire request. The underlying HTTP client is
+based on https://github.com/cheald/manticore[Manticore].
+
+
+[id="{version}-plugins-{type}s-{plugin}-socket_timeout"]
+===== `socket_timeout`
+
+  * Value type is <<number,number>>
+  * Default value is `10`
+
+Timeout (in seconds) to wait for data on the socket.
+
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_validation"]
+===== `ssl_certificate_validation`
+
+  * Value type is <<boolean,boolean>>
+  * Default value is `true`
+
+Set this to `false` to disable SSL/TLS certificate validation.
+Note: setting this to `false` is generally considered insecure!
+
+
+[id="{version}-plugins-{type}s-{plugin}-truststore"]
+===== `truststore`
+
+  * Value type is <<path,path>>
+  * There is no default value for this setting.
+
+If you need to use a custom truststore (`.jks`), specify it here. This does not work with .pem certs!
+
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
+===== `truststore_password`
+
+  * Value type is <<password,password>>
+  * There is no default value for this setting.
+
+Specify the truststore password here.
+Note: most .jks files created with keytool require a password!
+
+
+[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
+===== `truststore_type`
+
+  * Value type is <<string,string>>
+  * Default value is `"JKS"`
+
+Specify the truststore type here. One of `JKS` or `PKCS12`.
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/udp-index.asciidoc b/docs/versioned-plugins/outputs/udp-index.asciidoc
new file mode 100644
index 000000000..9404b6832
--- /dev/null
+++ b/docs/versioned-plugins/outputs/udp-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: udp
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <<v3.0.5-plugins-outputs-udp,v3.0.5>> | 2017-11-13
+| <<v3.0.4-plugins-outputs-udp,v3.0.4>> | 2017-08-16
+| <<v3.0.3-plugins-outputs-udp,v3.0.3>> | 2017-06-23
+|=======================================================================
+
+include::udp-v3.0.5.asciidoc[]
+include::udp-v3.0.4.asciidoc[]
+include::udp-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/udp-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/udp-v3.0.3.asciidoc
new file mode 100644
index 000000000..7621f4675
--- /dev/null
+++ b/docs/versioned-plugins/outputs/udp-v3.0.3.asciidoc
@@ -0,0 +1,65 @@
+:plugin: udp
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-udp/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Udp output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Send events over UDP
+
+Keep in mind that UDP will lose messages.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Udp Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * This is a required setting.
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+The address to send messages to
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * This is a required setting.
+  * Value type is <<number,number>>
+  * There is no default value for this setting.
+
+The port to send messages on
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/udp-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/udp-v3.0.4.asciidoc
new file mode 100644
index 000000000..d186993ed
--- /dev/null
+++ b/docs/versioned-plugins/outputs/udp-v3.0.4.asciidoc
@@ -0,0 +1,65 @@
+:plugin: udp
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-output-udp/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Udp output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Send events over UDP
+
+Keep in mind that UDP will lose messages.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Udp Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|Yes
+| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+&nbsp;
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+  * This is a required setting.
+ * Value type is <> + * There is no default value for this setting. + +The address to send messages to + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The port to send messages on + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/udp-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/udp-v3.0.5.asciidoc new file mode 100644 index 000000000..45c7ab8a5 --- /dev/null +++ b/docs/versioned-plugins/outputs/udp-v3.0.5.asciidoc @@ -0,0 +1,65 @@ +:plugin: udp +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.5 +:release_date: 2017-11-13 +:changelog_url: https://github.com/logstash-plugins/logstash-output-udp/blob/v3.0.5/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Udp output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Send events over UDP + +Keep in mind that UDP will lose messages. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Udp Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The address to send messages to + +[id="{version}-plugins-{type}s-{plugin}-port"] +===== `port` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. 
+
+The port to send messages on
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/webhdfs-index.asciidoc b/docs/versioned-plugins/outputs/webhdfs-index.asciidoc
new file mode 100644
index 000000000..1512342fc
--- /dev/null
+++ b/docs/versioned-plugins/outputs/webhdfs-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: webhdfs
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <<v3.0.5-plugins-outputs-webhdfs,v3.0.5>> | 2017-11-13
+| <<v3.0.4-plugins-outputs-webhdfs,v3.0.4>> | 2017-08-16
+| <<v3.0.3-plugins-outputs-webhdfs,v3.0.3>> | 2017-06-23
+|=======================================================================
+
+include::webhdfs-v3.0.5.asciidoc[]
+include::webhdfs-v3.0.4.asciidoc[]
+include::webhdfs-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/webhdfs-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/webhdfs-v3.0.3.asciidoc
new file mode 100644
index 000000000..8a6e5fb1f
--- /dev/null
+++ b/docs/versioned-plugins/outputs/webhdfs-v3.0.3.asciidoc
@@ -0,0 +1,293 @@
+:plugin: webhdfs
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-webhdfs/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Webhdfs output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This plugin sends Logstash events into files in HDFS via
+the https://hadoop.apache.org/docs/r1.0.4/webhdfs.html[webhdfs] REST API.
+
+==== Dependencies
+This plugin has no dependency on jars from Hadoop, which reduces configuration and compatibility
+problems. It uses the webhdfs gem from Kazuki Ohta and TAGOMORI Satoshi (see https://github.com/kzk/webhdfs).
+The zlib and snappy gems are optional dependencies, needed only if you use the compression functionality.
+
+==== Operational Notes
+If you get an error like:
+
+    Max write retries reached. Exception: initialize: name or service not known {:level=>:error}
+
+make sure that the hostname of your namenode is resolvable on the host running Logstash. When creating/appending
+to a file, webhdfs sometimes sends a `307 TEMPORARY_REDIRECT` with the `HOSTNAME` of the machine it's running on.
+
+==== Usage
+This is an example of Logstash config:
+
+[source,ruby]
+----------------------------------
+input {
+  ...
+}
+filter {
+  ...
+}
+output {
+  webhdfs {
+    host => "127.0.0.1"  # (required)
+    port => 50070        # (optional, default: 50070)
+    path => "/user/logstash/dt=%{+YYYY-MM-dd}/logstash-%{+HH}.log"  # (required)
+    user => "hue"        # (required)
+  }
+}
+----------------------------------
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Webhdfs Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-compression>> |<>, one of `["none", "snappy", "gzip"]`|No +| <<{version}-plugins-{type}s-{plugin}-flush_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-idle_flush_time>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-kerberos_keytab>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-open_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-read_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_known_errors>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_times>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-single_file_per_thread>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-snappy_bufsize>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-snappy_format>> |<>, one of `["stream", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-standby_host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-standby_port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-use_httpfs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-use_kerberos_auth>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-use_ssl_auth>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-compression"] +===== `compression` + + * Value can be any of: `none`, `snappy`, `gzip` + * Default value is `"none"` + +Compress output. One of ['none', 'snappy', 'gzip'] + +[id="{version}-plugins-{type}s-{plugin}-flush_size"] +===== `flush_size` + + * Value type is <> + * Default value is `500` + +Sending data to webhdfs if event count is above, even if `store_interval_in_secs` is not reached. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The server name for webhdfs/httpfs connections. + +[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"] +===== `idle_flush_time` + + * Value type is <> + * Default value is `1` + +Sending data to webhdfs in x seconds intervals. + +[id="{version}-plugins-{type}s-{plugin}-kerberos_keytab"] +===== `kerberos_keytab` + + * Value type is <> + * There is no default value for this setting. + +Set kerberos keytab file. Note that the gssapi library needs to be available to use this. + +[id="{version}-plugins-{type}s-{plugin}-open_timeout"] +===== `open_timeout` + + * Value type is <> + * Default value is `30` + +WebHdfs open timeout, default 30s. + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The path to the file to write to. 
+Event fields can be used here,
+as well as date fields in the joda time format, e.g.:
+`/user/logstash/dt=%{+YYYY-MM-dd}/%{@source_host}-%{+HH}.log`
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * Value type is <<number,number>>
+  * Default value is `50070`
+
+The server port for webhdfs/httpfs connections.
+
+[id="{version}-plugins-{type}s-{plugin}-read_timeout"]
+===== `read_timeout`
+
+  * Value type is <<number,number>>
+  * Default value is `30`
+
+The WebHdfs read timeout, in seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_interval"]
+===== `retry_interval`
+
+  * Value type is <<number,number>>
+  * Default value is `0.5`
+
+How long to wait between retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_known_errors"]
+===== `retry_known_errors`
+
+  * Value type is <<boolean,boolean>>
+  * Default value is `true`
+
+Retry some known webhdfs errors. These may be caused by race conditions when appending to the same file, etc.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_times"]
+===== `retry_times`
+
+  * Value type is <<number,number>>
+  * Default value is `5`
+
+How many times to retry. If `retry_times` is exceeded, an error is logged and the event is discarded.
+
+[id="{version}-plugins-{type}s-{plugin}-single_file_per_thread"]
+===== `single_file_per_thread`
+
+  * Value type is <<boolean,boolean>>
+  * Default value is `false`
+
+Avoid appending to the same file from multiple threads.
+This solves some problems with multiple Logstash output threads and locked file leases in webhdfs.
+If this option is set to `true`, `%{[@metadata][thread_id]}` needs to be used in the `path` config setting.
+
+[id="{version}-plugins-{type}s-{plugin}-snappy_bufsize"]
+===== `snappy_bufsize`
+
+  * Value type is <<number,number>>
+  * Default value is `32768`
+
+Set the snappy chunk size. Only necessary for the stream format. Defaults to 32k. Max is 65536.
+See http://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
+
+[id="{version}-plugins-{type}s-{plugin}-snappy_format"]
+===== `snappy_format`
+
+  * Value can be any of: `stream`, `file`
+  * Default value is `"stream"`
+
+Set the snappy format. One of `stream`, `file`. Set to `stream` to be Hive compatible.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_cert"]
+===== `ssl_cert`
+
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+Set the SSL cert file.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+Set the SSL key file.
+
+[id="{version}-plugins-{type}s-{plugin}-standby_host"]
+===== `standby_host`
+
+  * Value type is <<string,string>>
+  * Default value is `false`
+
+Standby namenode for HA HDFS.
+
+[id="{version}-plugins-{type}s-{plugin}-standby_port"]
+===== `standby_port`
+
+  * Value type is <<number,number>>
+  * Default value is `50070`
+
+Standby namenode port for HA HDFS.
+
+[id="{version}-plugins-{type}s-{plugin}-use_httpfs"]
+===== `use_httpfs`
+
+  * Value type is <<boolean,boolean>>
+  * Default value is `false`
+
+Use httpfs mode if set to `true`, else webhdfs.
+
+[id="{version}-plugins-{type}s-{plugin}-use_kerberos_auth"]
+===== `use_kerberos_auth`
+
+  * Value type is <<boolean,boolean>>
+  * Default value is `false`
+
+Use Kerberos authentication. Note that the gssapi library needs to be available to use this.
+
+[id="{version}-plugins-{type}s-{plugin}-use_ssl_auth"]
+===== `use_ssl_auth`
+
+  * Value type is <<boolean,boolean>>
+  * Default value is `false`
+
+Use SSL authentication. Note that the openssl library needs to be available to use this.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+  * This is a required setting.
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+The username for webhdfs.
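+
+To illustrate `single_file_per_thread` together with the `path` requirement
+described above, a sketch with placeholder values might look like this:
+
+[source,ruby]
+----------------------------------
+output {
+  webhdfs {
+    host                   => "namenode.example.com"  # hypothetical namenode
+    port                   => 50070
+    user                   => "logstash"
+    single_file_per_thread => true
+    # %{[@metadata][thread_id]} is required in path when the option is enabled
+    path                   => "/user/logstash/dt=%{+YYYY-MM-dd}/logstash-%{[@metadata][thread_id]}.log"
+  }
+}
+----------------------------------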
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/webhdfs-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/webhdfs-v3.0.4.asciidoc
new file mode 100644
index 000000000..7cdab4354
--- /dev/null
+++ b/docs/versioned-plugins/outputs/webhdfs-v3.0.4.asciidoc
@@ -0,0 +1,293 @@
+:plugin: webhdfs
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-output-webhdfs/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Webhdfs output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This plugin sends Logstash events into files in HDFS via
+the https://hadoop.apache.org/docs/r1.0.4/webhdfs.html[webhdfs] REST API.
+
+==== Dependencies
+This plugin has no dependency on jars from Hadoop, which reduces configuration and compatibility
+problems. It uses the webhdfs gem from Kazuki Ohta and TAGOMORI Satoshi (see https://github.com/kzk/webhdfs).
+The zlib and snappy gems are optional dependencies, needed only if you use the compression functionality.
+
+==== Operational Notes
+If you get an error like:
+
+    Max write retries reached. Exception: initialize: name or service not known {:level=>:error}
+
+make sure that the hostname of your namenode is resolvable on the host running Logstash. When creating/appending
+to a file, webhdfs sometimes sends a `307 TEMPORARY_REDIRECT` with the `HOSTNAME` of the machine it's running on.
+
+==== Usage
+This is an example of Logstash config:
+
+[source,ruby]
+----------------------------------
+input {
+  ...
+}
+filter {
+  ...
+}
+output {
+  webhdfs {
+    host => "127.0.0.1"  # (required)
+    port => 50070        # (optional, default: 50070)
+    path => "/user/logstash/dt=%{+YYYY-MM-dd}/logstash-%{+HH}.log"  # (required)
+    user => "hue"        # (required)
+  }
+}
+----------------------------------
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Webhdfs Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-compression>> |<>, one of `["none", "snappy", "gzip"]`|No +| <<{version}-plugins-{type}s-{plugin}-flush_size>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-idle_flush_time>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-kerberos_keytab>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-open_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-read_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_interval>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_known_errors>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-retry_times>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-single_file_per_thread>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-snappy_bufsize>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-snappy_format>> |<>, one of `["stream", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-standby_host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-standby_port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-use_httpfs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-use_kerberos_auth>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-use_ssl_auth>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-compression"] +===== `compression` + + * Value can be any of: `none`, `snappy`, `gzip` + * Default value is `"none"` + +Compress output. One of ['none', 'snappy', 'gzip'] + +[id="{version}-plugins-{type}s-{plugin}-flush_size"] +===== `flush_size` + + * Value type is <> + * Default value is `500` + +Sending data to webhdfs if event count is above, even if `store_interval_in_secs` is not reached. + +[id="{version}-plugins-{type}s-{plugin}-host"] +===== `host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The server name for webhdfs/httpfs connections. + +[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"] +===== `idle_flush_time` + + * Value type is <> + * Default value is `1` + +Sending data to webhdfs in x seconds intervals. + +[id="{version}-plugins-{type}s-{plugin}-kerberos_keytab"] +===== `kerberos_keytab` + + * Value type is <> + * There is no default value for this setting. + +Set kerberos keytab file. Note that the gssapi library needs to be available to use this. + +[id="{version}-plugins-{type}s-{plugin}-open_timeout"] +===== `open_timeout` + + * Value type is <> + * Default value is `30` + +WebHdfs open timeout, default 30s. + +[id="{version}-plugins-{type}s-{plugin}-path"] +===== `path` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The path to the file to write to. 
+Event fields can be used here,
+as well as date fields in the joda time format, e.g.:
+`/user/logstash/dt=%{+YYYY-MM-dd}/%{@source_host}-%{+HH}.log`
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+  * Value type is <<number,number>>
+  * Default value is `50070`
+
+The server port for webhdfs/httpfs connections.
+
+[id="{version}-plugins-{type}s-{plugin}-read_timeout"]
+===== `read_timeout`
+
+  * Value type is <<number,number>>
+  * Default value is `30`
+
+The WebHdfs read timeout, in seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_interval"]
+===== `retry_interval`
+
+  * Value type is <<number,number>>
+  * Default value is `0.5`
+
+How long to wait between retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_known_errors"]
+===== `retry_known_errors`
+
+  * Value type is <<boolean,boolean>>
+  * Default value is `true`
+
+Retry some known webhdfs errors. These may be caused by race conditions when appending to the same file, etc.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_times"]
+===== `retry_times`
+
+  * Value type is <<number,number>>
+  * Default value is `5`
+
+How many times to retry. If `retry_times` is exceeded, an error is logged and the event is discarded.
+
+[id="{version}-plugins-{type}s-{plugin}-single_file_per_thread"]
+===== `single_file_per_thread`
+
+  * Value type is <<boolean,boolean>>
+  * Default value is `false`
+
+Avoid appending to the same file from multiple threads.
+This solves some problems with multiple Logstash output threads and locked file leases in webhdfs.
+If this option is set to `true`, `%{[@metadata][thread_id]}` needs to be used in the `path` config setting.
+
+[id="{version}-plugins-{type}s-{plugin}-snappy_bufsize"]
+===== `snappy_bufsize`
+
+  * Value type is <<number,number>>
+  * Default value is `32768`
+
+Set the snappy chunk size. Only necessary for the stream format. Defaults to 32k. Max is 65536.
+See http://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
+
+[id="{version}-plugins-{type}s-{plugin}-snappy_format"]
+===== `snappy_format`
+
+  * Value can be any of: `stream`, `file`
+  * Default value is `"stream"`
+
+Set the snappy format. One of `stream`, `file`. Set to `stream` to be Hive compatible.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_cert"]
+===== `ssl_cert`
+
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+Set the SSL cert file.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+Set the SSL key file.
+
+[id="{version}-plugins-{type}s-{plugin}-standby_host"]
+===== `standby_host`
+
+  * Value type is <<string,string>>
+  * Default value is `false`
+
+Standby namenode for HA HDFS.
+
+[id="{version}-plugins-{type}s-{plugin}-standby_port"]
+===== `standby_port`
+
+  * Value type is <<number,number>>
+  * Default value is `50070`
+
+Standby namenode port for HA HDFS.
+
+[id="{version}-plugins-{type}s-{plugin}-use_httpfs"]
+===== `use_httpfs`
+
+  * Value type is <<boolean,boolean>>
+  * Default value is `false`
+
+Use httpfs mode if set to `true`, else webhdfs.
+
+[id="{version}-plugins-{type}s-{plugin}-use_kerberos_auth"]
+===== `use_kerberos_auth`
+
+  * Value type is <<boolean,boolean>>
+  * Default value is `false`
+
+Use Kerberos authentication. Note that the gssapi library needs to be available to use this.
+
+[id="{version}-plugins-{type}s-{plugin}-use_ssl_auth"]
+===== `use_ssl_auth`
+
+  * Value type is <<boolean,boolean>>
+  * Default value is `false`
+
+Use SSL authentication. Note that the openssl library needs to be available to use this.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+  * This is a required setting.
+  * Value type is <<string,string>>
+  * There is no default value for this setting.
+
+The username for webhdfs.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/webhdfs-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/webhdfs-v3.0.5.asciidoc
new file mode 100644
index 000000000..0f57ef8a5
--- /dev/null
+++ b/docs/versioned-plugins/outputs/webhdfs-v3.0.5.asciidoc
@@ -0,0 +1,293 @@
+:plugin: webhdfs
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-11-13
+:changelog_url: https://github.com/logstash-plugins/logstash-output-webhdfs/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Webhdfs output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This plugin sends Logstash events into files in HDFS via
+the https://hadoop.apache.org/docs/r1.0.4/webhdfs.html[webhdfs] REST API.
+
+==== Dependencies
+This plugin has no dependency on jars from hadoop, thus reducing configuration and compatibility
+problems. It uses the webhdfs gem from Kazuki Ohta and TAGOMORI Satoshi (see: https://github.com/kzk/webhdfs).
+The zlib and snappy gems are optional dependencies, needed only if you use the compression functionality.
+
+==== Operational Notes
+If you get an error like:
+
+    Max write retries reached. Exception: initialize: name or service not known {:level=>:error}
+
+make sure that the hostname of your namenode is resolvable on the host running Logstash. When creating/appending
+to a file, webhdfs sometimes sends a `307 TEMPORARY_REDIRECT` with the `HOSTNAME` of the machine it's running on.
+
+==== Usage
+This is an example of Logstash config:
+
+[source,ruby]
+----------------------------------
+input {
+  ...
+}
+filter {
+  ...
+}
+output {
+  webhdfs {
+    host => "127.0.0.1"        # (required)
+    port => 50070              # (optional, default: 50070)
+    path => "/user/logstash/dt=%{+YYYY-MM-dd}/logstash-%{+HH}.log"  # (required)
+    user => "hue"              # (required)
+  }
+}
+----------------------------------
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Webhdfs Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-compression>> |<>, one of `["none", "snappy", "gzip"]`|No
+| <<{version}-plugins-{type}s-{plugin}-flush_size>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-idle_flush_time>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-kerberos_keytab>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-open_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-read_timeout>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_interval>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_known_errors>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-retry_times>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-single_file_per_thread>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-snappy_bufsize>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-snappy_format>> |<>, one of `["stream", "file"]`|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-standby_host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-standby_port>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-use_httpfs>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-use_kerberos_auth>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-use_ssl_auth>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-compression"]
+===== `compression`
+
+ * Value can be any of: `none`, `snappy`, `gzip`
+ * Default value is `"none"`
+
+Compress the output. One of `none`, `snappy`, or `gzip`.
+
+[id="{version}-plugins-{type}s-{plugin}-flush_size"]
+===== `flush_size`
+
+ * Value type is <>
+ * Default value is `500`
+
+Send data to webhdfs when the number of buffered events exceeds this value, even if `store_interval_in_secs` is not reached.
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The server name for webhdfs/httpfs connections.
+
+[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"]
+===== `idle_flush_time`
+
+ * Value type is <>
+ * Default value is `1`
+
+Send data to webhdfs at intervals of x seconds.
+
+[id="{version}-plugins-{type}s-{plugin}-kerberos_keytab"]
+===== `kerberos_keytab`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the kerberos keytab file. Note that the gssapi library needs to be available to use this.
+
+[id="{version}-plugins-{type}s-{plugin}-open_timeout"]
+===== `open_timeout`
+
+ * Value type is <>
+ * Default value is `30`
+
+The WebHdfs open timeout, default 30s.
+
+[id="{version}-plugins-{type}s-{plugin}-path"]
+===== `path`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The path to the file to write to. Event fields can be used here,
+as well as date fields in the joda time format, e.g.:
+`/user/logstash/dt=%{+YYYY-MM-dd}/%{@source_host}-%{+HH}.log`
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <>
+ * Default value is `50070`
+
+The server port for webhdfs/httpfs connections.
+
+[id="{version}-plugins-{type}s-{plugin}-read_timeout"]
+===== `read_timeout`
+
+ * Value type is <>
+ * Default value is `30`
+
+The WebHdfs read timeout, default 30s.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_interval"]
+===== `retry_interval`
+
+ * Value type is <>
+ * Default value is `0.5`
+
+How long to wait between retries.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_known_errors"]
+===== `retry_known_errors`
+
+ * Value type is <>
+ * Default value is `true`
+
+Retry some known webhdfs errors. These may be caused by race conditions when appending to the same file, etc.
+
+[id="{version}-plugins-{type}s-{plugin}-retry_times"]
+===== `retry_times`
+
+ * Value type is <>
+ * Default value is `5`
+
+How many times to retry. If `retry_times` is exceeded, an error will be logged and the event will be discarded.
+
+[id="{version}-plugins-{type}s-{plugin}-single_file_per_thread"]
+===== `single_file_per_thread`
+
+ * Value type is <>
+ * Default value is `false`
+
+Avoid appending to the same file from multiple threads.
+This solves some problems with multiple Logstash output threads and locked file leases in webhdfs.
+If this option is set to true, `%{[@metadata][thread_id]}` needs to be used in the path config setting.
+
+[id="{version}-plugins-{type}s-{plugin}-snappy_bufsize"]
+===== `snappy_bufsize`
+
+ * Value type is <>
+ * Default value is `32768`
+
+Set the snappy chunk size. Only necessary for the stream format. Defaults to 32k. Max is 65536.
+See http://code.google.com/p/snappy/source/browse/trunk/framing_format.txt for details.
+
+[id="{version}-plugins-{type}s-{plugin}-snappy_format"]
+===== `snappy_format`
+
+ * Value can be any of: `stream`, `file`
+ * Default value is `"stream"`
+
+Set the snappy format, one of `stream` or `file`. Set to `stream` to be Hive compatible.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_cert"]
+===== `ssl_cert`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the SSL certificate file.
+
+[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
+===== `ssl_key`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+Set the SSL key file.
+
+[id="{version}-plugins-{type}s-{plugin}-standby_host"]
+===== `standby_host`
+
+ * Value type is <>
+ * Default value is `false`
+
+Standby namenode for HA HDFS.
+
+[id="{version}-plugins-{type}s-{plugin}-standby_port"]
+===== `standby_port`
+
+ * Value type is <>
+ * Default value is `50070`
+
+Standby namenode port for HA HDFS.
+
+[id="{version}-plugins-{type}s-{plugin}-use_httpfs"]
+===== `use_httpfs`
+
+ * Value type is <>
+ * Default value is `false`
+
+Use httpfs mode if set to true, else webhdfs.
+
+[id="{version}-plugins-{type}s-{plugin}-use_kerberos_auth"]
+===== `use_kerberos_auth`
+
+ * Value type is <>
+ * Default value is `false`
+
+Enable kerberos authentication.
+
+[id="{version}-plugins-{type}s-{plugin}-use_ssl_auth"]
+===== `use_ssl_auth`
+
+ * Value type is <>
+ * Default value is `false`
+
+Enable SSL authentication. Note that the openssl library needs to be available to use this.
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The username for webhdfs.
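+
+As an illustration only (the host, user, and path are placeholders), the sketch
+below enables snappy compression together with the retry settings described above:
+
+[source,ruby]
+----------------------------------
+output {
+  webhdfs {
+    host => "namenode.example.com"  # illustrative namenode host
+    user => "hue"                   # illustrative webhdfs user
+    path => "/user/logstash/dt=%{+YYYY-MM-dd}/logstash-%{+HH}.log"
+    compression => "snappy"         # requires the snappy gem
+    snappy_format => "stream"       # hive-compatible framing
+    retry_times => 5                # drop the event after this many failed attempts
+    retry_interval => 0.5           # wait between retries
+  }
+}
+----------------------------------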
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/websocket-index.asciidoc b/docs/versioned-plugins/outputs/websocket-index.asciidoc
new file mode 100644
index 000000000..4ea61ad44
--- /dev/null
+++ b/docs/versioned-plugins/outputs/websocket-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: websocket
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-13
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::websocket-v3.0.4.asciidoc[]
+include::websocket-v3.0.3.asciidoc[]
+include::websocket-v3.0.2.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/websocket-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/websocket-v3.0.2.asciidoc
new file mode 100644
index 000000000..54f767166
--- /dev/null
+++ b/docs/versioned-plugins/outputs/websocket-v3.0.2.asciidoc
@@ -0,0 +1,66 @@
+:plugin: websocket
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.2
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-websocket/blob/v3.0.2/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="plugins-{type}-{plugin}"]
+
+=== Websocket output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output runs a websocket server and publishes any
+messages to all connected websocket clients.
+
+You can connect to it with `ws://<host>:<port>/`.
+
+If no clients are connected, any messages received are ignored.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Websocket Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * Default value is `"0.0.0.0"`
+
+The address to serve websocket data from.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <>
+ * Default value is `3232`
+
+The port to serve websocket data from.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/websocket-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/websocket-v3.0.3.asciidoc
new file mode 100644
index 000000000..dfde774b4
--- /dev/null
+++ b/docs/versioned-plugins/outputs/websocket-v3.0.3.asciidoc
@@ -0,0 +1,66 @@
+:plugin: websocket
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-output-websocket/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Websocket output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output runs a websocket server and publishes any
+messages to all connected websocket clients.
+
+You can connect to it with `ws://<host>:<port>/`.
+
+If no clients are connected, any messages received are ignored.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Websocket Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * Default value is `"0.0.0.0"`
+
+The address to serve websocket data from.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <>
+ * Default value is `3232`
+
+The port to serve websocket data from.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/websocket-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/websocket-v3.0.4.asciidoc
new file mode 100644
index 000000000..de6bb13b8
--- /dev/null
+++ b/docs/versioned-plugins/outputs/websocket-v3.0.4.asciidoc
@@ -0,0 +1,66 @@
+:plugin: websocket
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-11-13
+:changelog_url: https://github.com/logstash-plugins/logstash-output-websocket/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Websocket output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output runs a websocket server and publishes any
+messages to all connected websocket clients.
+
+You can connect to it with `ws://<host>:<port>/`.
+
+If no clients are connected, any messages received are ignored.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Websocket Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * Default value is `"0.0.0.0"`
+
+The address to serve websocket data from.
+
+[id="{version}-plugins-{type}s-{plugin}-port"]
+===== `port`
+
+ * Value type is <>
+ * Default value is `3232`
+
+The port to serve websocket data from.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/xmpp-index.asciidoc b/docs/versioned-plugins/outputs/xmpp-index.asciidoc
new file mode 100644
index 000000000..1ecfabf15
--- /dev/null
+++ b/docs/versioned-plugins/outputs/xmpp-index.asciidoc
@@ -0,0 +1,20 @@
+:plugin: xmpp
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-13
+| <> | 2017-08-16
+| <> | 2017-06-27
+| <> | 2017-06-23
+| <> | 2017-05-03
+|=======================================================================
+
+include::xmpp-v3.0.7.asciidoc[]
+include::xmpp-v3.0.6.asciidoc[]
+include::xmpp-v3.0.5.asciidoc[]
+include::xmpp-v3.0.4.asciidoc[]
+include::xmpp-v3.0.3.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/xmpp-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/xmpp-v3.0.3.asciidoc
new file mode 100644
index 000000000..8956c4a2c
--- /dev/null
+++ b/docs/versioned-plugins/outputs/xmpp-v3.0.3.asciidoc
@@ -0,0 +1,104 @@
+:plugin: xmpp
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.3
+:release_date: 2017-05-03
+:changelog_url: https://github.com/logstash-plugins/logstash-output-xmpp/blob/v3.0.3/CHANGELOG.md
+:include_path: ../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="plugins-{type}-{plugin}"]
+
+=== Xmpp output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output allows you to ship events over XMPP/Jabber.
+
+This plugin can be used for posting events to humans over XMPP, or you can
+use it for PubSub or general message passing from Logstash to Logstash.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Xmpp Output Configuration Options
+
+This plugin supports the following configuration options plus the <> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-message>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-rooms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-users>> |<>|No
+|=======================================================================
+
+Also see <> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The XMPP server to connect to. This is optional. If you omit this setting,
+the host on the user/identity is used. (`foo.com` for `user@foo.com`)
+
+[id="{version}-plugins-{type}s-{plugin}-message"]
+===== `message`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The message to send. This supports dynamic strings like `%{host}`.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The XMPP password for the user/identity.
+
+[id="{version}-plugins-{type}s-{plugin}-rooms"]
+===== `rooms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If MUC (multi-user chat) is required, give the name of the room that
+you want to join: `room@conference.domain/nick`
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The user or resource ID, like `foo@example.com`.
+
+[id="{version}-plugins-{type}s-{plugin}-users"]
+===== `users`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The users to send messages to.
+
+
+
+include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/xmpp-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/xmpp-v3.0.4.asciidoc
new file mode 100644
index 000000000..cdf1a6eac
--- /dev/null
+++ b/docs/versioned-plugins/outputs/xmpp-v3.0.4.asciidoc
@@ -0,0 +1,105 @@
+:plugin: xmpp
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.4
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-xmpp/blob/v3.0.4/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="plugins-{type}-{plugin}"]
+
+=== Xmpp output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output allows you to ship events over XMPP/Jabber.
+
+This plugin can be used for posting events to humans over XMPP, or you can
+use it for PubSub or general message passing from Logstash to Logstash.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Xmpp Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-message>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-rooms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-users>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The XMPP server to connect to. This is optional. If you omit this setting,
+the host on the user/identity is used. (`foo.com` for `user@foo.com`)
+
+[id="{version}-plugins-{type}s-{plugin}-message"]
+===== `message`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The message to send. This supports dynamic strings like `%{host}`.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The XMPP password for the user/identity.
+
+[id="{version}-plugins-{type}s-{plugin}-rooms"]
+===== `rooms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If MUC (multi-user chat) is required, give the name of the room that
+you want to join: `room@conference.domain/nick`
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The user or resource ID, like `foo@example.com`.
+
+[id="{version}-plugins-{type}s-{plugin}-users"]
+===== `users`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The users to send messages to.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/xmpp-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/xmpp-v3.0.5.asciidoc
new file mode 100644
index 000000000..81bd598ed
--- /dev/null
+++ b/docs/versioned-plugins/outputs/xmpp-v3.0.5.asciidoc
@@ -0,0 +1,105 @@
+:plugin: xmpp
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.5
+:release_date: 2017-06-27
+:changelog_url: https://github.com/logstash-plugins/logstash-output-xmpp/blob/v3.0.5/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="plugins-{type}-{plugin}"]
+
+=== Xmpp output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output allows you to ship events over XMPP/Jabber.
+
+This plugin can be used for posting events to humans over XMPP, or you can
+use it for PubSub or general message passing from Logstash to Logstash.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Xmpp Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-message>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-rooms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-users>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The XMPP server to connect to. This is optional. If you omit this setting,
+the host on the user/identity is used. (`foo.com` for `user@foo.com`)
+
+[id="{version}-plugins-{type}s-{plugin}-message"]
+===== `message`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The message to send. This supports dynamic strings like `%{host}`.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The XMPP password for the user/identity.
+
+[id="{version}-plugins-{type}s-{plugin}-rooms"]
+===== `rooms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If MUC (multi-user chat) is required, give the name of the room that
+you want to join: `room@conference.domain/nick`
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The user or resource ID, like `foo@example.com`.
+
+[id="{version}-plugins-{type}s-{plugin}-users"]
+===== `users`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The users to send messages to.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/xmpp-v3.0.6.asciidoc b/docs/versioned-plugins/outputs/xmpp-v3.0.6.asciidoc
new file mode 100644
index 000000000..4ca7772b1
--- /dev/null
+++ b/docs/versioned-plugins/outputs/xmpp-v3.0.6.asciidoc
@@ -0,0 +1,105 @@
+:plugin: xmpp
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.6
+:release_date: 2017-08-16
+:changelog_url: https://github.com/logstash-plugins/logstash-output-xmpp/blob/v3.0.6/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Xmpp output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output allows you to ship events over XMPP/Jabber.
+
+This plugin can be used for posting events to humans over XMPP, or you can
+use it for PubSub or general message passing from Logstash to Logstash.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Xmpp Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-message>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-rooms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-users>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The XMPP server to connect to. This is optional. If you omit this setting,
+the host on the user/identity is used. (`foo.com` for `user@foo.com`)
+
+[id="{version}-plugins-{type}s-{plugin}-message"]
+===== `message`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The message to send. This supports dynamic strings like `%{host}`.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The XMPP password for the user/identity.
+
+[id="{version}-plugins-{type}s-{plugin}-rooms"]
+===== `rooms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If MUC (multi-user chat) is required, give the name of the room that
+you want to join: `room@conference.domain/nick`
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The user or resource ID, like `foo@example.com`.
+
+[id="{version}-plugins-{type}s-{plugin}-users"]
+===== `users`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The users to send messages to.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/xmpp-v3.0.7.asciidoc b/docs/versioned-plugins/outputs/xmpp-v3.0.7.asciidoc
new file mode 100644
index 000000000..8edfc1d36
--- /dev/null
+++ b/docs/versioned-plugins/outputs/xmpp-v3.0.7.asciidoc
@@ -0,0 +1,105 @@
+:plugin: xmpp
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.0.7
+:release_date: 2017-11-13
+:changelog_url: https://github.com/logstash-plugins/logstash-output-xmpp/blob/v3.0.7/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="{version}-plugins-{type}s-{plugin}"]
+
+=== Xmpp output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+This output allows you to ship events over XMPP/Jabber.
+
+This plugin can be used for posting events to humans over XMPP, or you can
+use it for PubSub or general message passing from Logstash to Logstash.
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Xmpp Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-message>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-rooms>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes
+| <<{version}-plugins-{type}s-{plugin}-users>> |<>|No
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-host"]
+===== `host`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The XMPP server to connect to. This is optional. If you omit this setting,
+the host on the user/identity is used. (`foo.com` for `user@foo.com`)
+
+[id="{version}-plugins-{type}s-{plugin}-message"]
+===== `message`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The message to send. This supports dynamic strings like `%{host}`.
+
+[id="{version}-plugins-{type}s-{plugin}-password"]
+===== `password`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The XMPP password for the user/identity.
+
+[id="{version}-plugins-{type}s-{plugin}-rooms"]
+===== `rooms`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+If MUC (multi-user chat) is required, give the name of the room that
+you want to join: `room@conference.domain/nick`
+
+[id="{version}-plugins-{type}s-{plugin}-user"]
+===== `user`
+
+ * This is a required setting.
+ * Value type is <>
+ * There is no default value for this setting.
+
+The user or resource ID, like `foo@example.com`.
+
+[id="{version}-plugins-{type}s-{plugin}-users"]
+===== `users`
+
+ * Value type is <>
+ * There is no default value for this setting.
+
+The users to send messages to.
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/zabbix-index.asciidoc b/docs/versioned-plugins/outputs/zabbix-index.asciidoc
new file mode 100644
index 000000000..f01cb1d88
--- /dev/null
+++ b/docs/versioned-plugins/outputs/zabbix-index.asciidoc
@@ -0,0 +1,16 @@
+:plugin: zabbix
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-13
+| <> | 2017-08-16
+| <> | 2017-06-23
+|=======================================================================
+
+include::zabbix-v3.0.4.asciidoc[]
+include::zabbix-v3.0.3.asciidoc[]
+include::zabbix-v3.0.2.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/zabbix-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/zabbix-v3.0.2.asciidoc
new file mode 100644
index 000000000..f5ac0de0b
--- /dev/null
+++ b/docs/versioned-plugins/outputs/zabbix-v3.0.2.asciidoc
@@ -0,0 +1,160 @@
+:plugin: zabbix
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+/////////////////////////////////////////// +:version: v3.0.2 +:release_date: 2017-06-23 +:changelog_url: https://github.com/logstash-plugins/logstash-output-zabbix/blob/v3.0.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="plugins-{type}-{plugin}"] + +=== Zabbix output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The Zabbix output is used to send item data (key/value pairs) to a Zabbix +server. The event `@timestamp` will automatically be associated with the +Zabbix item data. + +The Zabbix Sender protocol is described at +https://www.zabbix.org/wiki/Docs/protocols/zabbix_sender/2.0 +Zabbix uses a kind of nested key/value store. + +[source,txt] + host + ├── item1 + │ └── value1 + ├── item2 + │ └── value2 + ├── ... + │ └── ... + ├── item_n + │ └── value_n + +Each "host" is an identifier, and each item is associated with that host. +Items are typed on the Zabbix side. You can send numbers as strings and +Zabbix will Do The Right Thing. + +In the Zabbix UI, ensure that your hostname matches the value referenced by +`zabbix_host`. Create the item with the key as it appears in the field +referenced by `zabbix_key`. In the item configuration window, ensure that the +type dropdown is set to Zabbix Trapper. Also be sure to set the type of +information that Zabbix should expect for this item. + +This plugin does not currently send in batches. While it is possible to do +so, this is not supported. Be careful not to flood your Zabbix server with +too many events per second. + +NOTE: This plugin will log a warning if a necessary field is missing. It will +not attempt to resend if Zabbix is down, but will log an error message. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Zabbix Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-multi_value>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-zabbix_host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-zabbix_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-zabbix_server_host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-zabbix_server_port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-zabbix_value>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-multi_value"] +===== `multi_value` + + * Value type is <> + * There is no default value for this setting. + +Use the `multi_value` directive to send multiple key/value pairs. +This can be thought of as an array, like: + +`[ zabbix_key1, zabbix_value1, zabbix_key2, zabbix_value2, ... zabbix_keyN, zabbix_valueN ]` + +...where `zabbix_key1` is an instance of `zabbix_key`, and `zabbix_value1` +is an instance of `zabbix_value`. If the field referenced by any +`zabbix_key` or `zabbix_value` does not exist, that entry will be ignored. + +This directive cannot be used in conjunction with the single-value directives +`zabbix_key` and `zabbix_value`. 
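+
+For illustration, a minimal sketch of the `multi_value` form might look like the
+following (the server address and all field names here are hypothetical):
+
+[source,ruby]
+----------------------------------
+output {
+  zabbix {
+    zabbix_server_host => "zabbix.example.com"  # illustrative server address
+    zabbix_host => "[@metadata][zabbix_host]"   # field holding the Zabbix host name
+    # Alternating key-field/value-field pairs; pairs with missing fields are ignored:
+    multi_value => [ "[@metadata][key1]", "field1", "[@metadata][key2]", "field2" ]
+  }
+}
+----------------------------------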
+ +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `1` + +The number of seconds to wait before giving up on a connection to the Zabbix +server. This number should be very small, otherwise delays in delivery of +other outputs could result. + +[id="{version}-plugins-{type}s-{plugin}-zabbix_host"] +===== `zabbix_host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The field name which holds the Zabbix host name. This can be a sub-field of +the @metadata field. + +[id="{version}-plugins-{type}s-{plugin}-zabbix_key"] +===== `zabbix_key` + + * Value type is <> + * There is no default value for this setting. + +A single field name which holds the value you intend to use as the Zabbix +item key. This can be a sub-field of the @metadata field. +This directive will be ignored if using `multi_value` + +IMPORTANT: `zabbix_key` is required if not using `multi_value`. + + +[id="{version}-plugins-{type}s-{plugin}-zabbix_server_host"] +===== `zabbix_server_host` + + * Value type is <> + * Default value is `"localhost"` + +The IP or resolvable hostname where the Zabbix server is running + +[id="{version}-plugins-{type}s-{plugin}-zabbix_server_port"] +===== `zabbix_server_port` + + * Value type is <> + * Default value is `10051` + +The port on which the Zabbix server is running + +[id="{version}-plugins-{type}s-{plugin}-zabbix_value"] +===== `zabbix_value` + + * Value type is <> + * Default value is `"message"` + +The field name which holds the value you want to send. +This directive will be ignored if using `multi_value` + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/zabbix-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/zabbix-v3.0.3.asciidoc new file mode 100644 index 000000000..b07a813b2 --- /dev/null +++ b/docs/versioned-plugins/outputs/zabbix-v3.0.3.asciidoc @@ -0,0 +1,160 @@ +:plugin: zabbix +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.3 +:release_date: 2017-08-16 +:changelog_url: https://github.com/logstash-plugins/logstash-output-zabbix/blob/v3.0.3/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Zabbix output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The Zabbix output is used to send item data (key/value pairs) to a Zabbix +server. The event `@timestamp` will automatically be associated with the +Zabbix item data. + +The Zabbix Sender protocol is described at +https://www.zabbix.org/wiki/Docs/protocols/zabbix_sender/2.0 +Zabbix uses a kind of nested key/value store. + +[source,txt] + host + ├── item1 + │ └── value1 + ├── item2 + │ └── value2 + ├── ... + │ └── ... + ├── item_n + │ └── value_n + +Each "host" is an identifier, and each item is associated with that host. +Items are typed on the Zabbix side. You can send numbers as strings and +Zabbix will Do The Right Thing. + +In the Zabbix UI, ensure that your hostname matches the value referenced by +`zabbix_host`. Create the item with the key as it appears in the field +referenced by `zabbix_key`. 
In the item configuration window, ensure that the +type dropdown is set to Zabbix Trapper. Also be sure to set the type of +information that Zabbix should expect for this item. + +This plugin does not currently send in batches. While it is possible to do +so, this is not supported. Be careful not to flood your Zabbix server with +too many events per second. + +NOTE: This plugin will log a warning if a necessary field is missing. It will +not attempt to resend if Zabbix is down, but will log an error message. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Zabbix Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. + +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-multi_value>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-zabbix_host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-zabbix_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-zabbix_server_host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-zabbix_server_port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-zabbix_value>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-multi_value"] +===== `multi_value` + + * Value type is <> + * There is no default value for this setting. + +Use the `multi_value` directive to send multiple key/value pairs. +This can be thought of as an array, like: + +`[ zabbix_key1, zabbix_value1, zabbix_key2, zabbix_value2, ... zabbix_keyN, zabbix_valueN ]` + +...where `zabbix_key1` is an instance of `zabbix_key`, and `zabbix_value1` +is an instance of `zabbix_value`. If the field referenced by any +`zabbix_key` or `zabbix_value` does not exist, that entry will be ignored. + +This directive cannot be used in conjunction with the single-value directives +`zabbix_key` and `zabbix_value`. + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `1` + +The number of seconds to wait before giving up on a connection to the Zabbix +server. This number should be very small, otherwise delays in delivery of +other outputs could result. + +[id="{version}-plugins-{type}s-{plugin}-zabbix_host"] +===== `zabbix_host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The field name which holds the Zabbix host name. This can be a sub-field of +the @metadata field. + +[id="{version}-plugins-{type}s-{plugin}-zabbix_key"] +===== `zabbix_key` + + * Value type is <> + * There is no default value for this setting. + +A single field name which holds the value you intend to use as the Zabbix +item key. This can be a sub-field of the @metadata field. +This directive will be ignored if using `multi_value` + +IMPORTANT: `zabbix_key` is required if not using `multi_value`. 
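+
+As an illustrative sketch only (the server address and field names are
+hypothetical), the single-value form described above could be configured like this:
+
+[source,ruby]
+----------------------------------
+output {
+  zabbix {
+    zabbix_server_host => "zabbix.example.com"  # illustrative server address
+    zabbix_host => "[@metadata][zabbix_host]"   # field holding the Zabbix host name
+    zabbix_key => "[@metadata][zabbix_key]"     # field holding the item key
+    zabbix_value => "message"                   # field holding the value (the default)
+  }
+}
+----------------------------------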
+ + +[id="{version}-plugins-{type}s-{plugin}-zabbix_server_host"] +===== `zabbix_server_host` + + * Value type is <> + * Default value is `"localhost"` + +The IP or resolvable hostname where the Zabbix server is running + +[id="{version}-plugins-{type}s-{plugin}-zabbix_server_port"] +===== `zabbix_server_port` + + * Value type is <> + * Default value is `10051` + +The port on which the Zabbix server is running + +[id="{version}-plugins-{type}s-{plugin}-zabbix_value"] +===== `zabbix_value` + + * Value type is <> + * Default value is `"message"` + +The field name which holds the value you want to send. +This directive will be ignored if using `multi_value` + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/zabbix-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/zabbix-v3.0.4.asciidoc new file mode 100644 index 000000000..440e7616f --- /dev/null +++ b/docs/versioned-plugins/outputs/zabbix-v3.0.4.asciidoc @@ -0,0 +1,160 @@ +:plugin: zabbix +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.0.4 +:release_date: 2017-11-13 +:changelog_url: https://github.com/logstash-plugins/logstash-output-zabbix/blob/v3.0.4/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Zabbix output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +The Zabbix output is used to send item data (key/value pairs) to a Zabbix +server. The event `@timestamp` will automatically be associated with the +Zabbix item data. + +The Zabbix Sender protocol is described at +https://www.zabbix.org/wiki/Docs/protocols/zabbix_sender/2.0 +Zabbix uses a kind of nested key/value store. + +[source,txt] + host + ├── item1 + │ └── value1 + ├── item2 + │ └── value2 + ├── ... + │ └── ... + ├── item_n + │ └── value_n + +Each "host" is an identifier, and each item is associated with that host. +Items are typed on the Zabbix side. You can send numbers as strings and +Zabbix will Do The Right Thing. + +In the Zabbix UI, ensure that your hostname matches the value referenced by +`zabbix_host`. Create the item with the key as it appears in the field +referenced by `zabbix_key`. In the item configuration window, ensure that the +type dropdown is set to Zabbix Trapper. Also be sure to set the type of +information that Zabbix should expect for this item. + +This plugin does not currently send in batches. While it is possible to do +so, this is not supported. Be careful not to flood your Zabbix server with +too many events per second. + +NOTE: This plugin will log a warning if a necessary field is missing. It will +not attempt to resend if Zabbix is down, but will log an error message. + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Zabbix Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-multi_value>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-zabbix_host>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-zabbix_key>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-zabbix_server_host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-zabbix_server_port>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-zabbix_value>> |<>|No +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-multi_value"] +===== `multi_value` + + * Value type is <> + * There is no default value for this setting. + +Use the `multi_value` directive to send multiple key/value pairs. +This can be thought of as an array, like: + +`[ zabbix_key1, zabbix_value1, zabbix_key2, zabbix_value2, ... zabbix_keyN, zabbix_valueN ]` + +...where `zabbix_key1` is an instance of `zabbix_key`, and `zabbix_value1` +is an instance of `zabbix_value`. If the field referenced by any +`zabbix_key` or `zabbix_value` does not exist, that entry will be ignored. + +This directive cannot be used in conjunction with the single-value directives +`zabbix_key` and `zabbix_value`. + +[id="{version}-plugins-{type}s-{plugin}-timeout"] +===== `timeout` + + * Value type is <> + * Default value is `1` + +The number of seconds to wait before giving up on a connection to the Zabbix +server. This number should be very small, otherwise delays in delivery of +other outputs could result. + +[id="{version}-plugins-{type}s-{plugin}-zabbix_host"] +===== `zabbix_host` + + * This is a required setting. + * Value type is <> + * There is no default value for this setting. + +The field name which holds the Zabbix host name. This can be a sub-field of +the @metadata field. + +[id="{version}-plugins-{type}s-{plugin}-zabbix_key"] +===== `zabbix_key` + + * Value type is <> + * There is no default value for this setting. + +A single field name which holds the value you intend to use as the Zabbix +item key. This can be a sub-field of the @metadata field. +This directive will be ignored if using `multi_value` + +IMPORTANT: `zabbix_key` is required if not using `multi_value`. + + +[id="{version}-plugins-{type}s-{plugin}-zabbix_server_host"] +===== `zabbix_server_host` + + * Value type is <> + * Default value is `"localhost"` + +The IP or resolvable hostname where the Zabbix server is running + +[id="{version}-plugins-{type}s-{plugin}-zabbix_server_port"] +===== `zabbix_server_port` + + * Value type is <> + * Default value is `10051` + +The port on which the Zabbix server is running + +[id="{version}-plugins-{type}s-{plugin}-zabbix_value"] +===== `zabbix_value` + + * Value type is <> + * Default value is `"message"` + +The field name which holds the value you want to send. 
+This directive will be ignored if using `multi_value`
+
+
+
+[id="{version}-plugins-{type}s-{plugin}-common-options"]
+include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/zeromq-index.asciidoc b/docs/versioned-plugins/outputs/zeromq-index.asciidoc
new file mode 100644
index 000000000..1808a3372
--- /dev/null
+++ b/docs/versioned-plugins/outputs/zeromq-index.asciidoc
@@ -0,0 +1,14 @@
+:plugin: zeromq
+:type: output
+
+include::{include_path}/version-list-intro.asciidoc[]
+
+|=======================================================================
+| Version | Release Date
+| <> | 2017-11-13
+| <> | 2017-06-23
+|=======================================================================
+
+include::zeromq-v3.1.2.asciidoc[]
+include::zeromq-v3.1.1.asciidoc[]
+
diff --git a/docs/versioned-plugins/outputs/zeromq-v3.1.1.asciidoc b/docs/versioned-plugins/outputs/zeromq-v3.1.1.asciidoc
new file mode 100644
index 000000000..64f604eef
--- /dev/null
+++ b/docs/versioned-plugins/outputs/zeromq-v3.1.1.asciidoc
@@ -0,0 +1,126 @@
+:plugin: zeromq
+:type: output
+
+///////////////////////////////////////////
+START - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+:version: v3.1.1
+:release_date: 2017-06-23
+:changelog_url: https://github.com/logstash-plugins/logstash-output-zeromq/blob/v3.1.1/CHANGELOG.md
+:include_path: ../../../../logstash/docs/include
+///////////////////////////////////////////
+END - GENERATED VARIABLES, DO NOT EDIT!
+///////////////////////////////////////////
+
+[id="plugins-{type}-{plugin}"]
+
+=== Zeromq output plugin {version}
+
+include::{include_path}/plugin_header.asciidoc[]
+
+==== Description
+
+Write events to a 0MQ PUB socket.
+
+You need to have the 0mq 2.1.x library installed to be able to use
+this output plugin.
+
+The default settings will create a publisher connecting to a subscriber
+bound to tcp://127.0.0.1:2120
+
+
+[id="{version}-plugins-{type}s-{plugin}-options"]
+==== Zeromq Output Configuration Options
+
+This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
+
+[cols="<,<,<",options="header",]
+|=======================================================================
+|Setting |Input type|Required
+| <<{version}-plugins-{type}s-{plugin}-address>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No
+| <<{version}-plugins-{type}s-{plugin}-sockopt>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-topic>> |<>|No
+| <<{version}-plugins-{type}s-{plugin}-topology>> |<>, one of `["pushpull", "pubsub", "pair"]`|Yes
+|=======================================================================
+
+Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
+output plugins.
+
+ 
+
+[id="{version}-plugins-{type}s-{plugin}-address"]
+===== `address`
+
+ * Value type is <>
+ * Default value is `["tcp://127.0.0.1:2120"]`
+
+The 0mq socket address to connect or bind to.
+Please note that `inproc://` will not work with Logstash,
+as a separate context is used per thread.
+By default, inputs bind/listen and outputs connect.
+
+NOTE: This setting is currently a performance bottleneck; someone needs to upgrade
+it to concurrency `:shared` and make sure there is no breakage.
+
+[id="{version}-plugins-{type}s-{plugin}-mode"]
+===== `mode`
+
+ * Value can be any of: `server`, `client`
+ * Default value is `"client"`
+
+Server mode binds/listens. Client mode connects.
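+
+To make the interplay of `topology`, `address`, and `mode` concrete, here is an
+illustrative (not canonical) configuration using the documented defaults:
+
+[source,ruby]
+----------------------------------
+output {
+  zeromq {
+    topology => "pubsub"                 # required; outputs act as publishers
+    address => ["tcp://127.0.0.1:2120"]  # the default address, shown explicitly
+    mode => "client"                     # the default; "server" would bind/listen instead
+  }
+}
+----------------------------------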
+ +[id="{version}-plugins-{type}s-{plugin}-sockopt"] +===== `sockopt` + + * Value type is <> + * There is no default value for this setting. + +This exposes zmq_setsockopt for advanced tuning. +See http://api.zeromq.org/2-1:zmq-setsockopt for details. + +This is where you would set values like: + +* ZMQ::HWM - high water mark +* ZMQ::IDENTITY - named queues +* ZMQ::SWAP_SIZE - space for disk overflow + +Example: +[source,ruby] + sockopt => { + "ZMQ::HWM" => 50 + "ZMQ::IDENTITY" => "my_named_queue" + } + +[id="{version}-plugins-{type}s-{plugin}-topic"] +===== `topic` + + * Value type is <> + * Default value is `""` + +This is used for the 'pubsub' topology only. +On inputs, this allows you to filter messages by topic. +On outputs, this allows you to tag a message for routing. +NOTE: ZeroMQ does subscriber-side filtering +NOTE: Topic is evaluated with `event.sprintf` so macros are valid here. + +[id="{version}-plugins-{type}s-{plugin}-topology"] +===== `topology` + + * This is a required setting. + * Value can be any of: `pushpull`, `pubsub`, `pair` + * There is no default value for this setting. + +The default logstash topologies work as follows: + +* pushpull - inputs are pull, outputs are push +* pubsub - inputs are subscribers, outputs are publishers +* pair - inputs are clients, outputs are servers + +If the predefined topology flows don't work for you, +you can change the 'mode' setting + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/zeromq-v3.1.2.asciidoc b/docs/versioned-plugins/outputs/zeromq-v3.1.2.asciidoc new file mode 100644 index 000000000..fca296baf --- /dev/null +++ b/docs/versioned-plugins/outputs/zeromq-v3.1.2.asciidoc @@ -0,0 +1,126 @@ +:plugin: zeromq +:type: output + +/////////////////////////////////////////// +START - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// +:version: v3.1.2 +:release_date: 2017-11-13 +:changelog_url: https://github.com/logstash-plugins/logstash-output-zeromq/blob/v3.1.2/CHANGELOG.md +:include_path: ../../../../logstash/docs/include +/////////////////////////////////////////// +END - GENERATED VARIABLES, DO NOT EDIT! +/////////////////////////////////////////// + +[id="{version}-plugins-{type}s-{plugin}"] + +=== Zeromq output plugin {version} + +include::{include_path}/plugin_header.asciidoc[] + +==== Description + +Write events to a 0MQ PUB socket. + +You need to have the 0mq 2.1.x library installed to be able to use +this output plugin. + +The default settings will create a publisher connecting to a subscriber +bound to tcp://127.0.0.1:2120 + + +[id="{version}-plugins-{type}s-{plugin}-options"] +==== Zeromq Output Configuration Options + +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
+ +[cols="<,<,<",options="header",] +|======================================================================= +|Setting |Input type|Required +| <<{version}-plugins-{type}s-{plugin}-address>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-sockopt>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-topic>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-topology>> |<>, one of `["pushpull", "pubsub", "pair"]`|Yes +|======================================================================= + +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all +output plugins. + +  + +[id="{version}-plugins-{type}s-{plugin}-address"] +===== `address` + + * Value type is <> + * Default value is `["tcp://127.0.0.1:2120"]` + +The 0mq socket address to connect or bind to. +Please note that `inproc://` will not work with Logstash, +because we use a separate 0MQ context per thread. +By default, inputs bind/listen and outputs connect. + +NOTE: This setting is currently a performance bottleneck; the plugin needs +to be upgraded to `concurrency :shared` (without breakage) to address it. + +[id="{version}-plugins-{type}s-{plugin}-mode"] +===== `mode` + + * Value can be any of: `server`, `client` + * Default value is `"client"` + +Server mode binds/listens. Client mode connects. + +[id="{version}-plugins-{type}s-{plugin}-sockopt"] +===== `sockopt` + + * Value type is <> + * There is no default value for this setting. + +This exposes zmq_setsockopt for advanced tuning. +See http://api.zeromq.org/2-1:zmq-setsockopt for details. + +This is where you would set values like: + +* ZMQ::HWM - high water mark +* ZMQ::IDENTITY - named queues +* ZMQ::SWAP_SIZE - space for disk overflow + +Example: +[source,ruby] + sockopt => { + "ZMQ::HWM" => 50 + "ZMQ::IDENTITY" => "my_named_queue" + } + +[id="{version}-plugins-{type}s-{plugin}-topic"] +===== `topic` + + * Value type is <> + * Default value is `""` + +This is used for the `pubsub` topology only. +On inputs, this allows you to filter messages by topic. +On outputs, this allows you to tag a message for routing. +NOTE: ZeroMQ does subscriber-side filtering. +NOTE: Topic is evaluated with `event.sprintf`, so macros are valid here. + +[id="{version}-plugins-{type}s-{plugin}-topology"] +===== `topology` + + * This is a required setting. + * Value can be any of: `pushpull`, `pubsub`, `pair` + * There is no default value for this setting.
+ +The default Logstash topologies work as follows: + +* pushpull - inputs are pull, outputs are push +* pubsub - inputs are subscribers, outputs are publishers +* pair - inputs are clients, outputs are servers + +If the predefined topology flows don't work for you, +you can change the `mode` setting. + + + +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/zookeeper-index.asciidoc b/docs/versioned-plugins/outputs/zookeeper-index.asciidoc new file mode 100644 index 000000000..4efb92606 --- /dev/null +++ b/docs/versioned-plugins/outputs/zookeeper-index.asciidoc @@ -0,0 +1,10 @@ +:plugin: zookeeper +:type: output + +include::{include_path}/version-list-intro.asciidoc[] + +|======================================================================= +| Version | Release Date +|======================================================================= + + From 37052357d47ff4302d7d1057a0784a0caf6818c7 Mon Sep 17 00:00:00 2001 From: Joao Duarte Date: Thu, 11 Jan 2018 17:54:49 +0000 Subject: [PATCH 4/5] make it simpler --- docs/versioned-plugins/codecs-index.asciidoc | 27 - .../codecs/avro-index.asciidoc | 18 - .../codecs/avro-v3.2.0.asciidoc | 103 ---- .../codecs/avro-v3.2.1.asciidoc | 96 --- .../codecs/avro-v3.2.2.asciidoc | 96 --- .../codecs/avro-v3.2.3.asciidoc | 96 --- .../codecs/cloudfront-index.asciidoc | 16 - .../codecs/cloudfront-v3.0.1.asciidoc | 52 -- .../codecs/cloudfront-v3.0.2.asciidoc | 52 -- .../codecs/cloudfront-v3.0.3.asciidoc | 52 -- .../codecs/cloudtrail-index.asciidoc | 14 - .../codecs/cloudtrail-v3.0.2.asciidoc | 44 -- .../codecs/cloudtrail-v3.0.3.asciidoc | 44 -- .../codecs/collectd-index.asciidoc | 20 - .../codecs/collectd-v3.0.4.asciidoc | 147 ----- .../codecs/collectd-v3.0.5.asciidoc | 140 ----- .../codecs/collectd-v3.0.6.asciidoc | 140 ----- .../codecs/collectd-v3.0.7.asciidoc | 140 ----- .../codecs/collectd-v3.0.8.asciidoc | 140 ----- .../codecs/compress_spooler-index.asciidoc | 14 - .../codecs/compress_spooler-v2.0.5.asciidoc | 64 -- .../codecs/compress_spooler-v2.0.6.asciidoc | 64 -- .../codecs/csv-index.asciidoc | 14 - .../codecs/csv-v0.1.3.asciidoc | 132 ---- .../codecs/csv-v0.1.4.asciidoc | 132 ---- .../codecs/dots-index.asciidoc | 16 - .../codecs/dots-v3.0.3.asciidoc | 31 - .../codecs/dots-v3.0.5.asciidoc | 23 - .../codecs/dots-v3.0.6.asciidoc | 23 - .../codecs/edn-index.asciidoc | 16 - .../codecs/edn-v3.0.3.asciidoc | 33 - .../codecs/edn-v3.0.5.asciidoc | 25 - .../codecs/edn-v3.0.6.asciidoc | 25 - .../codecs/edn_lines-index.asciidoc | 16 - .../codecs/edn_lines-v3.0.3.asciidoc | 31 - .../codecs/edn_lines-v3.0.5.asciidoc | 23 - .../codecs/edn_lines-v3.0.6.asciidoc | 23 - .../codecs/es_bulk-index.asciidoc | 16 - .../codecs/es_bulk-v3.0.4.asciidoc | 35 -- .../codecs/es_bulk-v3.0.5.asciidoc | 28 - .../codecs/es_bulk-v3.0.6.asciidoc | 28 - .../codecs/example-index.asciidoc | 10 - .../codecs/fluent-index.asciidoc | 18 - .../codecs/fluent-v3.1.2.asciidoc | 51 -- .../codecs/fluent-v3.1.3.asciidoc | 43 -- .../codecs/fluent-v3.1.4.asciidoc | 43 -- .../codecs/fluent-v3.1.5.asciidoc | 43 -- .../codecs/graphite-index.asciidoc | 16 - .../codecs/graphite-v3.0.3.asciidoc | 93 --- .../codecs/graphite-v3.0.4.asciidoc | 93 --- .../codecs/graphite-v3.0.5.asciidoc | 93 --- .../codecs/gzip_lines-index.asciidoc | 18 - .../codecs/gzip_lines-v3.0.0.asciidoc | 59 -- .../codecs/gzip_lines-v3.0.1.asciidoc | 52 -- .../codecs/gzip_lines-v3.0.2.asciidoc | 52 --
.../codecs/gzip_lines-v3.0.3.asciidoc | 52 -- .../codecs/json_lines-index.asciidoc | 16 - .../codecs/json_lines-v3.0.3.asciidoc | 67 --- .../codecs/json_lines-v3.0.4.asciidoc | 67 --- .../codecs/json_lines-v3.0.5.asciidoc | 67 --- .../codecs/json_pretty-index.asciidoc | 10 - .../codecs/line-index.asciidoc | 22 - .../codecs/line-v3.0.3.asciidoc | 72 --- .../codecs/line-v3.0.4.asciidoc | 72 --- .../codecs/line-v3.0.5.asciidoc | 72 --- .../codecs/line-v3.0.6.asciidoc | 72 --- .../codecs/line-v3.0.7.asciidoc | 72 --- .../codecs/line-v3.0.8.asciidoc | 72 --- .../codecs/msgpack-index.asciidoc | 18 - .../codecs/msgpack-v3.0.3.asciidoc | 44 -- .../codecs/msgpack-v3.0.5.asciidoc | 44 -- .../codecs/msgpack-v3.0.6.asciidoc | 44 -- .../codecs/msgpack-v3.0.7.asciidoc | 44 -- .../codecs/multiline-index.asciidoc | 18 - .../codecs/multiline-v3.0.5.asciidoc | 222 ------- .../codecs/multiline-v3.0.6.asciidoc | 222 ------- .../codecs/multiline-v3.0.7.asciidoc | 222 ------- .../codecs/multiline-v3.0.8.asciidoc | 222 ------- .../codecs/netflow-index.asciidoc | 34 -- .../codecs/netflow-v3.10.0.asciidoc | 210 ------- .../codecs/netflow-v3.4.1.asciidoc | 192 ------ .../codecs/netflow-v3.5.0.asciidoc | 192 ------ .../codecs/netflow-v3.5.1.asciidoc | 187 ------ .../codecs/netflow-v3.5.2.asciidoc | 206 ------- .../codecs/netflow-v3.6.0.asciidoc | 205 ------- .../codecs/netflow-v3.7.0.asciidoc | 205 ------- .../codecs/netflow-v3.7.1.asciidoc | 205 ------- .../codecs/netflow-v3.8.0.asciidoc | 205 ------- .../codecs/netflow-v3.8.1.asciidoc | 207 ------- .../codecs/netflow-v3.8.3.asciidoc | 207 ------- .../codecs/netflow-v3.9.0.asciidoc | 209 ------- .../codecs/netflow-v3.9.1.asciidoc | 209 ------- .../codecs/nmap-index.asciidoc | 16 - .../codecs/nmap-v0.0.19.asciidoc | 81 --- .../codecs/nmap-v0.0.20.asciidoc | 81 --- .../codecs/nmap-v0.0.21.asciidoc | 81 --- .../codecs/oldlogstashjson-index.asciidoc | 14 - .../codecs/oldlogstashjson-v2.0.5.asciidoc | 31 - .../codecs/oldlogstashjson-v2.0.7.asciidoc | 26 - .../codecs/plain-index.asciidoc | 18 - .../codecs/plain-v3.0.3.asciidoc | 65 -- .../codecs/plain-v3.0.4.asciidoc | 65 -- .../codecs/plain-v3.0.5.asciidoc | 65 -- .../codecs/plain-v3.0.6.asciidoc | 65 -- .../codecs/pretty-index.asciidoc | 10 - .../codecs/protobuf-index.asciidoc | 14 - .../codecs/protobuf-v1.0.2.asciidoc | 106 ---- .../codecs/protobuf-v1.0.3.asciidoc | 106 ---- .../codecs/s3plain-index.asciidoc | 16 - .../codecs/s3plain-v2.0.5.asciidoc | 32 - .../codecs/s3plain-v2.0.6.asciidoc | 32 - .../codecs/s3plain-v2.0.7.asciidoc | 32 - .../codecs/sflow-index.asciidoc | 10 - docs/versioned-plugins/filters-index.asciidoc | 59 -- .../filters/age-index.asciidoc | 14 - .../filters/age-v1.0.1.asciidoc | 66 -- .../filters/age-v1.0.2.asciidoc | 66 -- .../filters/aggregate-index.asciidoc | 24 - .../filters/aggregate-v2.6.0.asciidoc | 552 ----------------- .../filters/aggregate-v2.6.1.asciidoc | 553 ----------------- .../filters/aggregate-v2.6.3.asciidoc | 542 ----------------- .../filters/aggregate-v2.6.4.asciidoc | 542 ----------------- .../filters/aggregate-v2.7.0.asciidoc | 555 ----------------- .../filters/aggregate-v2.7.1.asciidoc | 555 ----------------- .../filters/aggregate-v2.7.2.asciidoc | 555 ----------------- .../filters/alter-index.asciidoc | 16 - .../filters/alter-v3.0.1.asciidoc | 111 ---- .../filters/alter-v3.0.2.asciidoc | 111 ---- .../filters/alter-v3.0.3.asciidoc | 111 ---- .../filters/anonymize-index.asciidoc | 16 - .../filters/anonymize-v3.0.4.asciidoc | 77 --- .../filters/anonymize-v3.0.5.asciidoc | 77 
--- .../filters/anonymize-v3.0.6.asciidoc | 77 --- .../filters/bytesize-index.asciidoc | 10 - .../filters/checksum-index.asciidoc | 14 - .../filters/checksum-v3.0.3.asciidoc | 69 --- .../filters/checksum-v3.0.4.asciidoc | 69 --- .../filters/cidr-index.asciidoc | 16 - .../filters/cidr-v3.0.1.asciidoc | 80 --- .../filters/cidr-v3.1.1.asciidoc | 114 ---- .../filters/cidr-v3.1.2.asciidoc | 114 ---- .../filters/cipher-index.asciidoc | 18 - .../filters/cipher-v2.0.6.asciidoc | 243 -------- .../filters/cipher-v2.0.7.asciidoc | 243 -------- .../filters/cipher-v3.0.0.asciidoc | 220 ------- .../filters/cipher-v3.0.1.asciidoc | 220 ------- .../filters/clone-index.asciidoc | 16 - .../filters/clone-v3.0.3.asciidoc | 57 -- .../filters/clone-v3.0.4.asciidoc | 57 -- .../filters/clone-v3.0.5.asciidoc | 57 -- .../filters/cloudfoundry-index.asciidoc | 10 - .../filters/collate-index.asciidoc | 14 - .../filters/collate-v2.0.5.asciidoc | 84 --- .../filters/collate-v2.0.6.asciidoc | 84 --- .../filters/csv-index.asciidoc | 20 - .../filters/csv-v3.0.3.asciidoc | 152 ----- .../filters/csv-v3.0.4.asciidoc | 153 ----- .../filters/csv-v3.0.5.asciidoc | 153 ----- .../filters/csv-v3.0.6.asciidoc | 153 ----- .../filters/csv-v3.0.7.asciidoc | 153 ----- .../filters/date-index.asciidoc | 16 - .../filters/date-v3.1.7.asciidoc | 215 ------- .../filters/date-v3.1.8.asciidoc | 215 ------- .../filters/date-v3.1.9.asciidoc | 215 ------- .../filters/de_dot-index.asciidoc | 16 - .../filters/de_dot-v1.0.1.asciidoc | 82 --- .../filters/de_dot-v1.0.2.asciidoc | 82 --- .../filters/de_dot-v1.0.3.asciidoc | 82 --- .../filters/debug-index.asciidoc | 10 - .../filters/dissect-index.asciidoc | 16 - .../filters/dissect-v1.0.9.asciidoc | 213 ------- .../filters/dissect-v1.1.1.asciidoc | 283 --------- .../filters/dissect-v1.1.2.asciidoc | 283 --------- .../filters/dns-index.asciidoc | 18 - .../filters/dns-v3.0.4.asciidoc | 161 ----- .../filters/dns-v3.0.5.asciidoc | 161 ----- .../filters/dns-v3.0.6.asciidoc | 161 ----- .../filters/dns-v3.0.7.asciidoc | 161 ----- .../filters/drop-index.asciidoc | 16 - .../filters/drop-v3.0.3.asciidoc | 77 --- .../filters/drop-v3.0.4.asciidoc | 77 --- .../filters/drop-v3.0.5.asciidoc | 77 --- .../filters/elapsed-index.asciidoc | 16 - .../filters/elapsed-v4.0.2.asciidoc | 168 ------ .../filters/elapsed-v4.0.3.asciidoc | 168 ------ .../filters/elapsed-v4.0.4.asciidoc | 168 ------ .../filters/elasticsearch-index.asciidoc | 22 - .../filters/elasticsearch-v3.1.3.asciidoc | 236 -------- .../filters/elasticsearch-v3.1.4.asciidoc | 237 -------- .../filters/elasticsearch-v3.1.5.asciidoc | 237 -------- .../filters/elasticsearch-v3.1.6.asciidoc | 237 -------- .../filters/elasticsearch-v3.2.0.asciidoc | 238 -------- .../filters/elasticsearch-v3.2.1.asciidoc | 238 -------- .../filters/emoji-index.asciidoc | 14 - .../filters/emoji-v1.0.1.asciidoc | 176 ------ .../filters/emoji-v1.0.2.asciidoc | 176 ------ .../filters/environment-index.asciidoc | 16 - .../filters/environment-v3.0.1.asciidoc | 83 --- .../filters/environment-v3.0.2.asciidoc | 83 --- .../filters/environment-v3.0.3.asciidoc | 83 --- .../filters/example-index.asciidoc | 10 - .../filters/extractnumbers-index.asciidoc | 16 - .../filters/extractnumbers-v3.0.1.asciidoc | 61 -- .../filters/extractnumbers-v3.0.2.asciidoc | 61 -- .../filters/extractnumbers-v3.0.3.asciidoc | 61 -- .../filters/fingerprint-index.asciidoc | 18 - .../filters/fingerprint-v3.0.4.asciidoc | 139 ----- .../filters/fingerprint-v3.1.0.asciidoc | 153 ----- .../filters/fingerprint-v3.1.1.asciidoc | 153 
----- .../filters/fingerprint-v3.1.2.asciidoc | 153 ----- .../filters/geoip-index.asciidoc | 28 - .../filters/geoip-v4.1.1.asciidoc | 180 ------ .../filters/geoip-v4.2.0.asciidoc | 195 ------ .../filters/geoip-v4.2.1.asciidoc | 196 ------ .../filters/geoip-v4.3.0.asciidoc | 206 ------- .../filters/geoip-v4.3.1.asciidoc | 206 ------- .../filters/geoip-v5.0.0.asciidoc | 184 ------ .../filters/geoip-v5.0.1.asciidoc | 184 ------ .../filters/geoip-v5.0.2.asciidoc | 184 ------ .../filters/geoip-v5.0.3.asciidoc | 184 ------ .../filters/hashid-index.asciidoc | 14 - .../filters/hashid-v0.1.2.asciidoc | 110 ---- .../filters/hashid-v0.1.3.asciidoc | 110 ---- .../filters/i18n-index.asciidoc | 16 - .../filters/i18n-v3.0.1.asciidoc | 62 -- .../filters/i18n-v3.0.2.asciidoc | 62 -- .../filters/i18n-v3.0.3.asciidoc | 62 -- .../filters/jdbc_static-index.asciidoc | 10 - .../filters/jdbc_streaming-index.asciidoc | 16 - .../filters/jdbc_streaming-v1.0.1.asciidoc | 226 ------- .../filters/jdbc_streaming-v1.0.2.asciidoc | 226 ------- .../filters/jdbc_streaming-v1.0.3.asciidoc | 226 ------- .../filters/json-index.asciidoc | 16 - .../filters/json-v3.0.3.asciidoc | 121 ---- .../filters/json-v3.0.4.asciidoc | 121 ---- .../filters/json-v3.0.5.asciidoc | 121 ---- .../filters/json_encode-index.asciidoc | 16 - .../filters/json_encode-v3.0.1.asciidoc | 76 --- .../filters/json_encode-v3.0.2.asciidoc | 76 --- .../filters/json_encode-v3.0.3.asciidoc | 76 --- .../kubernetes_metadata-index.asciidoc | 10 - .../filters/kv-index.asciidoc | 16 - .../filters/kv-v4.0.1.asciidoc | 409 ------------- .../filters/kv-v4.0.2.asciidoc | 409 ------------- .../filters/kv-v4.0.3.asciidoc | 409 ------------- .../filters/language-index.asciidoc | 10 - .../filters/lookup-index.asciidoc | 10 - .../filters/math-index.asciidoc | 10 - .../filters/metaevent-index.asciidoc | 14 - .../filters/metaevent-v2.0.5.asciidoc | 62 -- .../filters/metaevent-v2.0.7.asciidoc | 62 -- .../filters/metricize-index.asciidoc | 16 - .../filters/metricize-v3.0.1.asciidoc | 109 ---- .../filters/metricize-v3.0.2.asciidoc | 109 ---- .../filters/metricize-v3.0.3.asciidoc | 109 ---- .../filters/metrics-index.asciidoc | 16 - .../filters/metrics-v4.0.3.asciidoc | 228 ------- .../filters/metrics-v4.0.4.asciidoc | 228 ------- .../filters/metrics-v4.0.5.asciidoc | 228 ------- .../filters/multiline-index.asciidoc | 14 - .../filters/multiline-v3.0.3.asciidoc | 194 ------ .../filters/multiline-v3.0.4.asciidoc | 194 ------ .../filters/oui-index.asciidoc | 14 - .../filters/oui-v3.0.1.asciidoc | 70 --- .../filters/oui-v3.0.2.asciidoc | 70 --- .../filters/prune-index.asciidoc | 16 - .../filters/prune-v3.0.1.asciidoc | 154 ----- .../filters/prune-v3.0.2.asciidoc | 154 ----- .../filters/prune-v3.0.3.asciidoc | 154 ----- .../filters/punct-index.asciidoc | 14 - .../filters/punct-v2.0.5.asciidoc | 62 -- .../filters/punct-v2.0.6.asciidoc | 62 -- .../filters/range-index.asciidoc | 16 - .../filters/range-v3.0.1.asciidoc | 89 --- .../filters/range-v3.0.2.asciidoc | 89 --- .../filters/range-v3.0.3.asciidoc | 89 --- .../filters/script-index.asciidoc | 10 - .../filters/split-index.asciidoc | 20 - .../filters/split-v3.1.2.asciidoc | 111 ---- .../filters/split-v3.1.3.asciidoc | 111 ---- .../filters/split-v3.1.4.asciidoc | 111 ---- .../filters/split-v3.1.5.asciidoc | 111 ---- .../filters/split-v3.1.6.asciidoc | 111 ---- .../filters/syslog_pri-index.asciidoc | 16 - .../filters/syslog_pri-v3.0.3.asciidoc | 85 --- .../filters/syslog_pri-v3.0.4.asciidoc | 85 --- .../filters/syslog_pri-v3.0.5.asciidoc | 85 
--- .../filters/throttle-index.asciidoc | 16 - .../filters/throttle-v4.0.2.asciidoc | 252 -------- .../filters/throttle-v4.0.3.asciidoc | 252 -------- .../filters/throttle-v4.0.4.asciidoc | 252 -------- .../filters/tld-index.asciidoc | 16 - .../filters/tld-v3.0.1.asciidoc | 73 --- .../filters/tld-v3.0.2.asciidoc | 73 --- .../filters/tld-v3.0.3.asciidoc | 73 --- .../filters/translate-index.asciidoc | 16 - .../filters/translate-v3.0.2.asciidoc | 211 ------- .../filters/translate-v3.0.3.asciidoc | 211 ------- .../filters/translate-v3.0.4.asciidoc | 211 ------- .../filters/truncate-index.asciidoc | 16 - .../filters/truncate-v1.0.2.asciidoc | 84 --- .../filters/truncate-v1.0.3.asciidoc | 84 --- .../filters/truncate-v1.0.4.asciidoc | 84 --- .../filters/unique-index.asciidoc | 16 - .../filters/unique-v2.0.5.asciidoc | 53 -- .../filters/unique-v2.0.6.asciidoc | 53 -- .../filters/unique-v3.0.0.asciidoc | 53 -- .../filters/urldecode-index.asciidoc | 16 - .../filters/urldecode-v3.0.4.asciidoc | 83 --- .../filters/urldecode-v3.0.5.asciidoc | 83 --- .../filters/urldecode-v3.0.6.asciidoc | 83 --- .../filters/useragent-index.asciidoc | 22 - .../filters/useragent-v3.1.0.asciidoc | 117 ---- .../filters/useragent-v3.1.1.asciidoc | 117 ---- .../filters/useragent-v3.1.3.asciidoc | 118 ---- .../filters/useragent-v3.2.0.asciidoc | 118 ---- .../filters/useragent-v3.2.1.asciidoc | 118 ---- .../filters/useragent-v3.2.2.asciidoc | 118 ---- .../filters/uuid-index.asciidoc | 16 - .../filters/uuid-v3.0.3.asciidoc | 95 --- .../filters/uuid-v3.0.4.asciidoc | 95 --- .../filters/uuid-v3.0.5.asciidoc | 95 --- .../filters/xml-index.asciidoc | 16 - .../filters/xml-v4.0.3.asciidoc | 187 ------ .../filters/xml-v4.0.4.asciidoc | 187 ------ .../filters/xml-v4.0.5.asciidoc | 187 ------ .../filters/yaml-index.asciidoc | 12 - .../filters/yaml-v0.1.1.asciidoc | 103 ---- .../filters/zeromq-index.asciidoc | 14 - .../filters/zeromq-v3.0.1.asciidoc | 148 ----- .../filters/zeromq-v3.0.2.asciidoc | 148 ----- docs/versioned-plugins/inputs-index.asciidoc | 64 -- .../inputs/cloudwatch-index.asciidoc | 16 - .../inputs/cloudwatch-v2.0.1.asciidoc | 266 -------- .../inputs/cloudwatch-v2.0.2.asciidoc | 266 -------- .../inputs/cloudwatch-v2.0.3.asciidoc | 266 -------- .../inputs/cloudwatch_logs-index.asciidoc | 10 - .../inputs/couchdb_changes-index.asciidoc | 16 - .../inputs/couchdb_changes-v3.1.2.asciidoc | 220 ------- .../inputs/couchdb_changes-v3.1.3.asciidoc | 220 ------- .../inputs/couchdb_changes-v3.1.4.asciidoc | 220 ------- .../inputs/dead_letter_queue-index.asciidoc | 24 - .../inputs/dead_letter_queue-v1.0.3.asciidoc | 108 ---- .../inputs/dead_letter_queue-v1.0.4.asciidoc | 109 ---- .../inputs/dead_letter_queue-v1.0.5.asciidoc | 109 ---- .../inputs/dead_letter_queue-v1.0.6.asciidoc | 109 ---- .../inputs/dead_letter_queue-v1.1.0.asciidoc | 109 ---- .../inputs/dead_letter_queue-v1.1.1.asciidoc | 112 ---- .../inputs/dead_letter_queue-v1.1.2.asciidoc | 112 ---- .../inputs/drupal_dblog-index.asciidoc | 14 - .../inputs/drupal_dblog-v2.0.5.asciidoc | 102 ---- .../inputs/drupal_dblog-v2.0.6.asciidoc | 102 ---- .../inputs/dynamodb-index.asciidoc | 10 - .../inputs/elasticsearch-index.asciidoc | 20 - .../inputs/elasticsearch-v4.0.4.asciidoc | 220 ------- .../inputs/elasticsearch-v4.0.5.asciidoc | 230 ------- .../inputs/elasticsearch-v4.0.6.asciidoc | 230 ------- .../inputs/elasticsearch-v4.1.0.asciidoc | 230 ------- .../inputs/elasticsearch-v4.1.1.asciidoc | 230 ------- .../inputs/eventlog-index.asciidoc | 14 - .../inputs/eventlog-v4.1.1.asciidoc | 
74 --- .../inputs/eventlog-v4.1.2.asciidoc | 74 --- .../inputs/example-index.asciidoc | 10 - .../inputs/exec-index.asciidoc | 16 - .../inputs/exec-v3.1.3.asciidoc | 69 --- .../inputs/exec-v3.1.4.asciidoc | 69 --- .../inputs/exec-v3.1.5.asciidoc | 69 --- .../inputs/file-index.asciidoc | 14 - .../inputs/file-v4.0.2.asciidoc | 256 -------- .../inputs/file-v4.0.3.asciidoc | 256 -------- .../inputs/fluentd-index.asciidoc | 10 - .../inputs/ganglia-index.asciidoc | 16 - .../inputs/ganglia-v3.1.1.asciidoc | 63 -- .../inputs/ganglia-v3.1.2.asciidoc | 63 -- .../inputs/ganglia-v3.1.3.asciidoc | 63 -- .../inputs/gelf-index.asciidoc | 18 - .../inputs/gelf-v3.0.4.asciidoc | 105 ---- .../inputs/gelf-v3.0.5.asciidoc | 105 ---- .../inputs/gelf-v3.0.6.asciidoc | 105 ---- .../inputs/gelf-v3.0.7.asciidoc | 105 ---- .../inputs/gemfire-index.asciidoc | 14 - .../inputs/gemfire-v2.0.5.asciidoc | 132 ---- .../inputs/gemfire-v2.0.6.asciidoc | 132 ---- .../inputs/generator-index.asciidoc | 16 - .../inputs/generator-v3.0.3.asciidoc | 107 ---- .../inputs/generator-v3.0.4.asciidoc | 107 ---- .../inputs/generator-v3.0.5.asciidoc | 107 ---- .../inputs/github-index.asciidoc | 16 - .../inputs/github-v3.0.3.asciidoc | 81 --- .../inputs/github-v3.0.4.asciidoc | 81 --- .../inputs/github-v3.0.5.asciidoc | 81 --- .../inputs/google_pubsub-index.asciidoc | 16 - .../inputs/google_pubsub-v1.0.1.asciidoc | 213 ------- .../inputs/google_pubsub-v1.0.3.asciidoc | 213 ------- .../inputs/google_pubsub-v1.0.4.asciidoc | 213 ------- .../inputs/googleanalytics-index.asciidoc | 10 - .../inputs/graphite-index.asciidoc | 14 - .../inputs/graphite-v3.0.3.asciidoc | 175 ------ .../inputs/graphite-v3.0.4.asciidoc | 175 ------ .../inputs/heartbeat-index.asciidoc | 16 - .../inputs/heartbeat-v3.0.3.asciidoc | 97 --- .../inputs/heartbeat-v3.0.4.asciidoc | 97 --- .../inputs/heartbeat-v3.0.5.asciidoc | 97 --- .../inputs/heroku-index.asciidoc | 14 - .../inputs/heroku-v3.0.1.asciidoc | 66 -- .../inputs/heroku-v3.0.2.asciidoc | 66 -- .../inputs/http_poller-index.asciidoc | 30 - .../inputs/http_poller-v3.3.0.asciidoc | 400 ------------- .../inputs/http_poller-v3.3.1.asciidoc | 401 ------------- .../inputs/http_poller-v3.3.2.asciidoc | 401 ------------- .../inputs/http_poller-v3.3.3.asciidoc | 401 ------------- .../inputs/http_poller-v3.3.4.asciidoc | 391 ------------ .../inputs/http_poller-v4.0.0.asciidoc | 380 ------------ .../inputs/http_poller-v4.0.1.asciidoc | 380 ------------ .../inputs/http_poller-v4.0.2.asciidoc | 380 ------------ .../inputs/http_poller-v4.0.3.asciidoc | 379 ------------ .../inputs/http_poller-v4.0.4.asciidoc | 379 ------------ .../inputs/imap-index.asciidoc | 16 - .../inputs/imap-v3.0.3.asciidoc | 176 ------ .../inputs/imap-v3.0.4.asciidoc | 176 ------ .../inputs/imap-v3.0.5.asciidoc | 176 ------ .../inputs/irc-index.asciidoc | 18 - .../inputs/irc-v3.0.3.asciidoc | 152 ----- .../inputs/irc-v3.0.4.asciidoc | 152 ----- .../inputs/irc-v3.0.5.asciidoc | 152 ----- .../inputs/irc-v3.0.6.asciidoc | 152 ----- .../inputs/jdbc-index.asciidoc | 26 - .../inputs/jdbc-v4.2.1.asciidoc | 486 --------------- .../inputs/jdbc-v4.2.2.asciidoc | 486 --------------- .../inputs/jdbc-v4.2.3.asciidoc | 486 --------------- .../inputs/jdbc-v4.2.4.asciidoc | 486 --------------- .../inputs/jdbc-v4.3.0.asciidoc | 486 --------------- .../inputs/jdbc-v4.3.1.asciidoc | 486 --------------- .../inputs/jdbc-v4.3.2.asciidoc | 486 --------------- .../inputs/jdbc-v4.3.3.asciidoc | 486 --------------- .../inputs/jms-index.asciidoc | 16 - .../inputs/jms-v3.0.2.asciidoc | 
259 -------- .../inputs/jms-v3.0.3.asciidoc | 259 -------- .../inputs/jms-v3.0.4.asciidoc | 259 -------- .../inputs/jmx-index.asciidoc | 16 - .../inputs/jmx-pipe-index.asciidoc | 10 - .../inputs/jmx-v3.0.2.asciidoc | 157 ----- .../inputs/jmx-v3.0.3.asciidoc | 157 ----- .../inputs/jmx-v3.0.4.asciidoc | 157 ----- .../inputs/journald-index.asciidoc | 12 - .../inputs/journald-v2.0.1.asciidoc | 152 ----- .../inputs/kafka-index.asciidoc | 26 - .../inputs/kafka-v6.3.0.asciidoc | 551 ----------------- .../inputs/kafka-v6.3.2.asciidoc | 552 ----------------- .../inputs/kafka-v6.3.3.asciidoc | 553 ----------------- .../inputs/kafka-v6.3.4.asciidoc | 553 ----------------- .../inputs/kafka-v7.0.0.asciidoc | 566 ------------------ .../inputs/kafka-v8.0.0.asciidoc | 557 ----------------- .../inputs/kafka-v8.0.2.asciidoc | 557 ----------------- .../inputs/kafka-v8.0.4.asciidoc | 542 ----------------- .../inputs/kinesis-index.asciidoc | 18 - .../inputs/kinesis-v2.0.4.asciidoc | 105 ---- .../inputs/kinesis-v2.0.5.asciidoc | 112 ---- .../inputs/kinesis-v2.0.6.asciidoc | 112 ---- .../inputs/kinesis-v2.0.7.asciidoc | 112 ---- .../inputs/log4j-index.asciidoc | 18 - .../inputs/log4j-v3.0.6.asciidoc | 171 ------ .../inputs/log4j-v3.1.0.asciidoc | 169 ------ .../inputs/log4j-v3.1.1.asciidoc | 169 ------ .../inputs/log4j-v3.1.2.asciidoc | 169 ------ .../inputs/log4j2-index.asciidoc | 10 - .../inputs/lumberjack-index.asciidoc | 16 - .../inputs/lumberjack-v3.1.2.asciidoc | 112 ---- .../inputs/lumberjack-v3.1.3.asciidoc | 112 ---- .../inputs/lumberjack-v3.1.4.asciidoc | 112 ---- .../inputs/meetup-index.asciidoc | 16 - .../inputs/meetup-v3.0.1.asciidoc | 102 ---- .../inputs/meetup-v3.0.2.asciidoc | 102 ---- .../inputs/meetup-v3.0.3.asciidoc | 102 ---- .../inputs/mongodb-index.asciidoc | 10 - .../inputs/neo4j-index.asciidoc | 14 - .../inputs/neo4j-v2.0.5.asciidoc | 93 --- .../inputs/neo4j-v2.0.6.asciidoc | 93 --- .../inputs/netflow-index.asciidoc | 10 - .../inputs/perfmon-index.asciidoc | 10 - .../inputs/pipe-index.asciidoc | 16 - .../inputs/pipe-v3.0.4.asciidoc | 61 -- .../inputs/pipe-v3.0.5.asciidoc | 61 -- .../inputs/pipe-v3.0.6.asciidoc | 61 -- .../inputs/puppet_facter-index.asciidoc | 16 - .../inputs/puppet_facter-v3.0.1.asciidoc | 106 ---- .../inputs/puppet_facter-v3.0.2.asciidoc | 106 ---- .../inputs/puppet_facter-v3.0.3.asciidoc | 106 ---- .../inputs/rabbitmq-index.asciidoc | 20 - .../inputs/rabbitmq-v5.2.4.asciidoc | 415 ------------- .../inputs/rabbitmq-v5.2.5.asciidoc | 415 ------------- .../inputs/rabbitmq-v6.0.0.asciidoc | 395 ------------ .../inputs/rabbitmq-v6.0.1.asciidoc | 395 ------------ .../inputs/rabbitmq-v6.0.2.asciidoc | 395 ------------ .../inputs/rackspace-index.asciidoc | 12 - .../inputs/rackspace-v3.0.1.asciidoc | 102 ---- .../inputs/redis-index.asciidoc | 18 - .../inputs/redis-v3.1.3.asciidoc | 139 ----- .../inputs/redis-v3.1.4.asciidoc | 139 ----- .../inputs/redis-v3.1.5.asciidoc | 139 ----- .../inputs/redis-v3.1.6.asciidoc | 139 ----- .../inputs/relp-index.asciidoc | 16 - .../inputs/relp-v3.0.1.asciidoc | 126 ---- .../inputs/relp-v3.0.2.asciidoc | 126 ---- .../inputs/relp-v3.0.3.asciidoc | 126 ---- .../inputs/rss-index.asciidoc | 16 - .../inputs/rss-v3.0.2.asciidoc | 70 --- .../inputs/rss-v3.0.3.asciidoc | 70 --- .../inputs/rss-v3.0.4.asciidoc | 70 --- .../inputs/s3sqs-index.asciidoc | 10 - .../inputs/salesforce-index.asciidoc | 14 - .../inputs/salesforce-v3.0.2.asciidoc | 199 ------ .../inputs/salesforce-v3.0.3.asciidoc | 199 ------ .../inputs/snmptrap-index.asciidoc | 16 - 
.../inputs/snmptrap-v3.0.3.asciidoc | 88 --- .../inputs/snmptrap-v3.0.4.asciidoc | 88 --- .../inputs/snmptrap-v3.0.5.asciidoc | 88 --- .../inputs/sqlite-index.asciidoc | 16 - .../inputs/sqlite-v3.0.1.asciidoc | 124 ---- .../inputs/sqlite-v3.0.2.asciidoc | 124 ---- .../inputs/sqlite-v3.0.3.asciidoc | 124 ---- .../inputs/sqs-index.asciidoc | 18 - .../inputs/sqs-v3.0.3.asciidoc | 219 ------- .../inputs/sqs-v3.0.4.asciidoc | 220 ------- .../inputs/sqs-v3.0.5.asciidoc | 220 ------- .../inputs/sqs-v3.0.6.asciidoc | 220 ------- .../inputs/stdin-index.asciidoc | 16 - .../inputs/stdin-v3.2.3.asciidoc | 39 -- .../inputs/stdin-v3.2.4.asciidoc | 35 -- .../inputs/stdin-v3.2.5.asciidoc | 35 -- .../inputs/stomp-index.asciidoc | 18 - .../inputs/stomp-v3.0.4.asciidoc | 119 ---- .../inputs/stomp-v3.0.5.asciidoc | 119 ---- .../inputs/stomp-v3.0.6.asciidoc | 119 ---- .../inputs/stomp-v3.0.7.asciidoc | 119 ---- .../inputs/syslog-index.asciidoc | 18 - .../inputs/syslog-v3.2.1.asciidoc | 144 ----- .../inputs/syslog-v3.2.2.asciidoc | 144 ----- .../inputs/syslog-v3.2.3.asciidoc | 144 ----- .../inputs/syslog-v3.2.4.asciidoc | 144 ----- .../inputs/twitter-index.asciidoc | 18 - .../inputs/twitter-v3.0.4.asciidoc | 225 ------- .../inputs/twitter-v3.0.5.asciidoc | 226 ------- .../inputs/twitter-v3.0.6.asciidoc | 226 ------- .../inputs/twitter-v3.0.7.asciidoc | 226 ------- .../inputs/udp-index.asciidoc | 20 - .../inputs/udp-v3.1.1.asciidoc | 106 ---- .../inputs/udp-v3.1.2.asciidoc | 106 ---- .../inputs/udp-v3.1.3.asciidoc | 106 ---- .../inputs/udp-v3.2.0.asciidoc | 106 ---- .../inputs/udp-v3.2.1.asciidoc | 106 ---- .../inputs/unix-index.asciidoc | 16 - .../inputs/unix-v3.0.4.asciidoc | 103 ---- .../inputs/unix-v3.0.5.asciidoc | 103 ---- .../inputs/unix-v3.0.6.asciidoc | 103 ---- .../inputs/varnishlog-index.asciidoc | 16 - .../inputs/varnishlog-v3.0.1.asciidoc | 52 -- .../inputs/varnishlog-v3.0.2.asciidoc | 52 -- .../inputs/varnishlog-v3.0.3.asciidoc | 52 -- .../inputs/websocket-index.asciidoc | 16 - .../inputs/websocket-v4.0.1.asciidoc | 64 -- .../inputs/websocket-v4.0.2.asciidoc | 64 -- .../inputs/websocket-v4.0.3.asciidoc | 64 -- .../inputs/wmi-index.asciidoc | 16 - .../inputs/wmi-v3.0.1.asciidoc | 119 ---- .../inputs/wmi-v3.0.2.asciidoc | 119 ---- .../inputs/wmi-v3.0.3.asciidoc | 119 ---- .../inputs/xmpp-index.asciidoc | 18 - .../inputs/xmpp-v3.1.3.asciidoc | 87 --- .../inputs/xmpp-v3.1.4.asciidoc | 87 --- .../inputs/xmpp-v3.1.5.asciidoc | 87 --- .../inputs/xmpp-v3.1.6.asciidoc | 87 --- .../inputs/zenoss-index.asciidoc | 14 - .../inputs/zenoss-v2.0.5.asciidoc | 398 ------------ .../inputs/zenoss-v2.0.6.asciidoc | 398 ------------ .../inputs/zeromq-index.asciidoc | 12 - .../inputs/zeromq-v3.0.3.asciidoc | 159 ----- docs/versioned-plugins/outputs-index.asciidoc | 66 -- .../outputs/beats-index.asciidoc | 10 - .../outputs/boundary-index.asciidoc | 16 - .../outputs/boundary-v3.0.2.asciidoc | 139 ----- .../outputs/boundary-v3.0.3.asciidoc | 139 ----- .../outputs/boundary-v3.0.4.asciidoc | 139 ----- .../outputs/circonus-index.asciidoc | 16 - .../outputs/circonus-v3.0.1.asciidoc | 93 --- .../outputs/circonus-v3.0.3.asciidoc | 91 --- .../outputs/circonus-v3.0.4.asciidoc | 91 --- .../outputs/cloudwatch-index.asciidoc | 16 - .../outputs/cloudwatch-v3.0.5.asciidoc | 317 ---------- .../outputs/cloudwatch-v3.0.6.asciidoc | 317 ---------- .../outputs/cloudwatch-v3.0.7.asciidoc | 317 ---------- .../outputs/csv-index.asciidoc | 16 - .../outputs/csv-v3.0.4.asciidoc | 175 ------ .../outputs/csv-v3.0.5.asciidoc | 175 ------ 
.../outputs/csv-v3.0.6.asciidoc | 175 ------ .../outputs/datadog-index.asciidoc | 16 - .../outputs/datadog-v3.0.1.asciidoc | 124 ---- .../outputs/datadog-v3.0.3.asciidoc | 122 ---- .../outputs/datadog-v3.0.4.asciidoc | 122 ---- .../outputs/datadog_metrics-index.asciidoc | 14 - .../outputs/datadog_metrics-v3.0.1.asciidoc | 130 ---- .../outputs/datadog_metrics-v3.0.2.asciidoc | 130 ---- .../outputs/elasticsearch_java-index.asciidoc | 12 - .../elasticsearch_java-v2.1.4.asciidoc | 491 --------------- .../outputs/example-index.asciidoc | 10 - .../outputs/exec-index.asciidoc | 16 - .../outputs/exec-v3.1.1.asciidoc | 86 --- .../outputs/exec-v3.1.2.asciidoc | 86 --- .../outputs/exec-v3.1.3.asciidoc | 86 --- .../outputs/firehose-index.asciidoc | 10 - .../outputs/ganglia-index.asciidoc | 16 - .../outputs/ganglia-v3.0.3.asciidoc | 139 ----- .../outputs/ganglia-v3.0.4.asciidoc | 139 ----- .../outputs/ganglia-v3.0.5.asciidoc | 139 ----- .../outputs/gelf-index.asciidoc | 14 - .../outputs/gelf-v3.1.3.asciidoc | 167 ------ .../outputs/gelf-v3.1.4.asciidoc | 167 ------ .../outputs/gemfire-index.asciidoc | 14 - .../outputs/gemfire-v2.0.5.asciidoc | 100 ---- .../outputs/gemfire-v2.0.6.asciidoc | 100 ---- .../outputs/google_bigquery-index.asciidoc | 16 - .../outputs/google_bigquery-v3.2.1.asciidoc | 302 ---------- .../outputs/google_bigquery-v3.2.2.asciidoc | 302 ---------- .../outputs/google_bigquery-v3.2.3.asciidoc | 302 ---------- .../google_cloud_storage-index.asciidoc | 14 - .../google_cloud_storage-v3.0.3.asciidoc | 206 ------- .../google_cloud_storage-v3.0.4.asciidoc | 206 ------- .../outputs/graphtastic-index.asciidoc | 16 - .../outputs/graphtastic-v3.0.1.asciidoc | 148 ----- .../outputs/graphtastic-v3.0.2.asciidoc | 148 ----- .../outputs/graphtastic-v3.0.3.asciidoc | 148 ----- .../outputs/hipchat-index.asciidoc | 12 - .../outputs/hipchat-v4.0.3.asciidoc | 122 ---- .../outputs/http-index.asciidoc | 30 - .../outputs/http-v4.3.0.asciidoc | 380 ------------ .../outputs/http-v4.3.1.asciidoc | 381 ------------ .../outputs/http-v4.3.2.asciidoc | 377 ------------ .../outputs/http-v4.3.4.asciidoc | 379 ------------ .../outputs/http-v4.4.0.asciidoc | 389 ------------ .../outputs/http-v5.0.0.asciidoc | 369 ------------ .../outputs/http-v5.0.1.asciidoc | 369 ------------ .../outputs/http-v5.1.0.asciidoc | 379 ------------ .../outputs/http-v5.1.1.asciidoc | 379 ------------ .../outputs/http-v5.1.2.asciidoc | 379 ------------ .../outputs/icinga-index.asciidoc | 10 - .../outputs/influxdb-index.asciidoc | 16 - .../outputs/influxdb-v5.0.1.asciidoc | 270 --------- .../outputs/influxdb-v5.0.2.asciidoc | 270 --------- .../outputs/influxdb-v5.0.3.asciidoc | 270 --------- .../outputs/irc-index.asciidoc | 16 - .../outputs/irc-v3.0.3.asciidoc | 157 ----- .../outputs/irc-v3.0.4.asciidoc | 157 ----- .../outputs/irc-v3.0.5.asciidoc | 157 ----- .../outputs/jira-index.asciidoc | 14 - .../outputs/jira-v3.0.2.asciidoc | 195 ------ .../outputs/jira-v3.0.3.asciidoc | 195 ------ .../outputs/jms-index.asciidoc | 12 - .../outputs/jms-v3.0.1.asciidoc | 175 ------ .../outputs/juggernaut-index.asciidoc | 16 - .../outputs/juggernaut-v3.0.3.asciidoc | 115 ---- .../outputs/juggernaut-v3.0.4.asciidoc | 115 ---- .../outputs/juggernaut-v3.0.5.asciidoc | 115 ---- .../outputs/kafka-index.asciidoc | 26 - .../outputs/kafka-v6.2.0.asciidoc | 449 -------------- .../outputs/kafka-v6.2.1.asciidoc | 450 -------------- .../outputs/kafka-v6.2.2.asciidoc | 451 -------------- .../outputs/kafka-v7.0.0.asciidoc | 418 ------------- 
.../outputs/kafka-v7.0.1.asciidoc | 418 ------------- .../outputs/kafka-v7.0.3.asciidoc | 425 ------------- .../outputs/kafka-v7.0.4.asciidoc | 425 ------------- .../outputs/kafka-v7.0.6.asciidoc | 425 ------------- .../outputs/librato-index.asciidoc | 16 - .../outputs/librato-v3.0.2.asciidoc | 162 ----- .../outputs/librato-v3.0.4.asciidoc | 162 ----- .../outputs/librato-v3.0.5.asciidoc | 162 ----- .../outputs/logentries-index.asciidoc | 10 - .../outputs/loggly-index.asciidoc | 16 - .../outputs/loggly-v3.0.1.asciidoc | 164 ----- .../outputs/loggly-v3.0.2.asciidoc | 164 ----- .../outputs/loggly-v3.0.3.asciidoc | 164 ----- .../outputs/lumberjack-index.asciidoc | 14 - .../outputs/lumberjack-v3.1.3.asciidoc | 101 ---- .../outputs/lumberjack-v3.1.5.asciidoc | 102 ---- .../outputs/metriccatcher-index.asciidoc | 16 - .../outputs/metriccatcher-v3.0.1.asciidoc | 164 ----- .../outputs/metriccatcher-v3.0.2.asciidoc | 164 ----- .../outputs/metriccatcher-v3.0.3.asciidoc | 164 ----- .../outputs/monasca_log_api-index.asciidoc | 12 - .../outputs/monasca_log_api-v1.0.2.asciidoc | 179 ------ .../outputs/mongodb-index.asciidoc | 16 - .../outputs/mongodb-v3.1.1.asciidoc | 134 ----- .../outputs/mongodb-v3.1.2.asciidoc | 134 ----- .../outputs/mongodb-v3.1.3.asciidoc | 134 ----- .../outputs/nagios-index.asciidoc | 16 - .../outputs/nagios-v3.0.3.asciidoc | 90 --- .../outputs/nagios-v3.0.4.asciidoc | 90 --- .../outputs/nagios-v3.0.5.asciidoc | 90 --- .../outputs/nagios_nsca-index.asciidoc | 16 - .../outputs/nagios_nsca-v3.0.3.asciidoc | 141 ----- .../outputs/nagios_nsca-v3.0.4.asciidoc | 141 ----- .../outputs/nagios_nsca-v3.0.5.asciidoc | 141 ----- .../outputs/neo4j-index.asciidoc | 12 - .../outputs/neo4j-v2.0.5.asciidoc | 53 -- .../outputs/newrelic-index.asciidoc | 10 - .../outputs/null-index.asciidoc | 14 - .../outputs/null-v3.0.3.asciidoc | 37 -- .../outputs/null-v3.0.4.asciidoc | 37 -- .../outputs/opentsdb-index.asciidoc | 16 - .../outputs/opentsdb-v3.1.2.asciidoc | 87 --- .../outputs/opentsdb-v3.1.3.asciidoc | 87 --- .../outputs/opentsdb-v3.1.4.asciidoc | 87 --- .../outputs/pagerduty-index.asciidoc | 16 - .../outputs/pagerduty-v3.0.4.asciidoc | 105 ---- .../outputs/pagerduty-v3.0.5.asciidoc | 105 ---- .../outputs/pagerduty-v3.0.6.asciidoc | 105 ---- .../outputs/pipe-index.asciidoc | 16 - .../outputs/pipe-v3.0.3.asciidoc | 80 --- .../outputs/pipe-v3.0.4.asciidoc | 80 --- .../outputs/pipe-v3.0.5.asciidoc | 80 --- .../outputs/rabbitmq-index.asciidoc | 24 - .../outputs/rabbitmq-v4.0.8.asciidoc | 279 --------- .../outputs/rabbitmq-v4.0.9.asciidoc | 293 --------- .../outputs/rabbitmq-v5.0.0.asciidoc | 266 -------- .../outputs/rabbitmq-v5.0.1.asciidoc | 266 -------- .../outputs/rabbitmq-v5.0.2.asciidoc | 266 -------- .../outputs/rabbitmq-v5.0.3.asciidoc | 266 -------- .../outputs/rabbitmq-v5.1.0.asciidoc | 266 -------- .../outputs/rackspace-index.asciidoc | 14 - .../outputs/rackspace-v2.0.5.asciidoc | 91 --- .../outputs/rackspace-v2.0.7.asciidoc | 91 --- .../outputs/rados-index.asciidoc | 10 - .../outputs/redis-index.asciidoc | 22 - .../outputs/redis-v3.0.4.asciidoc | 221 ------- .../outputs/redis-v3.0.5.asciidoc | 221 ------- .../outputs/redis-v4.0.0.asciidoc | 202 ------- .../outputs/redis-v4.0.1.asciidoc | 202 ------- .../outputs/redis-v4.0.2.asciidoc | 202 ------- .../outputs/redis-v4.0.3.asciidoc | 202 ------- .../outputs/redmine-index.asciidoc | 16 - .../outputs/redmine-v3.0.1.asciidoc | 192 ------ .../outputs/redmine-v3.0.2.asciidoc | 192 ------ .../outputs/redmine-v3.0.3.asciidoc | 192 ------ 
.../outputs/riak-index.asciidoc | 16 - .../outputs/riak-v3.0.1.asciidoc | 177 ------ .../outputs/riak-v3.0.2.asciidoc | 177 ------ .../outputs/riak-v3.0.3.asciidoc | 177 ------ .../outputs/riemann-index.asciidoc | 16 - .../outputs/riemann-v3.0.1.asciidoc | 178 ------ .../outputs/riemann-v3.0.2.asciidoc | 178 ------ .../outputs/riemann-v3.0.3.asciidoc | 178 ------ .../outputs/s3-index.asciidoc | 14 - .../outputs/s3-v4.0.8.asciidoc | 327 ---------- .../outputs/s3-v4.0.9.asciidoc | 327 ---------- .../outputs/slack-index.asciidoc | 14 - .../outputs/slack-v2.0.3.asciidoc | 107 ---- .../outputs/slack-v2.1.0.asciidoc | 107 ---- .../outputs/sns-index.asciidoc | 16 - .../outputs/sns-v4.0.4.asciidoc | 162 ----- .../outputs/sns-v4.0.5.asciidoc | 162 ----- .../outputs/sns-v4.0.6.asciidoc | 162 ----- .../outputs/solr_http-index.asciidoc | 16 - .../outputs/solr_http-v3.0.2.asciidoc | 92 --- .../outputs/solr_http-v3.0.3.asciidoc | 92 --- .../outputs/solr_http-v3.0.4.asciidoc | 92 --- .../outputs/sqs-index.asciidoc | 20 - .../outputs/sqs-v4.0.2.asciidoc | 218 ------- .../outputs/sqs-v4.0.3.asciidoc | 218 ------- .../outputs/sqs-v5.0.0.asciidoc | 197 ------ .../outputs/sqs-v5.0.1.asciidoc | 197 ------ .../outputs/sqs-v5.0.2.asciidoc | 197 ------ .../outputs/statsd-index.asciidoc | 16 - .../outputs/statsd-v3.1.2.asciidoc | 193 ------ .../outputs/statsd-v3.1.3.asciidoc | 193 ------ .../outputs/statsd-v3.1.4.asciidoc | 193 ------ .../outputs/stdout-index.asciidoc | 16 - .../outputs/stdout-v3.1.1.asciidoc | 64 -- .../outputs/stdout-v3.1.2.asciidoc | 60 -- .../outputs/stdout-v3.1.3.asciidoc | 60 -- .../outputs/stomp-index.asciidoc | 16 - .../outputs/stomp-v3.0.5.asciidoc | 123 ---- .../outputs/stomp-v3.0.7.asciidoc | 123 ---- .../outputs/stomp-v3.0.8.asciidoc | 123 ---- .../outputs/syslog-index.asciidoc | 16 - .../outputs/syslog-v3.0.2.asciidoc | 239 -------- .../outputs/syslog-v3.0.3.asciidoc | 239 -------- .../outputs/syslog-v3.0.4.asciidoc | 239 -------- .../outputs/tcp-index.asciidoc | 20 - .../outputs/tcp-v4.0.1.asciidoc | 158 ----- .../outputs/tcp-v4.0.2.asciidoc | 158 ----- .../outputs/tcp-v5.0.0.asciidoc | 144 ----- .../outputs/tcp-v5.0.1.asciidoc | 144 ----- .../outputs/tcp-v5.0.2.asciidoc | 144 ----- .../outputs/timber-index.asciidoc | 12 - .../outputs/timber-v1.0.3.asciidoc | 228 ------- .../outputs/udp-index.asciidoc | 16 - .../outputs/udp-v3.0.3.asciidoc | 65 -- .../outputs/udp-v3.0.4.asciidoc | 65 -- .../outputs/udp-v3.0.5.asciidoc | 65 -- .../outputs/webhdfs-index.asciidoc | 16 - .../outputs/webhdfs-v3.0.3.asciidoc | 293 --------- .../outputs/webhdfs-v3.0.4.asciidoc | 293 --------- .../outputs/webhdfs-v3.0.5.asciidoc | 293 --------- .../outputs/websocket-index.asciidoc | 16 - .../outputs/websocket-v3.0.2.asciidoc | 66 -- .../outputs/websocket-v3.0.3.asciidoc | 66 -- .../outputs/websocket-v3.0.4.asciidoc | 66 -- .../outputs/xmpp-index.asciidoc | 20 - .../outputs/xmpp-v3.0.3.asciidoc | 104 ---- .../outputs/xmpp-v3.0.4.asciidoc | 105 ---- .../outputs/xmpp-v3.0.5.asciidoc | 105 ---- .../outputs/xmpp-v3.0.6.asciidoc | 105 ---- .../outputs/xmpp-v3.0.7.asciidoc | 105 ---- .../outputs/zabbix-index.asciidoc | 16 - .../outputs/zabbix-v3.0.2.asciidoc | 160 ----- .../outputs/zabbix-v3.0.3.asciidoc | 160 ----- .../outputs/zabbix-v3.0.4.asciidoc | 160 ----- .../outputs/zeromq-index.asciidoc | 14 - .../outputs/zeromq-v3.1.1.asciidoc | 126 ---- .../outputs/zeromq-v3.1.2.asciidoc | 126 ---- .../outputs/zookeeper-index.asciidoc | 10 - 820 files changed, 103813 deletions(-) delete mode 100644 
docs/versioned-plugins/codecs/avro-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/avro-v3.2.0.asciidoc delete mode 100644 docs/versioned-plugins/codecs/avro-v3.2.1.asciidoc delete mode 100644 docs/versioned-plugins/codecs/avro-v3.2.2.asciidoc delete mode 100644 docs/versioned-plugins/codecs/avro-v3.2.3.asciidoc delete mode 100644 docs/versioned-plugins/codecs/cloudfront-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/cloudfront-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/codecs/cloudfront-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/codecs/cloudfront-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/codecs/cloudtrail-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/cloudtrail-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/codecs/cloudtrail-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/codecs/collectd-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/collectd-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/codecs/collectd-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/codecs/collectd-v3.0.6.asciidoc delete mode 100644 docs/versioned-plugins/codecs/collectd-v3.0.7.asciidoc delete mode 100644 docs/versioned-plugins/codecs/collectd-v3.0.8.asciidoc delete mode 100644 docs/versioned-plugins/codecs/compress_spooler-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/compress_spooler-v2.0.5.asciidoc delete mode 100644 docs/versioned-plugins/codecs/compress_spooler-v2.0.6.asciidoc delete mode 100644 docs/versioned-plugins/codecs/csv-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/csv-v0.1.3.asciidoc delete mode 100644 docs/versioned-plugins/codecs/csv-v0.1.4.asciidoc delete mode 100644 docs/versioned-plugins/codecs/dots-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/dots-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/codecs/dots-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/codecs/dots-v3.0.6.asciidoc delete mode 100644 docs/versioned-plugins/codecs/edn-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/edn-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/codecs/edn-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/codecs/edn-v3.0.6.asciidoc delete mode 100644 docs/versioned-plugins/codecs/edn_lines-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/edn_lines-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/codecs/edn_lines-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/codecs/edn_lines-v3.0.6.asciidoc delete mode 100644 docs/versioned-plugins/codecs/es_bulk-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/es_bulk-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/codecs/es_bulk-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/codecs/es_bulk-v3.0.6.asciidoc delete mode 100644 docs/versioned-plugins/codecs/example-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/fluent-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/fluent-v3.1.2.asciidoc delete mode 100644 docs/versioned-plugins/codecs/fluent-v3.1.3.asciidoc delete mode 100644 docs/versioned-plugins/codecs/fluent-v3.1.4.asciidoc delete mode 100644 docs/versioned-plugins/codecs/fluent-v3.1.5.asciidoc delete mode 100644 docs/versioned-plugins/codecs/graphite-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/graphite-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/codecs/graphite-v3.0.4.asciidoc 
delete mode 100644 docs/versioned-plugins/codecs/graphite-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/codecs/gzip_lines-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/gzip_lines-v3.0.0.asciidoc delete mode 100644 docs/versioned-plugins/codecs/gzip_lines-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/codecs/gzip_lines-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/codecs/gzip_lines-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/codecs/json_lines-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/json_lines-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/codecs/json_lines-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/codecs/json_lines-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/codecs/json_pretty-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/line-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/line-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/codecs/line-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/codecs/line-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/codecs/line-v3.0.6.asciidoc delete mode 100644 docs/versioned-plugins/codecs/line-v3.0.7.asciidoc delete mode 100644 docs/versioned-plugins/codecs/line-v3.0.8.asciidoc delete mode 100644 docs/versioned-plugins/codecs/msgpack-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/msgpack-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/codecs/msgpack-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/codecs/msgpack-v3.0.6.asciidoc delete mode 100644 docs/versioned-plugins/codecs/msgpack-v3.0.7.asciidoc delete mode 100644 docs/versioned-plugins/codecs/multiline-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/multiline-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/codecs/multiline-v3.0.6.asciidoc delete mode 100644 docs/versioned-plugins/codecs/multiline-v3.0.7.asciidoc delete mode 100644 docs/versioned-plugins/codecs/multiline-v3.0.8.asciidoc delete mode 100644 docs/versioned-plugins/codecs/netflow-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/netflow-v3.10.0.asciidoc delete mode 100644 docs/versioned-plugins/codecs/netflow-v3.4.1.asciidoc delete mode 100644 docs/versioned-plugins/codecs/netflow-v3.5.0.asciidoc delete mode 100644 docs/versioned-plugins/codecs/netflow-v3.5.1.asciidoc delete mode 100644 docs/versioned-plugins/codecs/netflow-v3.5.2.asciidoc delete mode 100644 docs/versioned-plugins/codecs/netflow-v3.6.0.asciidoc delete mode 100644 docs/versioned-plugins/codecs/netflow-v3.7.0.asciidoc delete mode 100644 docs/versioned-plugins/codecs/netflow-v3.7.1.asciidoc delete mode 100644 docs/versioned-plugins/codecs/netflow-v3.8.0.asciidoc delete mode 100644 docs/versioned-plugins/codecs/netflow-v3.8.1.asciidoc delete mode 100644 docs/versioned-plugins/codecs/netflow-v3.8.3.asciidoc delete mode 100644 docs/versioned-plugins/codecs/netflow-v3.9.0.asciidoc delete mode 100644 docs/versioned-plugins/codecs/netflow-v3.9.1.asciidoc delete mode 100644 docs/versioned-plugins/codecs/nmap-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/nmap-v0.0.19.asciidoc delete mode 100644 docs/versioned-plugins/codecs/nmap-v0.0.20.asciidoc delete mode 100644 docs/versioned-plugins/codecs/nmap-v0.0.21.asciidoc delete mode 100644 docs/versioned-plugins/codecs/oldlogstashjson-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/oldlogstashjson-v2.0.5.asciidoc delete mode 100644 
docs/versioned-plugins/codecs/oldlogstashjson-v2.0.7.asciidoc delete mode 100644 docs/versioned-plugins/codecs/plain-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/plain-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/codecs/plain-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/codecs/plain-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/codecs/plain-v3.0.6.asciidoc delete mode 100644 docs/versioned-plugins/codecs/pretty-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/protobuf-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/protobuf-v1.0.2.asciidoc delete mode 100644 docs/versioned-plugins/codecs/protobuf-v1.0.3.asciidoc delete mode 100644 docs/versioned-plugins/codecs/s3plain-index.asciidoc delete mode 100644 docs/versioned-plugins/codecs/s3plain-v2.0.5.asciidoc delete mode 100644 docs/versioned-plugins/codecs/s3plain-v2.0.6.asciidoc delete mode 100644 docs/versioned-plugins/codecs/s3plain-v2.0.7.asciidoc delete mode 100644 docs/versioned-plugins/codecs/sflow-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/age-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/age-v1.0.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/age-v1.0.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/aggregate-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/aggregate-v2.6.0.asciidoc delete mode 100644 docs/versioned-plugins/filters/aggregate-v2.6.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/aggregate-v2.6.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/aggregate-v2.6.4.asciidoc delete mode 100644 docs/versioned-plugins/filters/aggregate-v2.7.0.asciidoc delete mode 100644 docs/versioned-plugins/filters/aggregate-v2.7.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/aggregate-v2.7.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/alter-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/alter-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/alter-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/alter-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/anonymize-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/anonymize-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/filters/anonymize-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/filters/anonymize-v3.0.6.asciidoc delete mode 100644 docs/versioned-plugins/filters/bytesize-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/checksum-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/checksum-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/checksum-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/filters/cidr-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/cidr-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/cidr-v3.1.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/cidr-v3.1.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/cipher-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/cipher-v2.0.6.asciidoc delete mode 100644 docs/versioned-plugins/filters/cipher-v2.0.7.asciidoc delete mode 100644 docs/versioned-plugins/filters/cipher-v3.0.0.asciidoc delete mode 100644 docs/versioned-plugins/filters/cipher-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/clone-index.asciidoc delete mode 100644 
docs/versioned-plugins/filters/clone-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/clone-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/filters/clone-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/filters/cloudfoundry-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/collate-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/collate-v2.0.5.asciidoc delete mode 100644 docs/versioned-plugins/filters/collate-v2.0.6.asciidoc delete mode 100644 docs/versioned-plugins/filters/csv-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/csv-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/csv-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/filters/csv-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/filters/csv-v3.0.6.asciidoc delete mode 100644 docs/versioned-plugins/filters/csv-v3.0.7.asciidoc delete mode 100644 docs/versioned-plugins/filters/date-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/date-v3.1.7.asciidoc delete mode 100644 docs/versioned-plugins/filters/date-v3.1.8.asciidoc delete mode 100644 docs/versioned-plugins/filters/date-v3.1.9.asciidoc delete mode 100644 docs/versioned-plugins/filters/de_dot-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/de_dot-v1.0.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/de_dot-v1.0.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/de_dot-v1.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/debug-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/dissect-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/dissect-v1.0.9.asciidoc delete mode 100644 docs/versioned-plugins/filters/dissect-v1.1.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/dissect-v1.1.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/dns-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/dns-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/filters/dns-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/filters/dns-v3.0.6.asciidoc delete mode 100644 docs/versioned-plugins/filters/dns-v3.0.7.asciidoc delete mode 100644 docs/versioned-plugins/filters/drop-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/drop-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/drop-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/filters/drop-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/filters/elapsed-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/elapsed-v4.0.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/elapsed-v4.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/elapsed-v4.0.4.asciidoc delete mode 100644 docs/versioned-plugins/filters/elasticsearch-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/elasticsearch-v3.1.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/elasticsearch-v3.1.4.asciidoc delete mode 100644 docs/versioned-plugins/filters/elasticsearch-v3.1.5.asciidoc delete mode 100644 docs/versioned-plugins/filters/elasticsearch-v3.1.6.asciidoc delete mode 100644 docs/versioned-plugins/filters/elasticsearch-v3.2.0.asciidoc delete mode 100644 docs/versioned-plugins/filters/elasticsearch-v3.2.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/emoji-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/emoji-v1.0.1.asciidoc delete mode 100644 
docs/versioned-plugins/filters/emoji-v1.0.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/environment-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/environment-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/environment-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/environment-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/example-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/extractnumbers-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/extractnumbers-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/extractnumbers-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/extractnumbers-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/fingerprint-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/fingerprint-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/filters/fingerprint-v3.1.0.asciidoc delete mode 100644 docs/versioned-plugins/filters/fingerprint-v3.1.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/fingerprint-v3.1.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/geoip-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/geoip-v4.1.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/geoip-v4.2.0.asciidoc delete mode 100644 docs/versioned-plugins/filters/geoip-v4.2.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/geoip-v4.3.0.asciidoc delete mode 100644 docs/versioned-plugins/filters/geoip-v4.3.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/geoip-v5.0.0.asciidoc delete mode 100644 docs/versioned-plugins/filters/geoip-v5.0.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/geoip-v5.0.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/geoip-v5.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/hashid-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/hashid-v0.1.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/hashid-v0.1.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/i18n-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/i18n-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/i18n-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/i18n-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/jdbc_static-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/jdbc_streaming-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/jdbc_streaming-v1.0.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/jdbc_streaming-v1.0.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/jdbc_streaming-v1.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/json-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/json-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/json-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/filters/json-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/filters/json_encode-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/json_encode-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/json_encode-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/json_encode-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/kubernetes_metadata-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/kv-index.asciidoc delete mode 100644 
docs/versioned-plugins/filters/kv-v4.0.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/kv-v4.0.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/kv-v4.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/language-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/lookup-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/math-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/metaevent-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/metaevent-v2.0.5.asciidoc delete mode 100644 docs/versioned-plugins/filters/metaevent-v2.0.7.asciidoc delete mode 100644 docs/versioned-plugins/filters/metricize-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/metricize-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/metricize-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/metricize-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/metrics-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/metrics-v4.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/metrics-v4.0.4.asciidoc delete mode 100644 docs/versioned-plugins/filters/metrics-v4.0.5.asciidoc delete mode 100644 docs/versioned-plugins/filters/multiline-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/multiline-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/multiline-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/filters/oui-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/oui-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/oui-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/prune-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/prune-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/prune-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/prune-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/punct-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/punct-v2.0.5.asciidoc delete mode 100644 docs/versioned-plugins/filters/punct-v2.0.6.asciidoc delete mode 100644 docs/versioned-plugins/filters/range-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/range-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/range-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/range-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/script-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/split-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/split-v3.1.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/split-v3.1.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/split-v3.1.4.asciidoc delete mode 100644 docs/versioned-plugins/filters/split-v3.1.5.asciidoc delete mode 100644 docs/versioned-plugins/filters/split-v3.1.6.asciidoc delete mode 100644 docs/versioned-plugins/filters/syslog_pri-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/syslog_pri-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/syslog_pri-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/filters/syslog_pri-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/filters/throttle-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/throttle-v4.0.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/throttle-v4.0.3.asciidoc delete mode 100644 
docs/versioned-plugins/filters/throttle-v4.0.4.asciidoc delete mode 100644 docs/versioned-plugins/filters/tld-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/tld-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/tld-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/tld-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/translate-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/translate-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/translate-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/translate-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/filters/truncate-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/truncate-v1.0.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/truncate-v1.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/truncate-v1.0.4.asciidoc delete mode 100644 docs/versioned-plugins/filters/unique-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/unique-v2.0.5.asciidoc delete mode 100644 docs/versioned-plugins/filters/unique-v2.0.6.asciidoc delete mode 100644 docs/versioned-plugins/filters/unique-v3.0.0.asciidoc delete mode 100644 docs/versioned-plugins/filters/urldecode-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/urldecode-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/filters/urldecode-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/filters/urldecode-v3.0.6.asciidoc delete mode 100644 docs/versioned-plugins/filters/useragent-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/useragent-v3.1.0.asciidoc delete mode 100644 docs/versioned-plugins/filters/useragent-v3.1.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/useragent-v3.1.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/useragent-v3.2.0.asciidoc delete mode 100644 docs/versioned-plugins/filters/useragent-v3.2.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/useragent-v3.2.2.asciidoc delete mode 100644 docs/versioned-plugins/filters/uuid-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/uuid-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/uuid-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/filters/uuid-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/filters/xml-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/xml-v4.0.3.asciidoc delete mode 100644 docs/versioned-plugins/filters/xml-v4.0.4.asciidoc delete mode 100644 docs/versioned-plugins/filters/xml-v4.0.5.asciidoc delete mode 100644 docs/versioned-plugins/filters/yaml-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/yaml-v0.1.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/zeromq-index.asciidoc delete mode 100644 docs/versioned-plugins/filters/zeromq-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/filters/zeromq-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/cloudwatch-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/cloudwatch-v2.0.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/cloudwatch-v2.0.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/cloudwatch-v2.0.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/cloudwatch_logs-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/couchdb_changes-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/couchdb_changes-v3.1.2.asciidoc delete mode 100644 
docs/versioned-plugins/inputs/couchdb_changes-v3.1.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/couchdb_changes-v3.1.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/dead_letter_queue-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/dead_letter_queue-v1.0.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/dead_letter_queue-v1.0.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/dead_letter_queue-v1.0.5.asciidoc delete mode 100644 docs/versioned-plugins/inputs/dead_letter_queue-v1.0.6.asciidoc delete mode 100644 docs/versioned-plugins/inputs/dead_letter_queue-v1.1.0.asciidoc delete mode 100644 docs/versioned-plugins/inputs/dead_letter_queue-v1.1.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/dead_letter_queue-v1.1.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/drupal_dblog-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/drupal_dblog-v2.0.5.asciidoc delete mode 100644 docs/versioned-plugins/inputs/drupal_dblog-v2.0.6.asciidoc delete mode 100644 docs/versioned-plugins/inputs/dynamodb-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/elasticsearch-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/elasticsearch-v4.0.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/elasticsearch-v4.0.5.asciidoc delete mode 100644 docs/versioned-plugins/inputs/elasticsearch-v4.0.6.asciidoc delete mode 100644 docs/versioned-plugins/inputs/elasticsearch-v4.1.0.asciidoc delete mode 100644 docs/versioned-plugins/inputs/elasticsearch-v4.1.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/eventlog-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/eventlog-v4.1.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/eventlog-v4.1.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/example-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/exec-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/exec-v3.1.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/exec-v3.1.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/exec-v3.1.5.asciidoc delete mode 100644 docs/versioned-plugins/inputs/file-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/file-v4.0.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/file-v4.0.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/fluentd-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/ganglia-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/ganglia-v3.1.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/ganglia-v3.1.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/ganglia-v3.1.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/gelf-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/gelf-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/gelf-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/inputs/gelf-v3.0.6.asciidoc delete mode 100644 docs/versioned-plugins/inputs/gelf-v3.0.7.asciidoc delete mode 100644 docs/versioned-plugins/inputs/gemfire-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/gemfire-v2.0.5.asciidoc delete mode 100644 docs/versioned-plugins/inputs/gemfire-v2.0.6.asciidoc delete mode 100644 docs/versioned-plugins/inputs/generator-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/generator-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/generator-v3.0.4.asciidoc delete mode 100644 
docs/versioned-plugins/inputs/generator-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/inputs/github-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/github-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/github-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/github-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/inputs/google_pubsub-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/google_pubsub-v1.0.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/google_pubsub-v1.0.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/google_pubsub-v1.0.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/googleanalytics-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/graphite-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/graphite-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/graphite-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/heartbeat-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/heartbeat-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/heartbeat-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/heartbeat-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/inputs/heroku-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/heroku-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/heroku-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/http_poller-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/http_poller-v3.3.0.asciidoc delete mode 100644 docs/versioned-plugins/inputs/http_poller-v3.3.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/http_poller-v3.3.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/http_poller-v3.3.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/http_poller-v3.3.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/http_poller-v4.0.0.asciidoc delete mode 100644 docs/versioned-plugins/inputs/http_poller-v4.0.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/http_poller-v4.0.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/http_poller-v4.0.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/http_poller-v4.0.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/imap-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/imap-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/imap-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/imap-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/inputs/irc-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/irc-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/irc-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/irc-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/inputs/irc-v3.0.6.asciidoc delete mode 100644 docs/versioned-plugins/inputs/jdbc-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/jdbc-v4.2.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/jdbc-v4.2.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/jdbc-v4.2.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/jdbc-v4.2.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/jdbc-v4.3.0.asciidoc delete mode 100644 docs/versioned-plugins/inputs/jdbc-v4.3.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/jdbc-v4.3.2.asciidoc delete mode 100644 
docs/versioned-plugins/inputs/jdbc-v4.3.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/jms-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/jms-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/jms-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/jms-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/jmx-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/jmx-pipe-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/jmx-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/jmx-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/jmx-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/journald-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/journald-v2.0.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/kafka-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/kafka-v6.3.0.asciidoc delete mode 100644 docs/versioned-plugins/inputs/kafka-v6.3.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/kafka-v6.3.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/kafka-v6.3.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/kafka-v7.0.0.asciidoc delete mode 100644 docs/versioned-plugins/inputs/kafka-v8.0.0.asciidoc delete mode 100644 docs/versioned-plugins/inputs/kafka-v8.0.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/kafka-v8.0.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/kinesis-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/kinesis-v2.0.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/kinesis-v2.0.5.asciidoc delete mode 100644 docs/versioned-plugins/inputs/kinesis-v2.0.6.asciidoc delete mode 100644 docs/versioned-plugins/inputs/kinesis-v2.0.7.asciidoc delete mode 100644 docs/versioned-plugins/inputs/log4j-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/log4j-v3.0.6.asciidoc delete mode 100644 docs/versioned-plugins/inputs/log4j-v3.1.0.asciidoc delete mode 100644 docs/versioned-plugins/inputs/log4j-v3.1.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/log4j-v3.1.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/log4j2-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/lumberjack-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/lumberjack-v3.1.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/lumberjack-v3.1.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/lumberjack-v3.1.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/meetup-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/meetup-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/meetup-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/meetup-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/mongodb-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/neo4j-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/neo4j-v2.0.5.asciidoc delete mode 100644 docs/versioned-plugins/inputs/neo4j-v2.0.6.asciidoc delete mode 100644 docs/versioned-plugins/inputs/netflow-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/perfmon-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/pipe-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/pipe-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/pipe-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/inputs/pipe-v3.0.6.asciidoc delete 
mode 100644 docs/versioned-plugins/inputs/puppet_facter-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/puppet_facter-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/puppet_facter-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/puppet_facter-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/rabbitmq-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/rabbitmq-v5.2.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/rabbitmq-v5.2.5.asciidoc delete mode 100644 docs/versioned-plugins/inputs/rabbitmq-v6.0.0.asciidoc delete mode 100644 docs/versioned-plugins/inputs/rabbitmq-v6.0.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/rabbitmq-v6.0.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/rackspace-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/rackspace-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/redis-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/redis-v3.1.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/redis-v3.1.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/redis-v3.1.5.asciidoc delete mode 100644 docs/versioned-plugins/inputs/redis-v3.1.6.asciidoc delete mode 100644 docs/versioned-plugins/inputs/relp-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/relp-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/relp-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/relp-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/rss-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/rss-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/rss-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/rss-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/s3sqs-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/salesforce-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/salesforce-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/salesforce-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/snmptrap-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/snmptrap-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/snmptrap-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/snmptrap-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/inputs/sqlite-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/sqlite-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/sqlite-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/sqlite-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/sqs-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/sqs-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/sqs-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/sqs-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/inputs/sqs-v3.0.6.asciidoc delete mode 100644 docs/versioned-plugins/inputs/stdin-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/stdin-v3.2.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/stdin-v3.2.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/stdin-v3.2.5.asciidoc delete mode 100644 docs/versioned-plugins/inputs/stomp-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/stomp-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/stomp-v3.0.5.asciidoc delete mode 100644 
docs/versioned-plugins/inputs/stomp-v3.0.6.asciidoc delete mode 100644 docs/versioned-plugins/inputs/stomp-v3.0.7.asciidoc delete mode 100644 docs/versioned-plugins/inputs/syslog-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/syslog-v3.2.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/syslog-v3.2.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/syslog-v3.2.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/syslog-v3.2.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/twitter-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/twitter-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/twitter-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/inputs/twitter-v3.0.6.asciidoc delete mode 100644 docs/versioned-plugins/inputs/twitter-v3.0.7.asciidoc delete mode 100644 docs/versioned-plugins/inputs/udp-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/udp-v3.1.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/udp-v3.1.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/udp-v3.1.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/udp-v3.2.0.asciidoc delete mode 100644 docs/versioned-plugins/inputs/udp-v3.2.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/unix-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/unix-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/unix-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/inputs/unix-v3.0.6.asciidoc delete mode 100644 docs/versioned-plugins/inputs/varnishlog-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/varnishlog-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/varnishlog-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/varnishlog-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/websocket-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/websocket-v4.0.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/websocket-v4.0.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/websocket-v4.0.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/wmi-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/wmi-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/inputs/wmi-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/inputs/wmi-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/xmpp-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/xmpp-v3.1.3.asciidoc delete mode 100644 docs/versioned-plugins/inputs/xmpp-v3.1.4.asciidoc delete mode 100644 docs/versioned-plugins/inputs/xmpp-v3.1.5.asciidoc delete mode 100644 docs/versioned-plugins/inputs/xmpp-v3.1.6.asciidoc delete mode 100644 docs/versioned-plugins/inputs/zenoss-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/zenoss-v2.0.5.asciidoc delete mode 100644 docs/versioned-plugins/inputs/zenoss-v2.0.6.asciidoc delete mode 100644 docs/versioned-plugins/inputs/zeromq-index.asciidoc delete mode 100644 docs/versioned-plugins/inputs/zeromq-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/beats-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/boundary-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/boundary-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/boundary-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/boundary-v3.0.4.asciidoc delete mode 100644 
docs/versioned-plugins/outputs/circonus-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/circonus-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/circonus-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/circonus-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/cloudwatch-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/cloudwatch-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/outputs/cloudwatch-v3.0.6.asciidoc delete mode 100644 docs/versioned-plugins/outputs/cloudwatch-v3.0.7.asciidoc delete mode 100644 docs/versioned-plugins/outputs/csv-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/csv-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/csv-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/outputs/csv-v3.0.6.asciidoc delete mode 100644 docs/versioned-plugins/outputs/datadog-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/datadog-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/datadog-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/datadog-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/datadog_metrics-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/datadog_metrics-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/datadog_metrics-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/elasticsearch_java-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/elasticsearch_java-v2.1.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/example-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/exec-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/exec-v3.1.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/exec-v3.1.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/exec-v3.1.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/firehose-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/ganglia-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/ganglia-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/ganglia-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/ganglia-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/outputs/gelf-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/gelf-v3.1.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/gelf-v3.1.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/gemfire-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/gemfire-v2.0.5.asciidoc delete mode 100644 docs/versioned-plugins/outputs/gemfire-v2.0.6.asciidoc delete mode 100644 docs/versioned-plugins/outputs/google_bigquery-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/google_bigquery-v3.2.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/google_bigquery-v3.2.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/google_bigquery-v3.2.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/google_cloud_storage-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/google_cloud_storage-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/google_cloud_storage-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/graphtastic-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/graphtastic-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/graphtastic-v3.0.2.asciidoc 
delete mode 100644 docs/versioned-plugins/outputs/graphtastic-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/hipchat-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/hipchat-v4.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/http-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/http-v4.3.0.asciidoc delete mode 100644 docs/versioned-plugins/outputs/http-v4.3.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/http-v4.3.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/http-v4.3.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/http-v4.4.0.asciidoc delete mode 100644 docs/versioned-plugins/outputs/http-v5.0.0.asciidoc delete mode 100644 docs/versioned-plugins/outputs/http-v5.0.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/http-v5.1.0.asciidoc delete mode 100644 docs/versioned-plugins/outputs/http-v5.1.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/http-v5.1.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/icinga-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/influxdb-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/influxdb-v5.0.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/influxdb-v5.0.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/influxdb-v5.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/irc-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/irc-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/irc-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/irc-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/outputs/jira-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/jira-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/jira-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/jms-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/jms-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/juggernaut-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/juggernaut-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/juggernaut-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/juggernaut-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/outputs/kafka-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/kafka-v6.2.0.asciidoc delete mode 100644 docs/versioned-plugins/outputs/kafka-v6.2.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/kafka-v6.2.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/kafka-v7.0.0.asciidoc delete mode 100644 docs/versioned-plugins/outputs/kafka-v7.0.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/kafka-v7.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/kafka-v7.0.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/kafka-v7.0.6.asciidoc delete mode 100644 docs/versioned-plugins/outputs/librato-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/librato-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/librato-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/librato-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/outputs/logentries-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/loggly-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/loggly-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/loggly-v3.0.2.asciidoc 
delete mode 100644 docs/versioned-plugins/outputs/loggly-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/lumberjack-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/lumberjack-v3.1.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/lumberjack-v3.1.5.asciidoc delete mode 100644 docs/versioned-plugins/outputs/metriccatcher-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/metriccatcher-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/metriccatcher-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/metriccatcher-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/monasca_log_api-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/monasca_log_api-v1.0.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/mongodb-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/mongodb-v3.1.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/mongodb-v3.1.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/mongodb-v3.1.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/nagios-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/nagios-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/nagios-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/nagios-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/outputs/nagios_nsca-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/nagios_nsca-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/nagios_nsca-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/nagios_nsca-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/outputs/neo4j-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/neo4j-v2.0.5.asciidoc delete mode 100644 docs/versioned-plugins/outputs/newrelic-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/null-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/null-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/null-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/opentsdb-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/opentsdb-v3.1.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/opentsdb-v3.1.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/opentsdb-v3.1.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/pagerduty-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/pagerduty-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/pagerduty-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/outputs/pagerduty-v3.0.6.asciidoc delete mode 100644 docs/versioned-plugins/outputs/pipe-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/pipe-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/pipe-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/pipe-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/outputs/rabbitmq-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/rabbitmq-v4.0.8.asciidoc delete mode 100644 docs/versioned-plugins/outputs/rabbitmq-v4.0.9.asciidoc delete mode 100644 docs/versioned-plugins/outputs/rabbitmq-v5.0.0.asciidoc delete mode 100644 docs/versioned-plugins/outputs/rabbitmq-v5.0.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/rabbitmq-v5.0.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/rabbitmq-v5.0.3.asciidoc delete mode 100644 
docs/versioned-plugins/outputs/rabbitmq-v5.1.0.asciidoc delete mode 100644 docs/versioned-plugins/outputs/rackspace-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/rackspace-v2.0.5.asciidoc delete mode 100644 docs/versioned-plugins/outputs/rackspace-v2.0.7.asciidoc delete mode 100644 docs/versioned-plugins/outputs/rados-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/redis-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/redis-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/redis-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/outputs/redis-v4.0.0.asciidoc delete mode 100644 docs/versioned-plugins/outputs/redis-v4.0.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/redis-v4.0.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/redis-v4.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/redmine-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/redmine-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/redmine-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/redmine-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/riak-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/riak-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/riak-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/riak-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/riemann-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/riemann-v3.0.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/riemann-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/riemann-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/s3-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/s3-v4.0.8.asciidoc delete mode 100644 docs/versioned-plugins/outputs/s3-v4.0.9.asciidoc delete mode 100644 docs/versioned-plugins/outputs/slack-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/slack-v2.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/slack-v2.1.0.asciidoc delete mode 100644 docs/versioned-plugins/outputs/sns-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/sns-v4.0.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/sns-v4.0.5.asciidoc delete mode 100644 docs/versioned-plugins/outputs/sns-v4.0.6.asciidoc delete mode 100644 docs/versioned-plugins/outputs/solr_http-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/solr_http-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/solr_http-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/solr_http-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/sqs-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/sqs-v4.0.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/sqs-v4.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/sqs-v5.0.0.asciidoc delete mode 100644 docs/versioned-plugins/outputs/sqs-v5.0.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/sqs-v5.0.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/statsd-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/statsd-v3.1.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/statsd-v3.1.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/statsd-v3.1.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/stdout-index.asciidoc delete mode 100644 
docs/versioned-plugins/outputs/stdout-v3.1.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/stdout-v3.1.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/stdout-v3.1.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/stomp-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/stomp-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/outputs/stomp-v3.0.7.asciidoc delete mode 100644 docs/versioned-plugins/outputs/stomp-v3.0.8.asciidoc delete mode 100644 docs/versioned-plugins/outputs/syslog-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/syslog-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/syslog-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/syslog-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/tcp-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/tcp-v4.0.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/tcp-v4.0.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/tcp-v5.0.0.asciidoc delete mode 100644 docs/versioned-plugins/outputs/tcp-v5.0.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/tcp-v5.0.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/timber-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/timber-v1.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/udp-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/udp-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/udp-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/udp-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/outputs/webhdfs-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/webhdfs-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/webhdfs-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/webhdfs-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/outputs/websocket-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/websocket-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/websocket-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/websocket-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/xmpp-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/xmpp-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/xmpp-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/xmpp-v3.0.5.asciidoc delete mode 100644 docs/versioned-plugins/outputs/xmpp-v3.0.6.asciidoc delete mode 100644 docs/versioned-plugins/outputs/xmpp-v3.0.7.asciidoc delete mode 100644 docs/versioned-plugins/outputs/zabbix-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/zabbix-v3.0.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/zabbix-v3.0.3.asciidoc delete mode 100644 docs/versioned-plugins/outputs/zabbix-v3.0.4.asciidoc delete mode 100644 docs/versioned-plugins/outputs/zeromq-index.asciidoc delete mode 100644 docs/versioned-plugins/outputs/zeromq-v3.1.1.asciidoc delete mode 100644 docs/versioned-plugins/outputs/zeromq-v3.1.2.asciidoc delete mode 100644 docs/versioned-plugins/outputs/zookeeper-index.asciidoc diff --git a/docs/versioned-plugins/codecs-index.asciidoc b/docs/versioned-plugins/codecs-index.asciidoc index 8efd1e6ee..02e8a7ba7 100644 --- a/docs/versioned-plugins/codecs-index.asciidoc +++ b/docs/versioned-plugins/codecs-index.asciidoc @@ -3,34 +3,7 @@ include::include/plugin-intro.asciidoc[] 
-include::codecs/avro-index.asciidoc[] include::codecs/cef-index.asciidoc[] -include::codecs/cloudfront-index.asciidoc[] -include::codecs/cloudtrail-index.asciidoc[] -include::codecs/collectd-index.asciidoc[] -include::codecs/compress_spooler-index.asciidoc[] -include::codecs/csv-index.asciidoc[] -include::codecs/dots-index.asciidoc[] -include::codecs/edn-index.asciidoc[] -include::codecs/edn_lines-index.asciidoc[] -include::codecs/es_bulk-index.asciidoc[] -include::codecs/example-index.asciidoc[] -include::codecs/fluent-index.asciidoc[] -include::codecs/graphite-index.asciidoc[] -include::codecs/gzip_lines-index.asciidoc[] include::codecs/json-index.asciidoc[] -include::codecs/json_lines-index.asciidoc[] -include::codecs/json_pretty-index.asciidoc[] -include::codecs/line-index.asciidoc[] -include::codecs/msgpack-index.asciidoc[] -include::codecs/multiline-index.asciidoc[] -include::codecs/netflow-index.asciidoc[] -include::codecs/nmap-index.asciidoc[] -include::codecs/oldlogstashjson-index.asciidoc[] -include::codecs/plain-index.asciidoc[] -include::codecs/pretty-index.asciidoc[] -include::codecs/protobuf-index.asciidoc[] include::codecs/rubydebug-index.asciidoc[] -include::codecs/s3plain-index.asciidoc[] -include::codecs/sflow-index.asciidoc[] diff --git a/docs/versioned-plugins/codecs/avro-index.asciidoc b/docs/versioned-plugins/codecs/avro-index.asciidoc deleted file mode 100644 index 5278efd13..000000000 --- a/docs/versioned-plugins/codecs/avro-index.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -:plugin: avro -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -| <> | 2017-05-19 -|======================================================================= - -include::avro-v3.2.3.asciidoc[] -include::avro-v3.2.2.asciidoc[] -include::avro-v3.2.1.asciidoc[] -include::avro-v3.2.0.asciidoc[] - diff --git a/docs/versioned-plugins/codecs/avro-v3.2.0.asciidoc b/docs/versioned-plugins/codecs/avro-v3.2.0.asciidoc deleted file mode 100644 index 1ebdfe7c6..000000000 --- a/docs/versioned-plugins/codecs/avro-v3.2.0.asciidoc +++ /dev/null @@ -1,103 +0,0 @@ -:plugin: avro -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.2.0 -:release_date: 2017-05-19 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-avro/blob/v3.2.0/CHANGELOG.md -:include_path: ../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Avro - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read serialized Avro records as Logstash events - -This plugin is used to serialize Logstash events as -Avro datums, as well as deserializing Avro datums into -Logstash events. - -==== Encoding - -This codec is for serializing individual Logstash events -as Avro datums that are Avro binary blobs. It does not encode -Logstash events into an Avro file. - - -==== Decoding - -This codec is for deserializing individual Avro records. It is not for reading -Avro files. Avro files have a unique format that must be handled upon input. - - -==== Usage -Example usage with Kafka input. 
- -[source,ruby] ----------------------------------- -input { - kafka { - codec => avro { - schema_uri => "/tmp/schema.avsc" - } - } -} -filter { - ... -} -output { - ... -} ----------------------------------- - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Avro Codec Configuration Options - -This plugin supports the following configuration options plus the <> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-schema_uri>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No -|======================================================================= - -Also see <> for a list of options supported by all -codec plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-schema_uri"] -===== `schema_uri` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -schema path to fetch the schema from. -This can be a 'http' or 'file' scheme URI -example: - -* http - `http://example.com/schema.avsc` -* file - `/path/to/schema.avsc` - -[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] -===== `tag_on_failure` - - * Value type is <> - * Default value is `false` - -tag events with `_avroparsefailure` when decode fails - - - -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/codecs/avro-v3.2.1.asciidoc b/docs/versioned-plugins/codecs/avro-v3.2.1.asciidoc deleted file mode 100644 index b698baa97..000000000 --- a/docs/versioned-plugins/codecs/avro-v3.2.1.asciidoc +++ /dev/null @@ -1,96 +0,0 @@ -:plugin: avro -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.2.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-avro/blob/v3.2.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Avro codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read serialized Avro records as Logstash events - -This plugin is used to serialize Logstash events as -Avro datums, as well as deserializing Avro datums into -Logstash events. - -==== Encoding - -This codec is for serializing individual Logstash events -as Avro datums that are Avro binary blobs. It does not encode -Logstash events into an Avro file. - - -==== Decoding - -This codec is for deserializing individual Avro records. It is not for reading -Avro files. Avro files have a unique format that must be handled upon input. - - -==== Usage -Example usage with Kafka input. - -[source,ruby] ----------------------------------- -input { - kafka { - codec => avro { - schema_uri => "/tmp/schema.avsc" - } - } -} -filter { - ... -} -output { - ... 
-} ----------------------------------- - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Avro Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-schema_uri>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-schema_uri"] -===== `schema_uri` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -schema path to fetch the schema from. -This can be a 'http' or 'file' scheme URI -example: - -* http - `http://example.com/schema.avsc` -* file - `/path/to/schema.avsc` - -[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] -===== `tag_on_failure` - - * Value type is <> - * Default value is `false` - -tag events with `_avroparsefailure` when decode fails - - diff --git a/docs/versioned-plugins/codecs/avro-v3.2.2.asciidoc b/docs/versioned-plugins/codecs/avro-v3.2.2.asciidoc deleted file mode 100644 index 3deabc7a8..000000000 --- a/docs/versioned-plugins/codecs/avro-v3.2.2.asciidoc +++ /dev/null @@ -1,96 +0,0 @@ -:plugin: avro -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.2.2 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-avro/blob/v3.2.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Avro codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read serialized Avro records as Logstash events - -This plugin is used to serialize Logstash events as -Avro datums, as well as deserializing Avro datums into -Logstash events. - -==== Encoding - -This codec is for serializing individual Logstash events -as Avro datums that are Avro binary blobs. It does not encode -Logstash events into an Avro file. - - -==== Decoding - -This codec is for deserializing individual Avro records. It is not for reading -Avro files. Avro files have a unique format that must be handled upon input. - - -==== Usage -Example usage with Kafka input. - -[source,ruby] ----------------------------------- -input { - kafka { - codec => avro { - schema_uri => "/tmp/schema.avsc" - } - } -} -filter { - ... -} -output { - ... -} ----------------------------------- - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Avro Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-schema_uri>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-schema_uri"] -===== `schema_uri` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -schema path to fetch the schema from. 
-This can be a 'http' or 'file' scheme URI -example: - -* http - `http://example.com/schema.avsc` -* file - `/path/to/schema.avsc` - -[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] -===== `tag_on_failure` - - * Value type is <> - * Default value is `false` - -tag events with `_avroparsefailure` when decode fails - - diff --git a/docs/versioned-plugins/codecs/avro-v3.2.3.asciidoc b/docs/versioned-plugins/codecs/avro-v3.2.3.asciidoc deleted file mode 100644 index 6af43c511..000000000 --- a/docs/versioned-plugins/codecs/avro-v3.2.3.asciidoc +++ /dev/null @@ -1,96 +0,0 @@ -:plugin: avro -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.2.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-avro/blob/v3.2.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Avro codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read serialized Avro records as Logstash events - -This plugin is used to serialize Logstash events as -Avro datums, as well as deserializing Avro datums into -Logstash events. - -==== Encoding - -This codec is for serializing individual Logstash events -as Avro datums that are Avro binary blobs. It does not encode -Logstash events into an Avro file. - - -==== Decoding - -This codec is for deserializing individual Avro records. It is not for reading -Avro files. Avro files have a unique format that must be handled upon input. - - -==== Usage -Example usage with Kafka input. - -[source,ruby] ----------------------------------- -input { - kafka { - codec => avro { - schema_uri => "/tmp/schema.avsc" - } - } -} -filter { - ... -} -output { - ... -} ----------------------------------- - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Avro Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-schema_uri>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-schema_uri"] -===== `schema_uri` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -schema path to fetch the schema from. 
-This can be a 'http' or 'file' scheme URI -example: - -* http - `http://example.com/schema.avsc` -* file - `/path/to/schema.avsc` - -[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] -===== `tag_on_failure` - - * Value type is <> - * Default value is `false` - -tag events with `_avroparsefailure` when decode fails - - diff --git a/docs/versioned-plugins/codecs/cloudfront-index.asciidoc b/docs/versioned-plugins/codecs/cloudfront-index.asciidoc deleted file mode 100644 index 197e7ac9d..000000000 --- a/docs/versioned-plugins/codecs/cloudfront-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: cloudfront -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::cloudfront-v3.0.3.asciidoc[] -include::cloudfront-v3.0.2.asciidoc[] -include::cloudfront-v3.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/codecs/cloudfront-v3.0.1.asciidoc b/docs/versioned-plugins/codecs/cloudfront-v3.0.1.asciidoc deleted file mode 100644 index 736fae151..000000000 --- a/docs/versioned-plugins/codecs/cloudfront-v3.0.1.asciidoc +++ /dev/null @@ -1,52 +0,0 @@ -:plugin: cloudfront -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-cloudfront/blob/v3.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Cloudfront codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec will read cloudfront encoded content - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Cloudfront Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", 
"ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this codec. Examples include "UTF-8" and -"CP1252" - -JSON requires valid UTF-8 strings, but in some cases, software that -emits JSON does so in another encoding (nxlog, for example). In -weird cases like this, you can set the charset setting to the -actual encoding of the text and logstash will convert it for you. 
- -For nxlog users, you'll want to set this to "CP1252" - - diff --git a/docs/versioned-plugins/codecs/cloudfront-v3.0.2.asciidoc b/docs/versioned-plugins/codecs/cloudfront-v3.0.2.asciidoc deleted file mode 100644 index ecf6dd755..000000000 --- a/docs/versioned-plugins/codecs/cloudfront-v3.0.2.asciidoc +++ /dev/null @@ -1,52 +0,0 @@ -:plugin: cloudfront -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.2 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-cloudfront/blob/v3.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Cloudfront codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec will read cloudfront encoded content - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Cloudfront Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-charset"] 
-===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this codec. Examples include "UTF-8" and -"CP1252" - -JSON requires valid UTF-8 strings, but in some cases, software that -emits JSON does so in another encoding (nxlog, for example). In -weird cases like this, you can set the charset setting to the -actual encoding of the text and logstash will convert it for you. - -For nxlog users, you'll want to set this to "CP1252" - - diff --git a/docs/versioned-plugins/codecs/cloudfront-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/cloudfront-v3.0.3.asciidoc deleted file mode 100644 index d65f2ae39..000000000 --- a/docs/versioned-plugins/codecs/cloudfront-v3.0.3.asciidoc +++ /dev/null @@ -1,52 +0,0 @@ -:plugin: cloudfront -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-cloudfront/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Cloudfront codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec will read cloudfront encoded content - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Cloudfront Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, 
`macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this codec. Examples include "UTF-8" and -"CP1252" - -JSON requires valid UTF-8 strings, but in some cases, software that -emits JSON does so in another encoding (nxlog, for example). In -weird cases like this, you can set the charset setting to the -actual encoding of the text and logstash will convert it for you. - -For nxlog users, you'll want to set this to "CP1252" - - diff --git a/docs/versioned-plugins/codecs/cloudtrail-index.asciidoc b/docs/versioned-plugins/codecs/cloudtrail-index.asciidoc deleted file mode 100644 index 67a3c393c..000000000 --- a/docs/versioned-plugins/codecs/cloudtrail-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: cloudtrail -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::cloudtrail-v3.0.3.asciidoc[] -include::cloudtrail-v3.0.2.asciidoc[] - diff --git a/docs/versioned-plugins/codecs/cloudtrail-v3.0.2.asciidoc b/docs/versioned-plugins/codecs/cloudtrail-v3.0.2.asciidoc deleted file mode 100644 index de5d86860..000000000 --- a/docs/versioned-plugins/codecs/cloudtrail-v3.0.2.asciidoc +++ /dev/null @@ -1,44 +0,0 @@ -:plugin: cloudtrail -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.2 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-cloudtrail/blob/v3.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Cloudtrail codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This is the base class for logstash codecs. 
- -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Cloudtrail Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, 
`Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - - - - diff --git a/docs/versioned-plugins/codecs/cloudtrail-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/cloudtrail-v3.0.3.asciidoc deleted file mode 100644 index e9be8a358..000000000 --- a/docs/versioned-plugins/codecs/cloudtrail-v3.0.3.asciidoc +++ /dev/null @@ -1,44 +0,0 @@ -:plugin: cloudtrail -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-cloudtrail/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Cloudtrail codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This is the base class for logstash codecs. 
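-
-In practice this codec is typically paired with an input that fetches the
-CloudTrail log archives AWS delivers to S3. A minimal sketch, assuming the
-`s3` input plugin is installed and using a hypothetical bucket name:
-
-[source,ruby]
-    input {
-      s3 {
-        bucket => "my-cloudtrail-logs"   # hypothetical bucket receiving CloudTrail deliveries
-        codec => cloudtrail { }
-      }
-    }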
- -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Cloudtrail Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, 
`Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
- * Default value is `"UTF-8"`
-
-
-
-
diff --git a/docs/versioned-plugins/codecs/collectd-index.asciidoc b/docs/versioned-plugins/codecs/collectd-index.asciidoc
deleted file mode 100644
index 237e60ab0..000000000
--- a/docs/versioned-plugins/codecs/collectd-index.asciidoc
+++ /dev/null
@@ -1,20 +0,0 @@
-:plugin: collectd
-:type: codec
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-15
-| <> | 2017-06-30
-| <> | 2017-06-23
-| <> | 2017-06-06
-|=======================================================================
-
-include::collectd-v3.0.8.asciidoc[]
-include::collectd-v3.0.7.asciidoc[]
-include::collectd-v3.0.6.asciidoc[]
-include::collectd-v3.0.5.asciidoc[]
-include::collectd-v3.0.4.asciidoc[]
-
diff --git a/docs/versioned-plugins/codecs/collectd-v3.0.4.asciidoc b/docs/versioned-plugins/codecs/collectd-v3.0.4.asciidoc
deleted file mode 100644
index 075bf0b6b..000000000
--- a/docs/versioned-plugins/codecs/collectd-v3.0.4.asciidoc
+++ /dev/null
@@ -1,147 +0,0 @@
-:plugin: collectd
-:type: codec
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.4
-:release_date: 2017-06-06
-:changelog_url: https://github.com/logstash-plugins/logstash-codec-collectd/blob/v3.0.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Collectd codec plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Read events from the collectd binary protocol over the network via udp.
-See https://collectd.org/wiki/index.php/Binary_protocol
-
-Configuration in your Logstash configuration file can be as simple as:
-[source,ruby]
-    input {
-      udp {
-        port => 25826
-        buffer_size => 1452
-        codec => collectd { }
-      }
-    }
-
-A sample `collectd.conf` to send to Logstash might be:
-[source,xml]
-    Hostname    "host.example.com"
-    LoadPlugin interface
-    LoadPlugin load
-    LoadPlugin memory
-    LoadPlugin network
-    <Plugin interface>
-        Interface "eth0"
-        IgnoreSelected false
-    </Plugin>
-    <Plugin network>
-        <Server "10.0.0.1" "25826">
-        </Server>
-    </Plugin>
-
-Be sure to replace `10.0.0.1` with the IP of your Logstash instance.
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Collectd Codec Configuration Options
-
-This plugin supports the following configuration options plus the <> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-authfile>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-nan_handling>> |<>, one of `["change_value", "warn", "drop"]`|No
-| <<{version}-plugins-{type}s-{plugin}-nan_tag>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-nan_value>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-prune_intervals>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-security_level>> |<>, one of `["None", "Sign", "Encrypt"]`|No
-| <<{version}-plugins-{type}s-{plugin}-typesdb>> |<>|No
-|=======================================================================
-
-Also see <> for a list of options supported by all
-codec plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-authfile"]
-===== `authfile`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to the authentication file. This file should have the same format as
-the http://collectd.org/documentation/manpages/collectd.conf.5.shtml#authfile_filename[AuthFile]
-in collectd. You only need to set this option if the `security_level` is set to
-`Sign` or `Encrypt`.
-
-[id="{version}-plugins-{type}s-{plugin}-nan_handling"]
-===== `nan_handling`
-
- * Value can be any of: `change_value`, `warn`, `drop`
- * Default value is `"change_value"`
-
-What to do when a value in the event is `NaN` (Not a Number):
-
-- `change_value` (default): change the `NaN` to the value of the `nan_value` option and add `nan_tag` as a tag
-- `warn`: change the `NaN` to the value of the `nan_value` option, print a warning to the log, and add `nan_tag` as a tag
-- `drop`: drop the event containing the `NaN` (this only drops the single event, not the whole packet)
-
-[id="{version}-plugins-{type}s-{plugin}-nan_tag"]
-===== `nan_tag`
-
- * Value type is <>
- * Default value is `"_collectdNaN"`
-
-The tag to add to the event if a `NaN` value was found.
-Set this to an empty string (`''`) if you don't want to tag.
-
-[id="{version}-plugins-{type}s-{plugin}-nan_value"]
-===== `nan_value`
-
- * Value type is <>
- * Default value is `0`
-
-Only relevant when `nan_handling` is set to `change_value`.
-Change `NaN` to this configured value.
-
-[id="{version}-plugins-{type}s-{plugin}-prune_intervals"]
-===== `prune_intervals`
-
- * Value type is <>
- * Default value is `true`
-
-Prune interval records. Defaults to `true`.
-
-[id="{version}-plugins-{type}s-{plugin}-security_level"]
-===== `security_level`
-
- * Value can be any of: `None`, `Sign`, `Encrypt`
- * Default value is `"None"`
-
-Security Level. Default is `None`. This setting mirrors the setting from the
-collectd https://collectd.org/wiki/index.php/Plugin:Network[Network plugin].
-
-[id="{version}-plugins-{type}s-{plugin}-typesdb"]
-===== `typesdb`
-
- * Value type is <>
- * There is no default value for this setting.
-
-File path(s) to collectd `types.db` to use.
-The last matching pattern wins if you have identical pattern names in multiple files.
-If no types.db is provided, the included `types.db` will be used (currently 5.4.0).
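-
-As a sketch of how the signing options described above fit together, the
-following input (with a hypothetical `authfile` path) verifies signed packets
-before decoding them:
-
-[source,ruby]
-    input {
-      udp {
-        port => 25826
-        buffer_size => 1452
-        codec => collectd {
-          security_level => "Sign"
-          # Hypothetical path; the file uses the same format as collectd's AuthFile.
-          authfile => "/etc/collectd/auth_file"
-        }
-      }
-    }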
-
-
-
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/codecs/collectd-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/collectd-v3.0.5.asciidoc
deleted file mode 100644
index 4ab88c8d9..000000000
--- a/docs/versioned-plugins/codecs/collectd-v3.0.5.asciidoc
+++ /dev/null
@@ -1,140 +0,0 @@
-:plugin: collectd
-:type: codec
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.5
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-codec-collectd/blob/v3.0.5/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Collectd codec plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Read events from the collectd binary protocol over the network via udp.
-See https://collectd.org/wiki/index.php/Binary_protocol
-
-Configuration in your Logstash configuration file can be as simple as:
-[source,ruby]
-    input {
-      udp {
-        port => 25826
-        buffer_size => 1452
-        codec => collectd { }
-      }
-    }
-
-A sample `collectd.conf` to send to Logstash might be:
-[source,xml]
-    Hostname    "host.example.com"
-    LoadPlugin interface
-    LoadPlugin load
-    LoadPlugin memory
-    LoadPlugin network
-    <Plugin interface>
-        Interface "eth0"
-        IgnoreSelected false
-    </Plugin>
-    <Plugin network>
-        <Server "10.0.0.1" "25826">
-        </Server>
-    </Plugin>
-
-Be sure to replace `10.0.0.1` with the IP of your Logstash instance.
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Collectd Codec Configuration Options
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-authfile>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-nan_handling>> |<>, one of `["change_value", "warn", "drop"]`|No
-| <<{version}-plugins-{type}s-{plugin}-nan_tag>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-nan_value>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-prune_intervals>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-security_level>> |<>, one of `["None", "Sign", "Encrypt"]`|No
-| <<{version}-plugins-{type}s-{plugin}-typesdb>> |<>|No
-|=======================================================================
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-authfile"]
-===== `authfile`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to the authentication file. This file should have the same format as
-the http://collectd.org/documentation/manpages/collectd.conf.5.shtml#authfile_filename[AuthFile]
-in collectd. You only need to set this option if the `security_level` is set to
-`Sign` or `Encrypt`.
-
-[id="{version}-plugins-{type}s-{plugin}-nan_handling"]
-===== `nan_handling`
-
- * Value can be any of: `change_value`, `warn`, `drop`
- * Default value is `"change_value"`
-
-What to do when a value in the event is `NaN` (Not a Number):
-
-- `change_value` (default): change the `NaN` to the value of the `nan_value` option and add `nan_tag` as a tag
-- `warn`: change the `NaN` to the value of the `nan_value` option, print a warning to the log, and add `nan_tag` as a tag
-- `drop`: drop the event containing the `NaN` (this only drops the single event, not the whole packet)
-
-[id="{version}-plugins-{type}s-{plugin}-nan_tag"]
-===== `nan_tag`
-
- * Value type is <>
- * Default value is `"_collectdNaN"`
-
-The tag to add to the event if a `NaN` value was found.
-Set this to an empty string (`''`) if you don't want to tag.
-
-[id="{version}-plugins-{type}s-{plugin}-nan_value"]
-===== `nan_value`
-
- * Value type is <>
- * Default value is `0`
-
-Only relevant when `nan_handling` is set to `change_value`.
-Change `NaN` to this configured value.
-
-[id="{version}-plugins-{type}s-{plugin}-prune_intervals"]
-===== `prune_intervals`
-
- * Value type is <>
- * Default value is `true`
-
-Prune interval records. Defaults to `true`.
-
-[id="{version}-plugins-{type}s-{plugin}-security_level"]
-===== `security_level`
-
- * Value can be any of: `None`, `Sign`, `Encrypt`
- * Default value is `"None"`
-
-Security Level. Default is `None`. This setting mirrors the setting from the
-collectd https://collectd.org/wiki/index.php/Plugin:Network[Network plugin].
-
-[id="{version}-plugins-{type}s-{plugin}-typesdb"]
-===== `typesdb`
-
- * Value type is <>
- * There is no default value for this setting.
-
-File path(s) to collectd `types.db` to use.
-The last matching pattern wins if you have identical pattern names in multiple files.
-If no types.db is provided, the included `types.db` will be used (currently 5.4.0).
-
-
diff --git a/docs/versioned-plugins/codecs/collectd-v3.0.6.asciidoc b/docs/versioned-plugins/codecs/collectd-v3.0.6.asciidoc
deleted file mode 100644
index 2369ccf42..000000000
--- a/docs/versioned-plugins/codecs/collectd-v3.0.6.asciidoc
+++ /dev/null
@@ -1,140 +0,0 @@
-:plugin: collectd
-:type: codec
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.6
-:release_date: 2017-06-30
-:changelog_url: https://github.com/logstash-plugins/logstash-codec-collectd/blob/v3.0.6/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Collectd codec plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Read events from the collectd binary protocol over the network via udp.
-See https://collectd.org/wiki/index.php/Binary_protocol
-
-Configuration in your Logstash configuration file can be as simple as:
-[source,ruby]
-    input {
-      udp {
-        port => 25826
-        buffer_size => 1452
-        codec => collectd { }
-      }
-    }
-
-A sample `collectd.conf` to send to Logstash might be:
-[source,xml]
-    Hostname    "host.example.com"
-    LoadPlugin interface
-    LoadPlugin load
-    LoadPlugin memory
-    LoadPlugin network
-    <Plugin interface>
-        Interface "eth0"
-        IgnoreSelected false
-    </Plugin>
-    <Plugin network>
-        <Server "10.0.0.1" "25826">
-        </Server>
-    </Plugin>
-
-Be sure to replace `10.0.0.1` with the IP of your Logstash instance.
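-
-While testing, it can help to confirm what the decoded events look like by
-mirroring them to the console. A minimal sketch, assuming a plain stdout
-output is acceptable during debugging:
-
-[source,ruby]
-    output {
-      stdout {
-        codec => rubydebug   # pretty-prints each decoded collectd event
-      }
-    }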
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Collectd Codec Configuration Options
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-authfile>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-nan_handling>> |<>, one of `["change_value", "warn", "drop"]`|No
-| <<{version}-plugins-{type}s-{plugin}-nan_tag>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-nan_value>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-prune_intervals>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-security_level>> |<>, one of `["None", "Sign", "Encrypt"]`|No
-| <<{version}-plugins-{type}s-{plugin}-typesdb>> |<>|No
-|=======================================================================
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-authfile"]
-===== `authfile`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to the authentication file. This file should have the same format as
-the http://collectd.org/documentation/manpages/collectd.conf.5.shtml#authfile_filename[AuthFile]
-in collectd. You only need to set this option if the `security_level` is set to
-`Sign` or `Encrypt`.
-
-[id="{version}-plugins-{type}s-{plugin}-nan_handling"]
-===== `nan_handling`
-
- * Value can be any of: `change_value`, `warn`, `drop`
- * Default value is `"change_value"`
-
-What to do when a value in the event is `NaN` (Not a Number):
-
-- `change_value` (default): change the `NaN` to the value of the `nan_value` option and add `nan_tag` as a tag
-- `warn`: change the `NaN` to the value of the `nan_value` option, print a warning to the log, and add `nan_tag` as a tag
-- `drop`: drop the event containing the `NaN` (this only drops the single event, not the whole packet)
-
-[id="{version}-plugins-{type}s-{plugin}-nan_tag"]
-===== `nan_tag`
-
- * Value type is <>
- * Default value is `"_collectdNaN"`
-
-The tag to add to the event if a `NaN` value was found.
-Set this to an empty string (`''`) if you don't want to tag.
-
-[id="{version}-plugins-{type}s-{plugin}-nan_value"]
-===== `nan_value`
-
- * Value type is <>
- * Default value is `0`
-
-Only relevant when `nan_handling` is set to `change_value`.
-Change `NaN` to this configured value.
-
-[id="{version}-plugins-{type}s-{plugin}-prune_intervals"]
-===== `prune_intervals`
-
- * Value type is <>
- * Default value is `true`
-
-Prune interval records. Defaults to `true`.
-
-[id="{version}-plugins-{type}s-{plugin}-security_level"]
-===== `security_level`
-
- * Value can be any of: `None`, `Sign`, `Encrypt`
- * Default value is `"None"`
-
-Security Level. Default is `None`. This setting mirrors the setting from the
-collectd https://collectd.org/wiki/index.php/Plugin:Network[Network plugin].
-
-[id="{version}-plugins-{type}s-{plugin}-typesdb"]
-===== `typesdb`
-
- * Value type is <>
- * There is no default value for this setting.
-
-File path(s) to collectd `types.db` to use.
-The last matching pattern wins if you have identical pattern names in multiple files.
-If no types.db is provided, the included `types.db` will be used (currently 5.4.0).
-
-
diff --git a/docs/versioned-plugins/codecs/collectd-v3.0.7.asciidoc b/docs/versioned-plugins/codecs/collectd-v3.0.7.asciidoc
deleted file mode 100644
index 6e0a0e0f1..000000000
--- a/docs/versioned-plugins/codecs/collectd-v3.0.7.asciidoc
+++ /dev/null
@@ -1,140 +0,0 @@
-:plugin: collectd
-:type: codec
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.7
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-codec-collectd/blob/v3.0.7/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Collectd codec plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Read events from the collectd binary protocol over the network via udp.
-See https://collectd.org/wiki/index.php/Binary_protocol
-
-Configuration in your Logstash configuration file can be as simple as:
-[source,ruby]
-    input {
-      udp {
-        port => 25826
-        buffer_size => 1452
-        codec => collectd { }
-      }
-    }
-
-A sample `collectd.conf` to send to Logstash might be:
-[source,xml]
-    Hostname    "host.example.com"
-    LoadPlugin interface
-    LoadPlugin load
-    LoadPlugin memory
-    LoadPlugin network
-    <Plugin interface>
-        Interface "eth0"
-        IgnoreSelected false
-    </Plugin>
-    <Plugin network>
-        <Server "10.0.0.1" "25826">
-        </Server>
-    </Plugin>
-
-Be sure to replace `10.0.0.1` with the IP of your Logstash instance.
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Collectd Codec Configuration Options
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-authfile>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-nan_handling>> |<>, one of `["change_value", "warn", "drop"]`|No
-| <<{version}-plugins-{type}s-{plugin}-nan_tag>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-nan_value>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-prune_intervals>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-security_level>> |<>, one of `["None", "Sign", "Encrypt"]`|No
-| <<{version}-plugins-{type}s-{plugin}-typesdb>> |<>|No
-|=======================================================================
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-authfile"]
-===== `authfile`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to the authentication file. This file should have the same format as
-the http://collectd.org/documentation/manpages/collectd.conf.5.shtml#authfile_filename[AuthFile]
-in collectd. You only need to set this option if the `security_level` is set to
-`Sign` or `Encrypt`.
-
-[id="{version}-plugins-{type}s-{plugin}-nan_handling"]
-===== `nan_handling`
-
- * Value can be any of: `change_value`, `warn`, `drop`
- * Default value is `"change_value"`
-
-What to do when a value in the event is `NaN` (Not a Number):
-
-- `change_value` (default): change the `NaN` to the value of the `nan_value` option and add `nan_tag` as a tag
-- `warn`: change the `NaN` to the value of the `nan_value` option, print a warning to the log, and add `nan_tag` as a tag
-- `drop`: drop the event containing the `NaN` (this only drops the single event, not the whole packet)
-
-[id="{version}-plugins-{type}s-{plugin}-nan_tag"]
-===== `nan_tag`
-
- * Value type is <>
- * Default value is `"_collectdNaN"`
-
-The tag to add to the event if a `NaN` value was found.
-Set this to an empty string (`''`) if you don't want to tag.
-
-[id="{version}-plugins-{type}s-{plugin}-nan_value"]
-===== `nan_value`
-
- * Value type is <>
- * Default value is `0`
-
-Only relevant when `nan_handling` is set to `change_value`.
-Change `NaN` to this configured value.
-
-[id="{version}-plugins-{type}s-{plugin}-prune_intervals"]
-===== `prune_intervals`
-
- * Value type is <>
- * Default value is `true`
-
-Prune interval records. Defaults to `true`.
-
-[id="{version}-plugins-{type}s-{plugin}-security_level"]
-===== `security_level`
-
- * Value can be any of: `None`, `Sign`, `Encrypt`
- * Default value is `"None"`
-
-Security Level. Default is `None`. This setting mirrors the setting from the
-collectd https://collectd.org/wiki/index.php/Plugin:Network[Network plugin].
-
-[id="{version}-plugins-{type}s-{plugin}-typesdb"]
-===== `typesdb`
-
- * Value type is <>
- * There is no default value for this setting.
-
-File path(s) to collectd `types.db` to use.
-The last matching pattern wins if you have identical pattern names in multiple files.
-If no types.db is provided, the included `types.db` will be used (currently 5.4.0).
-
-
diff --git a/docs/versioned-plugins/codecs/collectd-v3.0.8.asciidoc b/docs/versioned-plugins/codecs/collectd-v3.0.8.asciidoc
deleted file mode 100644
index 0c4a98655..000000000
--- a/docs/versioned-plugins/codecs/collectd-v3.0.8.asciidoc
+++ /dev/null
@@ -1,140 +0,0 @@
-:plugin: collectd
-:type: codec
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.8
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-codec-collectd/blob/v3.0.8/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Collectd codec plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Read events from the collectd binary protocol over the network via udp.
-See https://collectd.org/wiki/index.php/Binary_protocol
-
-Configuration in your Logstash configuration file can be as simple as:
-[source,ruby]
-    input {
-      udp {
-        port => 25826
-        buffer_size => 1452
-        codec => collectd { }
-      }
-    }
-
-A sample `collectd.conf` to send to Logstash might be:
-[source,xml]
-    Hostname    "host.example.com"
-    LoadPlugin interface
-    LoadPlugin load
-    LoadPlugin memory
-    LoadPlugin network
-    <Plugin interface>
-        Interface "eth0"
-        IgnoreSelected false
-    </Plugin>
-    <Plugin network>
-        <Server "10.0.0.1" "25826">
-        </Server>
-    </Plugin>
-
-Be sure to replace `10.0.0.1` with the IP of your Logstash instance.
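-
-If your collectd agents define custom data types, point the codec at the
-matching `types.db` files via the `typesdb` option described below. A sketch
-with hypothetical paths (the last matching pattern wins):
-
-[source,ruby]
-    input {
-      udp {
-        port => 25826
-        codec => collectd {
-          # Hypothetical paths; the bundled types.db is used when this is unset.
-          typesdb => ["/usr/share/collectd/types.db", "/etc/collectd/custom-types.db"]
-        }
-      }
-    }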
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Collectd Codec Configuration Options
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-authfile>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-nan_handling>> |<>, one of `["change_value", "warn", "drop"]`|No
-| <<{version}-plugins-{type}s-{plugin}-nan_tag>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-nan_value>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-prune_intervals>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-security_level>> |<>, one of `["None", "Sign", "Encrypt"]`|No
-| <<{version}-plugins-{type}s-{plugin}-typesdb>> |<>|No
-|=======================================================================
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-authfile"]
-===== `authfile`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to the authentication file. This file should have the same format as
-the http://collectd.org/documentation/manpages/collectd.conf.5.shtml#authfile_filename[AuthFile]
-in collectd. You only need to set this option if the `security_level` is set to
-`Sign` or `Encrypt`.
-
-[id="{version}-plugins-{type}s-{plugin}-nan_handling"]
-===== `nan_handling`
-
- * Value can be any of: `change_value`, `warn`, `drop`
- * Default value is `"change_value"`
-
-What to do when a value in the event is `NaN` (Not a Number):
-
-- `change_value` (default): change the `NaN` to the value of the `nan_value` option and add `nan_tag` as a tag
-- `warn`: change the `NaN` to the value of the `nan_value` option, print a warning to the log, and add `nan_tag` as a tag
-- `drop`: drop the event containing the `NaN` (this only drops the single event, not the whole packet)
-
-[id="{version}-plugins-{type}s-{plugin}-nan_tag"]
-===== `nan_tag`
-
- * Value type is <>
- * Default value is `"_collectdNaN"`
-
-The tag to add to the event if a `NaN` value was found.
-Set this to an empty string (`''`) if you don't want to tag.
-
-[id="{version}-plugins-{type}s-{plugin}-nan_value"]
-===== `nan_value`
-
- * Value type is <>
- * Default value is `0`
-
-Only relevant when `nan_handling` is set to `change_value`.
-Change `NaN` to this configured value.
-
-[id="{version}-plugins-{type}s-{plugin}-prune_intervals"]
-===== `prune_intervals`
-
- * Value type is <>
- * Default value is `true`
-
-Prune interval records. Defaults to `true`.
-
-[id="{version}-plugins-{type}s-{plugin}-security_level"]
-===== `security_level`
-
- * Value can be any of: `None`, `Sign`, `Encrypt`
- * Default value is `"None"`
-
-Security Level. Default is `None`. This setting mirrors the setting from the
-collectd https://collectd.org/wiki/index.php/Plugin:Network[Network plugin].
-
-[id="{version}-plugins-{type}s-{plugin}-typesdb"]
-===== `typesdb`
-
- * Value type is <>
- * There is no default value for this setting.
-
-File path(s) to collectd `types.db` to use.
-The last matching pattern wins if you have identical pattern names in multiple files.
-If no types.db is provided, the included `types.db` will be used (currently 5.4.0).
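-
-For instance, to silently discard any reading that arrives as `NaN` rather
-than rewriting it, a minimal sketch:
-
-[source,ruby]
-    input {
-      udp {
-        port => 25826
-        codec => collectd {
-          nan_handling => "drop"   # drops only the offending event, not the whole packet
-        }
-      }
-    }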
-
-
diff --git a/docs/versioned-plugins/codecs/compress_spooler-index.asciidoc b/docs/versioned-plugins/codecs/compress_spooler-index.asciidoc
deleted file mode 100644
index 985b344f0..000000000
--- a/docs/versioned-plugins/codecs/compress_spooler-index.asciidoc
+++ /dev/null
@@ -1,14 +0,0 @@
-:plugin: compress_spooler
-:type: codec
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-08-15
-| <> | 2017-06-23
-|=======================================================================
-
-include::compress_spooler-v2.0.6.asciidoc[]
-include::compress_spooler-v2.0.5.asciidoc[]
-
diff --git a/docs/versioned-plugins/codecs/compress_spooler-v2.0.5.asciidoc b/docs/versioned-plugins/codecs/compress_spooler-v2.0.5.asciidoc
deleted file mode 100644
index 1984e58f2..000000000
--- a/docs/versioned-plugins/codecs/compress_spooler-v2.0.5.asciidoc
+++ /dev/null
@@ -1,64 +0,0 @@
-:plugin: compress_spooler
-:type: codec
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v2.0.5
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-codec-compress_spooler/blob/v2.0.5/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Compress_spooler codec plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Compress_spooler Codec Configuration Options
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-compress_level>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-min_flush_time>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-spool_size>> |<>|No
-|=======================================================================
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-compress_level"]
-===== `compress_level`
-
- * Value type is <>
- * Default value is `6`
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-min_flush_time"]
-===== `min_flush_time`
-
- * Value type is <>
- * Default value is `0`
-
-The amount of time in seconds since the last flush before a flush is forced
-on the next event.
-Values smaller than 0 disable time-based flushing.
-
-[id="{version}-plugins-{type}s-{plugin}-spool_size"]
-===== `spool_size`
-
- * Value type is <>
- * Default value is `50`
-
-
-
-
diff --git a/docs/versioned-plugins/codecs/compress_spooler-v2.0.6.asciidoc b/docs/versioned-plugins/codecs/compress_spooler-v2.0.6.asciidoc
deleted file mode 100644
index 2ec27c0b8..000000000
--- a/docs/versioned-plugins/codecs/compress_spooler-v2.0.6.asciidoc
+++ /dev/null
@@ -1,64 +0,0 @@
-:plugin: compress_spooler
-:type: codec
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v2.0.6
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-codec-compress_spooler/blob/v2.0.6/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Compress_spooler codec plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Compress_spooler Codec Configuration Options
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-compress_level>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-min_flush_time>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-spool_size>> |<>|No
-|=======================================================================
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-compress_level"]
-===== `compress_level`
-
- * Value type is <>
- * Default value is `6`
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-min_flush_time"]
-===== `min_flush_time`
-
- * Value type is <>
- * Default value is `0`
-
-The amount of time in seconds since the last flush before a flush is forced
-on the next event.
-Values smaller than 0 disable time-based flushing.
-
-[id="{version}-plugins-{type}s-{plugin}-spool_size"]
-===== `spool_size`
-
- * Value type is <>
- * Default value is `50`
-
-
-
-
diff --git a/docs/versioned-plugins/codecs/csv-index.asciidoc b/docs/versioned-plugins/codecs/csv-index.asciidoc
deleted file mode 100644
index 431aa2609..000000000
--- a/docs/versioned-plugins/codecs/csv-index.asciidoc
+++ /dev/null
@@ -1,14 +0,0 @@
-:plugin: csv
-:type: codec
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-08-15
-| <> | 2017-06-23
-|=======================================================================
-
-include::csv-v0.1.4.asciidoc[]
-include::csv-v0.1.3.asciidoc[]
-
diff --git a/docs/versioned-plugins/codecs/csv-v0.1.3.asciidoc b/docs/versioned-plugins/codecs/csv-v0.1.3.asciidoc
deleted file mode 100644
index 16c9c2efd..000000000
--- a/docs/versioned-plugins/codecs/csv-v0.1.3.asciidoc
+++ /dev/null
@@ -1,132 +0,0 @@
-:plugin: csv
-:type: codec
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v0.1.3
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-codec-csv/blob/v0.1.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Csv codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Csv Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-autogenerate_column_names>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -| <<{version}-plugins-{type}s-{plugin}-columns>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-convert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_headers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-quote_char>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-separator>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-skip_empty_columns>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-autogenerate_column_names"] -===== `autogenerate_column_names` - - * Value type is <> - * Default value is `true` - -Define whether column names should autogenerated or not. -Defaults to true. If set to false, columns not having a header specified will not be parsed. 
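- -As an illustrative sketch (the `stdin` input and the column names are arbitrary examples), disabling autogeneration so that only explicitly declared columns are parsed might look like: -[source,ruby] -    input { -      stdin { -        codec => csv { -          autogenerate_column_names => false -          columns => ["timestamp", "level", "message"] -        } -      } -    }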
- -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this codec. Examples include "UTF-8" and -"CP1252". - -[id="{version}-plugins-{type}s-{plugin}-columns"] -===== `columns` - - * Value type is <> - * Default value is `[]` - -Define a list of column names (in the order they appear in the CSV, -as if it were a header line). If `columns` is not configured, or there -are not enough columns specified, the default column names are -"column1", "column2", etc. In the case that there are more columns -in the data than specified in this column list, extra columns will be auto-numbered: -(e.g. "user_defined_1", "user_defined_2", "column3", "column4", etc.) - -[id="{version}-plugins-{type}s-{plugin}-convert"] -===== `convert` - - * Value type is <> - * Default value is `{}` - -Define a set of datatype conversions to be applied to columns. -Possible conversions are integer, float, date, date_time, boolean. - -Example: -[source,ruby] - filter { - csv { - convert => { "column1" => "integer", "column2" => "boolean" } - } - } - -[id="{version}-plugins-{type}s-{plugin}-include_headers"] -===== `include_headers` - - * Value type is <> - * Default value is `false` - -Treats the first line received as the header information. This information will
be used to compose the field names in the generated events. Note that this information can -be reset on demand, which is useful, for example, when dealing with new files in the file input -or a new request in the http_poller. - -[id="{version}-plugins-{type}s-{plugin}-quote_char"] -===== `quote_char` - - * Value type is <> - * Default value is `"\""` - -Define the character used to quote CSV fields. If this is not specified, -the default is a double quote `"`. -Optional. - -[id="{version}-plugins-{type}s-{plugin}-separator"] -===== `separator` - - * Value type is <> - * Default value is `","` - -Define the column separator value. If this is not specified, the default -is a comma `,`. -Optional. - -[id="{version}-plugins-{type}s-{plugin}-skip_empty_columns"] -===== `skip_empty_columns` - - * Value type is <> - * Default value is `false` - -Define whether empty columns should be skipped. -Defaults to false. If set to true, columns containing no value will not get set. - - diff --git a/docs/versioned-plugins/codecs/csv-v0.1.4.asciidoc b/docs/versioned-plugins/codecs/csv-v0.1.4.asciidoc deleted file mode 100644 index ad1693a62..000000000 --- a/docs/versioned-plugins/codecs/csv-v0.1.4.asciidoc +++ /dev/null @@ -1,132 +0,0 @@ -:plugin: csv -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v0.1.4 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-csv/blob/v0.1.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Csv codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Csv Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-autogenerate_column_names>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -| <<{version}-plugins-{type}s-{plugin}-columns>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-convert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_headers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-quote_char>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-separator>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-skip_empty_columns>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-autogenerate_column_names"] -===== `autogenerate_column_names` - - * Value type is <> - * Default value is `true` - -Define whether column names should be autogenerated or not. -Defaults to true. If set to false, columns not having a header specified will not be parsed. - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this codec. Examples include "UTF-8" and -"CP1252". - -[id="{version}-plugins-{type}s-{plugin}-columns"] -===== `columns` - - * Value type is <> - * Default value is `[]` - -Define a list of column names (in the order they appear in the CSV, -as if it were a header line). If `columns` is not configured, or there -are not enough columns specified, the default column names are -"column1", "column2", etc. In the case that there are more columns -in the data than specified in this column list, extra columns will be auto-numbered: -(e.g. "user_defined_1", "user_defined_2", "column3", "column4", etc.) - -[id="{version}-plugins-{type}s-{plugin}-convert"] -===== `convert` - - * Value type is <> - * Default value is `{}` - -Define a set of datatype conversions to be applied to columns. -Possible conversions are integer, float, date, date_time, boolean. - -Example: -[source,ruby] - filter { - csv { - convert => { "column1" => "integer", "column2" => "boolean" } - } - } - -[id="{version}-plugins-{type}s-{plugin}-include_headers"] -===== `include_headers` - - * Value type is <> - * Default value is `false` - -Treats the first line received as the header information. This information will -be used to compose the field names in the generated events. Note that this information can -be reset on demand, which is useful, for example, when dealing with new files in the file input -or a new request in the http_poller. - -[id="{version}-plugins-{type}s-{plugin}-quote_char"] -===== `quote_char` - - * Value type is <> - * Default value is `"\""` - -Define the character used to quote CSV fields. If this is not specified, -the default is a double quote `"`. -Optional. - -[id="{version}-plugins-{type}s-{plugin}-separator"] -===== `separator` - - * Value type is <> - * Default value is `","` - -Define the column separator value. If this is not specified, the default -is a comma `,`. -Optional. - -[id="{version}-plugins-{type}s-{plugin}-skip_empty_columns"] -===== `skip_empty_columns` - - * Value type is <> - * Default value is `false` - -Define whether empty columns should be skipped. -Defaults to false. If set to true, columns containing no value will not get set. - - diff --git a/docs/versioned-plugins/codecs/dots-index.asciidoc b/docs/versioned-plugins/codecs/dots-index.asciidoc deleted file mode 100644 index 8bffee66c..000000000 --- a/docs/versioned-plugins/codecs/dots-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: dots -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::dots-v3.0.6.asciidoc[] -include::dots-v3.0.5.asciidoc[] -include::dots-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/codecs/dots-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/dots-v3.0.3.asciidoc deleted file mode 100644 index 06c3bdbae..000000000 --- a/docs/versioned-plugins/codecs/dots-v3.0.3.asciidoc +++ /dev/null @@ -1,31 +0,0 @@ -:plugin: dots -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT!
-/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-dots/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Dots codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Dots Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -|======================================================================= diff --git a/docs/versioned-plugins/codecs/dots-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/dots-v3.0.5.asciidoc deleted file mode 100644 index 6e36ff916..000000000 --- a/docs/versioned-plugins/codecs/dots-v3.0.5.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -:plugin: dots -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-dots/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Dots codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec generates a dot (`.`) to represent each Event it processes. This is typically used with `stdout` output to provide feedback on the terminal. It is also used to measure Logstash's throughput with the `pv` command. diff --git a/docs/versioned-plugins/codecs/dots-v3.0.6.asciidoc b/docs/versioned-plugins/codecs/dots-v3.0.6.asciidoc deleted file mode 100644 index f217f753f..000000000 --- a/docs/versioned-plugins/codecs/dots-v3.0.6.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -:plugin: dots -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.6 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-dots/blob/v3.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Dots codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec generates a dot (`.`) to represent each Event it processes. This is typically used with `stdout` output to provide feedback on the terminal. It is also used to measure Logstash's throughput with the `pv` command.
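- -For instance, one way to gauge throughput is to pair this codec with the `stdout` output and pipe Logstash through `pv` (the configuration and command below are only a sketch): -[source,ruby] -    output { -      stdout { codec => dots } -    } -Running something like `bin/logstash -f dots.conf | pv -bart > /dev/null` then reports the event rate as a byte rate, since each event is printed as a single dot.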
diff --git a/docs/versioned-plugins/codecs/edn-index.asciidoc b/docs/versioned-plugins/codecs/edn-index.asciidoc deleted file mode 100644 index 58f4d4369..000000000 --- a/docs/versioned-plugins/codecs/edn-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: edn -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::edn-v3.0.6.asciidoc[] -include::edn-v3.0.5.asciidoc[] -include::edn-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/codecs/edn-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/edn-v3.0.3.asciidoc deleted file mode 100644 index 8c779c5d7..000000000 --- a/docs/versioned-plugins/codecs/edn-v3.0.3.asciidoc +++ /dev/null @@ -1,33 +0,0 @@ -:plugin: edn -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-edn/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Edn codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Edn Codec Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -|======================================================================= diff --git a/docs/versioned-plugins/codecs/edn-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/edn-v3.0.5.asciidoc deleted file mode 100644 index 6afee1e83..000000000 --- a/docs/versioned-plugins/codecs/edn-v3.0.5.asciidoc +++ /dev/null @@ -1,25 +0,0 @@ -:plugin: edn -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-edn/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Edn codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Reads and produces EDN format data. - - diff --git a/docs/versioned-plugins/codecs/edn-v3.0.6.asciidoc b/docs/versioned-plugins/codecs/edn-v3.0.6.asciidoc deleted file mode 100644 index d58ac23dc..000000000 --- a/docs/versioned-plugins/codecs/edn-v3.0.6.asciidoc +++ /dev/null @@ -1,25 +0,0 @@ -:plugin: edn -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
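- -A minimal sketch of emitting EDN-encoded events (the `file` output and path are arbitrary examples): -[source,ruby] -    output { -      file { -        path => "/tmp/events.edn" -        codec => edn -      } -    }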
-/////////////////////////////////////////// -:version: v3.0.6 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-edn/blob/v3.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Edn codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Reads and produces EDN format data. - - diff --git a/docs/versioned-plugins/codecs/edn_lines-index.asciidoc b/docs/versioned-plugins/codecs/edn_lines-index.asciidoc deleted file mode 100644 index 9656a0df3..000000000 --- a/docs/versioned-plugins/codecs/edn_lines-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: edn_lines -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::edn_lines-v3.0.6.asciidoc[] -include::edn_lines-v3.0.5.asciidoc[] -include::edn_lines-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/codecs/edn_lines-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/edn_lines-v3.0.3.asciidoc deleted file mode 100644 index 952290275..000000000 --- a/docs/versioned-plugins/codecs/edn_lines-v3.0.3.asciidoc +++ /dev/null @@ -1,31 +0,0 @@ -:plugin: edn_lines -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-edn_lines/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Edn_lines codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Edn_lines Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -|======================================================================= diff --git a/docs/versioned-plugins/codecs/edn_lines-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/edn_lines-v3.0.5.asciidoc deleted file mode 100644 index 759e6b462..000000000 --- a/docs/versioned-plugins/codecs/edn_lines-v3.0.5.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -:plugin: edn_lines -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-edn_lines/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Edn_lines codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Reads and produces newline-delimited EDN format data. 
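- -For example, events might be shipped as one EDN document per line over TCP (the host and port are arbitrary examples): -[source,ruby] -    output { -      tcp { -        host => "localhost" -        port => 5000 -        codec => edn_lines -      } -    }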
diff --git a/docs/versioned-plugins/codecs/edn_lines-v3.0.6.asciidoc b/docs/versioned-plugins/codecs/edn_lines-v3.0.6.asciidoc deleted file mode 100644 index 56e5802b7..000000000 --- a/docs/versioned-plugins/codecs/edn_lines-v3.0.6.asciidoc +++ /dev/null @@ -1,23 +0,0 @@ -:plugin: edn_lines -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.6 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-edn_lines/blob/v3.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Edn_lines codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Reads and produces newline-delimited EDN format data. diff --git a/docs/versioned-plugins/codecs/es_bulk-index.asciidoc b/docs/versioned-plugins/codecs/es_bulk-index.asciidoc deleted file mode 100644 index faba473ef..000000000 --- a/docs/versioned-plugins/codecs/es_bulk-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: es_bulk -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::es_bulk-v3.0.6.asciidoc[] -include::es_bulk-v3.0.5.asciidoc[] -include::es_bulk-v3.0.4.asciidoc[] - diff --git a/docs/versioned-plugins/codecs/es_bulk-v3.0.4.asciidoc b/docs/versioned-plugins/codecs/es_bulk-v3.0.4.asciidoc deleted file mode 100644 index 2cd33cb27..000000000 --- a/docs/versioned-plugins/codecs/es_bulk-v3.0.4.asciidoc +++ /dev/null @@ -1,35 +0,0 @@ -:plugin: es_bulk -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-es_bulk/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Es_bulk codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec will decode the http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk format] -into individual events, plus metadata into the `@metadata` field. - -Encoding is not supported at this time as the Elasticsearch -output submits Logstash events in bulk format. 
- -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Es_bulk Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -|======================================================================= diff --git a/docs/versioned-plugins/codecs/es_bulk-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/es_bulk-v3.0.5.asciidoc deleted file mode 100644 index 53c00a939..000000000 --- a/docs/versioned-plugins/codecs/es_bulk-v3.0.5.asciidoc +++ /dev/null @@ -1,28 +0,0 @@ -:plugin: es_bulk -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-es_bulk/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Es_bulk codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec will decode the http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk format] -into individual events, plus metadata into the `@metadata` field. - -Encoding is not supported at this time as the Elasticsearch -output submits Logstash events in bulk format. - diff --git a/docs/versioned-plugins/codecs/es_bulk-v3.0.6.asciidoc b/docs/versioned-plugins/codecs/es_bulk-v3.0.6.asciidoc deleted file mode 100644 index 07588b3e4..000000000 --- a/docs/versioned-plugins/codecs/es_bulk-v3.0.6.asciidoc +++ /dev/null @@ -1,28 +0,0 @@ -:plugin: es_bulk -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.6 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-es_bulk/blob/v3.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Es_bulk codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec will decode the http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk format] -into individual events, plus metadata into the `@metadata` field. - -Encoding is not supported at this time as the Elasticsearch -output submits Logstash events in bulk format. 
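- -As a sketch, this codec is typically paired with an input that receives bulk-format payloads, such as the `http` input (the port is an arbitrary example): -[source,ruby] -    input { -      http { -        port => 8080 -        codec => es_bulk -      } -    } -Each action in a received bulk payload then becomes its own event, with the bulk metadata available under `@metadata`.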
- diff --git a/docs/versioned-plugins/codecs/example-index.asciidoc b/docs/versioned-plugins/codecs/example-index.asciidoc deleted file mode 100644 index cf9b9a19d..000000000 --- a/docs/versioned-plugins/codecs/example-index.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -:plugin: example -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -|======================================================================= - - diff --git a/docs/versioned-plugins/codecs/fluent-index.asciidoc b/docs/versioned-plugins/codecs/fluent-index.asciidoc deleted file mode 100644 index 84403dcac..000000000 --- a/docs/versioned-plugins/codecs/fluent-index.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -:plugin: fluent -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-10-12 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::fluent-v3.1.5.asciidoc[] -include::fluent-v3.1.4.asciidoc[] -include::fluent-v3.1.3.asciidoc[] -include::fluent-v3.1.2.asciidoc[] - diff --git a/docs/versioned-plugins/codecs/fluent-v3.1.2.asciidoc b/docs/versioned-plugins/codecs/fluent-v3.1.2.asciidoc deleted file mode 100644 index 18ed83985..000000000 --- a/docs/versioned-plugins/codecs/fluent-v3.1.2.asciidoc +++ /dev/null @@ -1,51 +0,0 @@ -:plugin: fluent -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.2 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-fluent/blob/v3.1.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Fluent codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec handles fluentd's msgpack schema. - -For example, you can receive logs from `fluent-logger-ruby` with: -[source,ruby] - input { - tcp { - codec => fluent - port => 4000 - } - } - -And from the Ruby code in your own application: -[source,ruby] - logger = Fluent::Logger::FluentLogger.new(nil, :host => "example.log", :port => 4000) - logger.post("some_tag", { "your" => "data", "here" => "yay!" }) - -Notes: - -* fluentd uses second-precision time for events, so you will never see - subsecond precision on events processed by this codec. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Fluent Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -|======================================================================= diff --git a/docs/versioned-plugins/codecs/fluent-v3.1.3.asciidoc b/docs/versioned-plugins/codecs/fluent-v3.1.3.asciidoc deleted file mode 100644 index aae0a595c..000000000 --- a/docs/versioned-plugins/codecs/fluent-v3.1.3.asciidoc +++ /dev/null @@ -1,43 +0,0 @@ -:plugin: fluent -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT!
-/////////////////////////////////////////// -:version: v3.1.3 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-fluent/blob/v3.1.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Fluent codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec handles fluentd's msgpack schema. - -For example, you can receive logs from `fluent-logger-ruby` with: -[source,ruby] - input { - tcp { - codec => fluent - port => 4000 - } - } - -And from the Ruby code in your own application: -[source,ruby] - logger = Fluent::Logger::FluentLogger.new(nil, :host => "example.log", :port => 4000) - logger.post("some_tag", { "your" => "data", "here" => "yay!" }) - -Notes: - -* fluentd uses second-precision time for events, so you will never see - subsecond precision on events processed by this codec. - diff --git a/docs/versioned-plugins/codecs/fluent-v3.1.4.asciidoc b/docs/versioned-plugins/codecs/fluent-v3.1.4.asciidoc deleted file mode 100644 index 6aa1d9218..000000000 --- a/docs/versioned-plugins/codecs/fluent-v3.1.4.asciidoc +++ /dev/null @@ -1,43 +0,0 @@ -:plugin: fluent -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.4 -:release_date: 2017-10-12 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-fluent/blob/v3.1.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Fluent codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec handles fluentd's msgpack schema. - -For example, you can receive logs from `fluent-logger-ruby` with: -[source,ruby] - input { - tcp { - codec => fluent - port => 4000 - } - } - -And from the Ruby code in your own application: -[source,ruby] - logger = Fluent::Logger::FluentLogger.new(nil, :host => "example.log", :port => 4000) - logger.post("some_tag", { "your" => "data", "here" => "yay!" }) - -Notes: - -* fluentd uses second-precision time for events, so you will never see - subsecond precision on events processed by this codec. - diff --git a/docs/versioned-plugins/codecs/fluent-v3.1.5.asciidoc b/docs/versioned-plugins/codecs/fluent-v3.1.5.asciidoc deleted file mode 100644 index 6a1d3aec2..000000000 --- a/docs/versioned-plugins/codecs/fluent-v3.1.5.asciidoc +++ /dev/null @@ -1,43 +0,0 @@ -:plugin: fluent -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.5 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-fluent/blob/v3.1.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Fluent codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec handles fluentd's msgpack schema.
- -For example, you can receive logs from `fluent-logger-ruby` with: -[source,ruby] - input { - tcp { - codec => fluent - port => 4000 - } - } - -And from the Ruby code in your own application: -[source,ruby] - logger = Fluent::Logger::FluentLogger.new(nil, :host => "example.log", :port => 4000) - logger.post("some_tag", { "your" => "data", "here" => "yay!" }) - -Notes: - -* fluentd uses second-precision time for events, so you will never see - subsecond precision on events processed by this codec. - diff --git a/docs/versioned-plugins/codecs/graphite-index.asciidoc b/docs/versioned-plugins/codecs/graphite-index.asciidoc deleted file mode 100644 index 75287d628..000000000 --- a/docs/versioned-plugins/codecs/graphite-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: graphite -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::graphite-v3.0.5.asciidoc[] -include::graphite-v3.0.4.asciidoc[] -include::graphite-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/codecs/graphite-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/graphite-v3.0.3.asciidoc deleted file mode 100644 index 911a801f7..000000000 --- a/docs/versioned-plugins/codecs/graphite-v3.0.3.asciidoc +++ /dev/null @@ -1,93 +0,0 @@ -:plugin: graphite -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-graphite/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Graphite codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec will encode and decode Graphite formatted lines.
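- -For example, to decode plaintext Graphite lines (`metric value timestamp`) arriving over TCP, a sketch using the conventional Carbon plaintext port might look like: -[source,ruby] -    input { -      tcp { -        port => 2003 -        codec => graphite -      } -    }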
- -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Graphite Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-exclude_metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fields_are_metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metrics_format>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-exclude_metrics"] -===== `exclude_metrics` - - * Value type is <> - * Default value is `["%{[^}]+}"]` - -Exclude regex-matched metric names; by default, unresolved %{field} strings are excluded. - -[id="{version}-plugins-{type}s-{plugin}-fields_are_metrics"] -===== `fields_are_metrics` - - * Value type is <> - * Default value is `false` - -Indicate that the event @fields should be treated as metrics and will be sent as-is to Graphite. - -[id="{version}-plugins-{type}s-{plugin}-include_metrics"] -===== `include_metrics` - - * Value type is <> - * Default value is `[".*"]` - -Include only regex-matched metric names. - -[id="{version}-plugins-{type}s-{plugin}-metrics"] -===== `metrics` - - * Value type is <> - * Default value is `{}` - -The metric(s) to use. This supports dynamic strings like `%{host}` -for metric names and also for values. This is a hash field with the metric name as the key -and the metric value as the value. Example: -[source,ruby] - [ "%{host}/uptime", "%{uptime_1m}" ] - -The value will be coerced to a floating point value. Values which cannot be -coerced will be set to zero (0). - -[id="{version}-plugins-{type}s-{plugin}-metrics_format"] -===== `metrics_format` - - * Value type is <> - * Default value is `"*"` - -Defines the format of the metric string. The placeholder `*` will be -replaced with the name of the actual metric. This supports dynamic -strings like `%{host}`. -[source,ruby] - metrics_format => "%{host}.foo.bar.*.sum" - -NOTE: If no metrics_format is defined, the name of the metric will be used as a fallback. - - diff --git a/docs/versioned-plugins/codecs/graphite-v3.0.4.asciidoc b/docs/versioned-plugins/codecs/graphite-v3.0.4.asciidoc deleted file mode 100644 index 7674051cc..000000000 --- a/docs/versioned-plugins/codecs/graphite-v3.0.4.asciidoc +++ /dev/null @@ -1,93 +0,0 @@ -:plugin: graphite -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-graphite/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Graphite codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec will encode and decode Graphite formatted lines.
- -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Graphite Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-exclude_metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fields_are_metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metrics_format>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-exclude_metrics"] -===== `exclude_metrics` - - * Value type is <> - * Default value is `["%{[^}]+}"]` - -Exclude regex-matched metric names; by default, unresolved %{field} strings are excluded. - -[id="{version}-plugins-{type}s-{plugin}-fields_are_metrics"] -===== `fields_are_metrics` - - * Value type is <> - * Default value is `false` - -Indicate that the event @fields should be treated as metrics and will be sent as-is to Graphite. - -[id="{version}-plugins-{type}s-{plugin}-include_metrics"] -===== `include_metrics` - - * Value type is <> - * Default value is `[".*"]` - -Include only regex-matched metric names. - -[id="{version}-plugins-{type}s-{plugin}-metrics"] -===== `metrics` - - * Value type is <> - * Default value is `{}` - -The metric(s) to use. This supports dynamic strings like `%{host}` -for metric names and also for values. This is a hash field with the metric name as the key -and the metric value as the value. Example: -[source,ruby] - [ "%{host}/uptime", "%{uptime_1m}" ] - -The value will be coerced to a floating point value. Values which cannot be -coerced will be set to zero (0). - -[id="{version}-plugins-{type}s-{plugin}-metrics_format"] -===== `metrics_format` - - * Value type is <> - * Default value is `"*"` - -Defines the format of the metric string. The placeholder `*` will be -replaced with the name of the actual metric. This supports dynamic -strings like `%{host}`. -[source,ruby] - metrics_format => "%{host}.foo.bar.*.sum" - -NOTE: If no metrics_format is defined, the name of the metric will be used as a fallback. - - diff --git a/docs/versioned-plugins/codecs/graphite-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/graphite-v3.0.5.asciidoc deleted file mode 100644 index e61023f0d..000000000 --- a/docs/versioned-plugins/codecs/graphite-v3.0.5.asciidoc +++ /dev/null @@ -1,93 +0,0 @@ -:plugin: graphite -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-graphite/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Graphite codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec will encode and decode Graphite formatted lines.
- -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Graphite Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-exclude_metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fields_are_metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metrics_format>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-exclude_metrics"] -===== `exclude_metrics` - - * Value type is <> - * Default value is `["%{[^}]+}"]` - -Exclude regex-matched metric names; by default, unresolved %{field} strings are excluded. - -[id="{version}-plugins-{type}s-{plugin}-fields_are_metrics"] -===== `fields_are_metrics` - - * Value type is <> - * Default value is `false` - -Indicate that the event @fields should be treated as metrics and will be sent as-is to Graphite. - -[id="{version}-plugins-{type}s-{plugin}-include_metrics"] -===== `include_metrics` - - * Value type is <> - * Default value is `[".*"]` - -Include only regex-matched metric names. - -[id="{version}-plugins-{type}s-{plugin}-metrics"] -===== `metrics` - - * Value type is <> - * Default value is `{}` - -The metric(s) to use. This supports dynamic strings like `%{host}` -for metric names and also for values. This is a hash field with the metric name as the key -and the metric value as the value. Example: -[source,ruby] - [ "%{host}/uptime", "%{uptime_1m}" ] - -The value will be coerced to a floating point value. Values which cannot be -coerced will be set to zero (0). - -[id="{version}-plugins-{type}s-{plugin}-metrics_format"] -===== `metrics_format` - - * Value type is <> - * Default value is `"*"` - -Defines the format of the metric string. The placeholder `*` will be -replaced with the name of the actual metric. This supports dynamic -strings like `%{host}`. -[source,ruby] - metrics_format => "%{host}.foo.bar.*.sum" - -NOTE: If no metrics_format is defined, the name of the metric will be used as a fallback. - - diff --git a/docs/versioned-plugins/codecs/gzip_lines-index.asciidoc b/docs/versioned-plugins/codecs/gzip_lines-index.asciidoc deleted file mode 100644 index 64a11da62..000000000 --- a/docs/versioned-plugins/codecs/gzip_lines-index.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -:plugin: gzip_lines -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -| <> | 2017-06-19 -|======================================================================= - -include::gzip_lines-v3.0.3.asciidoc[] -include::gzip_lines-v3.0.2.asciidoc[] -include::gzip_lines-v3.0.1.asciidoc[] -include::gzip_lines-v3.0.0.asciidoc[] - diff --git a/docs/versioned-plugins/codecs/gzip_lines-v3.0.0.asciidoc b/docs/versioned-plugins/codecs/gzip_lines-v3.0.0.asciidoc deleted file mode 100644 index 20575bac8..000000000 --- a/docs/versioned-plugins/codecs/gzip_lines-v3.0.0.asciidoc +++ /dev/null @@ -1,59 +0,0 @@ -:plugin: gzip_lines -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT!
-/////////////////////////////////////////// -:version: v3.0.0 -:release_date: 2017-06-19 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-gzip_lines/blob/v3.0.0/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Gzip_lines codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec will read gzip-encoded content. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Gzip_lines Codec Configuration Options - -This plugin supports the following configuration options plus the <> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -|======================================================================= - -Also see <> for a list of options supported by all -codec plugins.
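- -As an illustrative sketch (the input choice and the path are arbitrary examples), reading lines out of gzipped files might look like: -[source,ruby] -    input { -      file { -        path => "/var/log/archive/*.log.gz" -        codec => gzip_lines -      } -    }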
- -  - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this codec. Examples include "UTF-8" and -"CP1252" - -JSON requires valid UTF-8 strings, but in some cases, software that -emits JSON does so in another encoding (nxlog, for example). In -weird cases like this, you can set the charset setting to the -actual encoding of the text and logstash will convert it for you. - -For nxlog users, you'll want to set this to "CP1252" - - - -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/codecs/gzip_lines-v3.0.1.asciidoc b/docs/versioned-plugins/codecs/gzip_lines-v3.0.1.asciidoc deleted file mode 100644 index 326cd59a4..000000000 --- a/docs/versioned-plugins/codecs/gzip_lines-v3.0.1.asciidoc +++ /dev/null @@ -1,52 +0,0 @@ -:plugin: gzip_lines -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-gzip_lines/blob/v3.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Gzip_lines codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec will read gzip encoded content - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Gzip_lines Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`,
`macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this codec. Examples include "UTF-8" and -"CP1252" - -JSON requires valid UTF-8 strings, but in some cases, software that -emits JSON does so in another encoding (nxlog, for example). In -weird cases like this, you can set the charset setting to the -actual encoding of the text and logstash will convert it for you. - -For nxlog users, you'll want to set this to "CP1252" - - diff --git a/docs/versioned-plugins/codecs/gzip_lines-v3.0.2.asciidoc b/docs/versioned-plugins/codecs/gzip_lines-v3.0.2.asciidoc deleted file mode 100644 index 3a3d810b0..000000000 --- a/docs/versioned-plugins/codecs/gzip_lines-v3.0.2.asciidoc +++ /dev/null @@ -1,52 +0,0 @@ -:plugin: gzip_lines -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.2 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-gzip_lines/blob/v3.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Gzip_lines codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec will read gzip encoded content - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Gzip_lines Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, 
`macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this codec. Examples include "UTF-8" and -"CP1252" - -JSON requires valid UTF-8 strings, but in some cases, software that -emits JSON does so in another encoding (nxlog, for example). In -weird cases like this, you can set the charset setting to the -actual encoding of the text and logstash will convert it for you. - -For nxlog users, you'll want to set this to "CP1252" - - diff --git a/docs/versioned-plugins/codecs/gzip_lines-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/gzip_lines-v3.0.3.asciidoc deleted file mode 100644 index c56951293..000000000 --- a/docs/versioned-plugins/codecs/gzip_lines-v3.0.3.asciidoc +++ /dev/null @@ -1,52 +0,0 @@ -:plugin: gzip_lines -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-gzip_lines/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Gzip_lines codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec will read gzip encoded content - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Gzip_lines Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, 
`macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this codec. Examples include "UTF-8" and -"CP1252" - -JSON requires valid UTF-8 strings, but in some cases, software that -emits JSON does so in another encoding (nxlog, for example). In -weird cases like this, you can set the charset setting to the -actual encoding of the text and logstash will convert it for you. - -For nxlog users, you'll want to set this to "CP1252" - - diff --git a/docs/versioned-plugins/codecs/json_lines-index.asciidoc b/docs/versioned-plugins/codecs/json_lines-index.asciidoc deleted file mode 100644 index 544c5fc48..000000000 --- a/docs/versioned-plugins/codecs/json_lines-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: json_lines -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::json_lines-v3.0.5.asciidoc[] -include::json_lines-v3.0.4.asciidoc[] -include::json_lines-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/codecs/json_lines-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/json_lines-v3.0.3.asciidoc deleted file mode 100644 index e6d322800..000000000 --- a/docs/versioned-plugins/codecs/json_lines-v3.0.3.asciidoc +++ /dev/null @@ -1,67 +0,0 @@ -:plugin: json_lines -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-json_lines/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Json_lines codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec will decode streamed JSON that is newline delimited.
-Encoding will emit a single JSON string ending in a `@delimiter` -NOTE: Do not use this codec if your source input is line-oriented JSON, for -example, redis or file inputs. Rather, use the json codec. -More info: This codec is expecting to receive a stream (string) of newline -terminated lines. The file input will produce a line string without a newline. -Therefore this codec cannot work with line oriented inputs. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Json_lines Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, 
`IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this codec. Examples include `UTF-8` and -`CP1252` - -JSON requires valid `UTF-8` strings, but in some cases, software that -emits JSON does so in another encoding (nxlog, for example). In -weird cases like this, you can set the charset setting to the -actual encoding of the text and logstash will convert it for you. - -For nxlog users, you'll want to set this to `CP1252` - -[id="{version}-plugins-{type}s-{plugin}-delimiter"] -===== `delimiter` - - * Value type is <> - * Default value is `"\n"` - -Change the delimiter that separates lines - - diff --git a/docs/versioned-plugins/codecs/json_lines-v3.0.4.asciidoc b/docs/versioned-plugins/codecs/json_lines-v3.0.4.asciidoc deleted file mode 100644 index 2d8b0df7f..000000000 --- a/docs/versioned-plugins/codecs/json_lines-v3.0.4.asciidoc +++ /dev/null @@ -1,67 +0,0 @@ -:plugin: json_lines -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-json_lines/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Json_lines codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec will decode streamed JSON that is newline delimited. -Encoding will emit a single JSON string ending in a `@delimiter` -NOTE: Do not use this codec if your source input is line-oriented JSON, for -example, redis or file inputs. Rather, use the json codec. -More info: This codec is expecting to receive a stream (string) of newline -terminated lines. The file input will produce a line string without a newline. 
-Therefore this codec cannot work with line oriented inputs. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Json_lines Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, 
`stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this codec. Examples include `UTF-8` and -`CP1252` - -JSON requires valid `UTF-8` strings, but in some cases, software that -emits JSON does so in another encoding (nxlog, for example). In -weird cases like this, you can set the charset setting to the -actual encoding of the text and logstash will convert it for you. - -For nxlog users, you'll want to set this to `CP1252` - -[id="{version}-plugins-{type}s-{plugin}-delimiter"] -===== `delimiter` - - * Value type is <> - * Default value is `"\n"` - -Change the delimiter that separates lines - - diff --git a/docs/versioned-plugins/codecs/json_lines-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/json_lines-v3.0.5.asciidoc deleted file mode 100644 index f8031bc0b..000000000 --- a/docs/versioned-plugins/codecs/json_lines-v3.0.5.asciidoc +++ /dev/null @@ -1,67 +0,0 @@ -:plugin: json_lines -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-json_lines/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Json_lines codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec will decode streamed JSON that is newline delimited. -Encoding will emit a single JSON string ending in a `@delimiter` -NOTE: Do not use this codec if your source input is line-oriented JSON, for -example, redis or file inputs. Rather, use the json codec. -More info: This codec is expecting to receive a stream (string) of newline -terminated lines. The file input will produce a line string without a newline. -Therefore this codec cannot work with line oriented inputs. 
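Because the description above rules out line-oriented inputs, a stream-oriented input such as `tcp` is the natural pairing. The sketch below is an assumption-based illustration, not part of the generated reference: the port is a placeholder, and the `charset` value echoes the nxlog note in the option description that follows.

[source,ruby]
----
# Illustrative sketch: decode newline-delimited JSON arriving over TCP.
# Port 5000 is a placeholder; the charset follows the nxlog guidance.
input {
  tcp {
    port  => 5000
    codec => json_lines {
      charset => "CP1252"
    }
  }
}
----

With a configuration along these lines, each newline-terminated JSON document arriving on the socket becomes one event.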
- -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Json_lines Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, 
`GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this codec. Examples include `UTF-8` and -`CP1252` - -JSON requires valid `UTF-8` strings, but in some cases, software that -emits JSON does so in another encoding (nxlog, for example). In -weird cases like this, you can set the charset setting to the -actual encoding of the text and logstash will convert it for you. - -For nxlog users, you'll want to set this to `CP1252` - -[id="{version}-plugins-{type}s-{plugin}-delimiter"] -===== `delimiter` - - * Value type is <> - * Default value is `"\n"` - -Change the delimiter that separates lines - - diff --git a/docs/versioned-plugins/codecs/json_pretty-index.asciidoc b/docs/versioned-plugins/codecs/json_pretty-index.asciidoc deleted file mode 100644 index 7ee81c2f9..000000000 --- a/docs/versioned-plugins/codecs/json_pretty-index.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -:plugin: json_pretty -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -|======================================================================= - - diff --git a/docs/versioned-plugins/codecs/line-index.asciidoc b/docs/versioned-plugins/codecs/line-index.asciidoc deleted file mode 100644 index 2b4cd9dd4..000000000 --- a/docs/versioned-plugins/codecs/line-index.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -:plugin: line -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-12-19 -| <> | 2017-12-15 -| <> | 2017-12-12 -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::line-v3.0.8.asciidoc[] -include::line-v3.0.7.asciidoc[] -include::line-v3.0.6.asciidoc[] -include::line-v3.0.5.asciidoc[] -include::line-v3.0.4.asciidoc[] -include::line-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/codecs/line-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/line-v3.0.3.asciidoc deleted file mode 100644 index 3610e02c7..000000000 --- a/docs/versioned-plugins/codecs/line-v3.0.3.asciidoc +++ /dev/null @@ -1,72 +0,0 @@ -:plugin: line -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-line/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Line codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Line-oriented text data. - -Decoding behavior: Only whole line events will be emitted. - -Encoding behavior: Each event will be emitted with a trailing newline. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Line Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`,
`ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this input. Examples include `UTF-8` -and `cp1252` - -This setting is useful if your log files are in `Latin-1` (aka `cp1252`) -or in another character set other than `UTF-8`. - -This only affects "plain" format logs since json is `UTF-8` already. - -[id="{version}-plugins-{type}s-{plugin}-delimiter"] -===== `delimiter` - - * Value type is <> - * Default value is `"\n"` - -Change the delimiter that separates lines - -[id="{version}-plugins-{type}s-{plugin}-format"] -===== `format` - - * Value type is <> - * There is no default value for this setting. - -Set the desired text format for encoding. - - diff --git a/docs/versioned-plugins/codecs/line-v3.0.4.asciidoc b/docs/versioned-plugins/codecs/line-v3.0.4.asciidoc deleted file mode 100644 index 3fd05cd70..000000000 --- a/docs/versioned-plugins/codecs/line-v3.0.4.asciidoc +++ /dev/null @@ -1,72 +0,0 @@ -:plugin: line -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-line/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Line codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Line-oriented text data. - -Decoding behavior: Only whole line events will be emitted. 
- -Encoding behavior: Each event will be emitted with a trailing newline. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Line Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, 
`macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this input. Examples include `UTF-8` -and `cp1252` - -This setting is useful if your log files are in `Latin-1` (aka `cp1252`) -or in another character set other than `UTF-8`. - -This only affects "plain" format logs since json is `UTF-8` already. - -[id="{version}-plugins-{type}s-{plugin}-delimiter"] -===== `delimiter` - - * Value type is <> - * Default value is `"\n"` - -Change the delimiter that separates lines - -[id="{version}-plugins-{type}s-{plugin}-format"] -===== `format` - - * Value type is <> - * There is no default value for this setting. - -Set the desired text format for encoding. - - diff --git a/docs/versioned-plugins/codecs/line-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/line-v3.0.5.asciidoc deleted file mode 100644 index af48f373b..000000000 --- a/docs/versioned-plugins/codecs/line-v3.0.5.asciidoc +++ /dev/null @@ -1,72 +0,0 @@ -:plugin: line -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-line/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Line codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Line-oriented text data. - -Decoding behavior: Only whole line events will be emitted. - -Encoding behavior: Each event will be emitted with a trailing newline. 
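As a hypothetical illustration of the decoding behavior just described, the sketch below pairs the codec with a `stdin` input. It is not from the generated reference: the `charset` and `delimiter` values are placeholders drawn from the option descriptions that follow.

[source,ruby]
----
# Illustrative sketch: emit one event per complete Latin-1 line read
# from standard input. Charset and delimiter values are placeholders.
input {
  stdin {
    codec => line {
      charset   => "ISO-8859-1"
      delimiter => "\n"
    }
  }
}
----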
- -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Line Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, 
`stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this input. Examples include `UTF-8` -and `cp1252` - -This setting is useful if your log files are in `Latin-1` (aka `cp1252`) -or in another character set other than `UTF-8`. - -This only affects "plain" format logs since json is `UTF-8` already. - -[id="{version}-plugins-{type}s-{plugin}-delimiter"] -===== `delimiter` - - * Value type is <> - * Default value is `"\n"` - -Change the delimiter that separates lines - -[id="{version}-plugins-{type}s-{plugin}-format"] -===== `format` - - * Value type is <> - * There is no default value for this setting. - -Set the desired text format for encoding. - - diff --git a/docs/versioned-plugins/codecs/line-v3.0.6.asciidoc b/docs/versioned-plugins/codecs/line-v3.0.6.asciidoc deleted file mode 100644 index f290efd15..000000000 --- a/docs/versioned-plugins/codecs/line-v3.0.6.asciidoc +++ /dev/null @@ -1,72 +0,0 @@ -:plugin: line -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.6 -:release_date: 2017-12-12 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-line/blob/v3.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Line codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Line-oriented text data. - -Decoding behavior: Only whole line events will be emitted. - -Encoding behavior: Each event will be emitted with a trailing newline. 
- -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Line Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, 
`stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this input. Examples include `UTF-8` -and `cp1252` - -This setting is useful if your log files are in `Latin-1` (aka `cp1252`) -or in another character set other than `UTF-8`. - -This only affects "plain" format logs since json is `UTF-8` already. - -[id="{version}-plugins-{type}s-{plugin}-delimiter"] -===== `delimiter` - - * Value type is <> - * Default value is `"\n"` - -Change the delimiter that separates lines - -[id="{version}-plugins-{type}s-{plugin}-format"] -===== `format` - - * Value type is <> - * There is no default value for this setting. - -Set the desired text format for encoding. - - diff --git a/docs/versioned-plugins/codecs/line-v3.0.7.asciidoc b/docs/versioned-plugins/codecs/line-v3.0.7.asciidoc deleted file mode 100644 index 72d714510..000000000 --- a/docs/versioned-plugins/codecs/line-v3.0.7.asciidoc +++ /dev/null @@ -1,72 +0,0 @@ -:plugin: line -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.7 -:release_date: 2017-12-15 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-line/blob/v3.0.7/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Line codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Line-oriented text data. - -Decoding behavior: Only whole line events will be emitted. - -Encoding behavior: Each event will be emitted with a trailing newline. 
- -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Line Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, 
`stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this input. Examples include `UTF-8` -and `cp1252` - -This setting is useful if your log files are in `Latin-1` (aka `cp1252`) -or in another character set other than `UTF-8`. - -This only affects "plain" format logs since json is `UTF-8` already. - -[id="{version}-plugins-{type}s-{plugin}-delimiter"] -===== `delimiter` - - * Value type is <> - * Default value is `"\n"` - -Change the delimiter that separates lines - -[id="{version}-plugins-{type}s-{plugin}-format"] -===== `format` - - * Value type is <> - * There is no default value for this setting. - -Set the desired text format for encoding. - - diff --git a/docs/versioned-plugins/codecs/line-v3.0.8.asciidoc b/docs/versioned-plugins/codecs/line-v3.0.8.asciidoc deleted file mode 100644 index 2627ec8ad..000000000 --- a/docs/versioned-plugins/codecs/line-v3.0.8.asciidoc +++ /dev/null @@ -1,72 +0,0 @@ -:plugin: line -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.8 -:release_date: 2017-12-19 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-line/blob/v3.0.8/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Line codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Line-oriented text data. - -Decoding behavior: Only whole line events will be emitted. - -Encoding behavior: Each event will be emitted with a trailing newline. 
- -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Line Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, 
`stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this input. Examples include `UTF-8` -and `cp1252` - -This setting is useful if your log files are in `Latin-1` (aka `cp1252`) -or in another character set other than `UTF-8`. - -This only affects "plain" format logs since json is `UTF-8` already. - -[id="{version}-plugins-{type}s-{plugin}-delimiter"] -===== `delimiter` - - * Value type is <> - * Default value is `"\n"` - -Change the delimiter that separates lines - -[id="{version}-plugins-{type}s-{plugin}-format"] -===== `format` - - * Value type is <> - * There is no default value for this setting. - -Set the desired text format for encoding. - - diff --git a/docs/versioned-plugins/codecs/msgpack-index.asciidoc b/docs/versioned-plugins/codecs/msgpack-index.asciidoc deleted file mode 100644 index 893300d30..000000000 --- a/docs/versioned-plugins/codecs/msgpack-index.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -:plugin: msgpack -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-10-27 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::msgpack-v3.0.7.asciidoc[] -include::msgpack-v3.0.6.asciidoc[] -include::msgpack-v3.0.5.asciidoc[] -include::msgpack-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/codecs/msgpack-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/msgpack-v3.0.3.asciidoc deleted file mode 100644 index 090581a4d..000000000 --- a/docs/versioned-plugins/codecs/msgpack-v3.0.3.asciidoc +++ /dev/null @@ -1,44 +0,0 @@ -:plugin: msgpack -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-msgpack/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Msgpack codec plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This codec reads and produces MessagePack encoded content.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Msgpack Codec Configuration Options
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No
-|=======================================================================
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-format"]
-===== `format`
-
- * Value type is <>
- * Default value is `nil`
-
-
-
diff --git a/docs/versioned-plugins/codecs/msgpack-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/msgpack-v3.0.5.asciidoc
deleted file mode 100644
index a386d87ae..000000000
--- a/docs/versioned-plugins/codecs/msgpack-v3.0.5.asciidoc
+++ /dev/null
@@ -1,44 +0,0 @@
-:plugin: msgpack
-:type: codec
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.5
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-codec-msgpack/blob/v3.0.5/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Msgpack codec plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This codec reads and produces MessagePack encoded content.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Msgpack Codec Configuration Options
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No
-|=======================================================================
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-format"]
-===== `format`
-
- * Value type is <>
- * Default value is `nil`
-
-
-
diff --git a/docs/versioned-plugins/codecs/msgpack-v3.0.6.asciidoc b/docs/versioned-plugins/codecs/msgpack-v3.0.6.asciidoc
deleted file mode 100644
index eec715359..000000000
--- a/docs/versioned-plugins/codecs/msgpack-v3.0.6.asciidoc
+++ /dev/null
@@ -1,44 +0,0 @@
-:plugin: msgpack
-:type: codec
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.6
-:release_date: 2017-10-27
-:changelog_url: https://github.com/logstash-plugins/logstash-codec-msgpack/blob/v3.0.6/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Msgpack codec plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This codec reads and produces MessagePack encoded content.
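-
-NOTE: A minimal sketch (an editorial addition; the `tcp` input and the port
-number are assumptions) of this codec on both ends of a pipeline:
-
-[source,ruby]
-    input {
-      tcp {
-        port  => 5000      # illustrative port
-        codec => msgpack   # decode MessagePack payloads into events
-      }
-    }
-    output {
-      stdout {
-        codec => msgpack   # re-encode events as MessagePack
-      }
-    }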
- -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Msgpack Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-format"] -===== `format` - - * Value type is <> - * Default value is `nil` - - - - diff --git a/docs/versioned-plugins/codecs/msgpack-v3.0.7.asciidoc b/docs/versioned-plugins/codecs/msgpack-v3.0.7.asciidoc deleted file mode 100644 index 61c6426c2..000000000 --- a/docs/versioned-plugins/codecs/msgpack-v3.0.7.asciidoc +++ /dev/null @@ -1,44 +0,0 @@ -:plugin: msgpack -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.7 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-msgpack/blob/v3.0.7/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Msgpack codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec reads and produces MessagePack encoded content. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Msgpack Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-format"] -===== `format` - - * Value type is <> - * Default value is `nil` - - - - diff --git a/docs/versioned-plugins/codecs/multiline-index.asciidoc b/docs/versioned-plugins/codecs/multiline-index.asciidoc deleted file mode 100644 index cd4bda72f..000000000 --- a/docs/versioned-plugins/codecs/multiline-index.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -:plugin: multiline -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-08-07 -| <> | 2017-06-23 -|======================================================================= - -include::multiline-v3.0.8.asciidoc[] -include::multiline-v3.0.7.asciidoc[] -include::multiline-v3.0.6.asciidoc[] -include::multiline-v3.0.5.asciidoc[] - diff --git a/docs/versioned-plugins/codecs/multiline-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/multiline-v3.0.5.asciidoc deleted file mode 100644 index 979dccded..000000000 --- a/docs/versioned-plugins/codecs/multiline-v3.0.5.asciidoc +++ /dev/null @@ -1,222 +0,0 @@ -:plugin: multiline -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-multiline/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Multiline codec plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The multiline codec will collapse multiline messages and merge them into a
-single event.
-
-IMPORTANT: If you are using a Logstash input plugin that supports multiple
-hosts, such as the <> input plugin, you should not use
-the multiline codec to handle multiline events. Doing so may result in the
-mixing of streams and corrupted event data. In this situation, you need to
-handle multiline events before sending the event data to Logstash.
-
-The original goal of this codec was to allow joining of multiline messages
-from files into a single event. For example, joining Java exception and
-stacktrace messages into a single event.
-
-The config looks like this:
-[source,ruby]
-    input {
-      stdin {
-        codec => multiline {
-          pattern => "pattern, a regexp"
-          negate => "true" or "false"
-          what => "previous" or "next"
-        }
-      }
-    }
-
-The `pattern` should match what you believe to be an indicator that the field
-is part of a multi-line event.
-
-The `what` must be `previous` or `next` and indicates the relation
-to the multi-line event.
-
-The `negate` can be `true` or `false` (defaults to `false`). If `true`, a
-message that does *not* match the pattern constitutes a match for the
-multiline filter, and the `what` action is applied; if `false`, a matching
-message does.
-
-For example, Java stack traces are multiline and usually have the message
-starting at the far-left, with each subsequent line indented. Do this:
-[source,ruby]
-    input {
-      stdin {
-        codec => multiline {
-          pattern => "^\s"
-          what => "previous"
-        }
-      }
-    }
-
-This says that any line starting with whitespace belongs to the previous line.
-
-Another example is to merge lines not starting with a date up to the previous
-line:
-[source,ruby]
-    input {
-      file {
-        path => "/var/log/someapp.log"
-        codec => multiline {
-          # Grok pattern names are valid! :)
-          pattern => "^%{TIMESTAMP_ISO8601} "
-          negate => true
-          what => "previous"
-        }
-      }
-    }
-
-This says that any line not starting with a timestamp should be merged with the previous line.
-
-One more common example is C line continuations (backslash). Here's how to do that:
-[source,ruby]
-    input {
-      stdin {
-        codec => multiline {
-          pattern => "\\$"
-          what => "next"
-        }
-      }
-    }
-
-This says that any line ending with a backslash should be combined with the
-following line.
- - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Multiline Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-auto_flush_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -| <<{version}-plugins-{type}s-{plugin}-max_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_lines>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-multiline_tag>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-negate>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pattern>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-what>> |<>, one of `["previous", "next"]`|Yes -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-auto_flush_interval"] -===== `auto_flush_interval` - - * Value type is <> - * There is no default value for this setting. - -The accumulation of multiple lines will be converted to an event when either a -matching new line is seen or there has been no new data appended for this many -seconds. No default. If unset, no auto_flush. 
Units: seconds - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this input. Examples include `UTF-8` -and `cp1252` - -This setting is useful if your log files are in `Latin-1` (aka `cp1252`) -or in another character set other than `UTF-8`. - -This only affects "plain" format logs since JSON is `UTF-8` already. - -[id="{version}-plugins-{type}s-{plugin}-max_bytes"] -===== `max_bytes` - - * Value type is <> - * Default value is `"10 MiB"` - -The accumulation of events can make logstash exit with an out of memory error -if event boundaries are not correctly defined. This settings make sure to flush -multiline events after reaching a number of bytes, it is used in combination -max_lines. - -[id="{version}-plugins-{type}s-{plugin}-max_lines"] -===== `max_lines` - - * Value type is <> - * Default value is `500` - -The accumulation of events can make logstash exit with an out of memory error -if event boundaries are not correctly defined. This settings make sure to flush -multiline events after reaching a number of lines, it is used in combination -max_bytes. - -[id="{version}-plugins-{type}s-{plugin}-multiline_tag"] -===== `multiline_tag` - - * Value type is <> - * Default value is `"multiline"` - -Tag multiline events with a given tag. This tag will only be added -to events that actually have multiple lines in them. 
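-
-NOTE: Putting the flush-related settings above together, a hedged sketch
-(an editorial addition; the file path, thresholds, and tag name are
-assumptions):
-
-[source,ruby]
-    input {
-      file {
-        path => "/var/log/app.log"
-        codec => multiline {
-          pattern => "^%{TIMESTAMP_ISO8601} "
-          negate  => true
-          what    => "previous"
-          auto_flush_interval => 5    # emit a pending event after 5 idle seconds
-          max_lines => 1000           # flush once 1000 lines accumulate
-          max_bytes => "20 MiB"       # ...or once the buffer reaches 20 MiB
-          multiline_tag => "joined"   # tag only events that were actually merged
-        }
-      }
-    }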
- -[id="{version}-plugins-{type}s-{plugin}-negate"] -===== `negate` - - * Value type is <> - * Default value is `false` - -Negate the regexp pattern ('if not matched'). - -[id="{version}-plugins-{type}s-{plugin}-pattern"] -===== `pattern` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The regular expression to match. - -[id="{version}-plugins-{type}s-{plugin}-patterns_dir"] -===== `patterns_dir` - - * Value type is <> - * Default value is `[]` - -Logstash ships by default with a bunch of patterns, so you don't -necessarily need to define this yourself unless you are adding additional -patterns. - -Pattern files are plain text with format: -[source,ruby] - NAME PATTERN - -For example: -[source,ruby] - NUMBER \d+ - -[id="{version}-plugins-{type}s-{plugin}-what"] -===== `what` - - * This is a required setting. - * Value can be any of: `previous`, `next` - * There is no default value for this setting. - -If the pattern matched, does event belong to the next or previous event? - - diff --git a/docs/versioned-plugins/codecs/multiline-v3.0.6.asciidoc b/docs/versioned-plugins/codecs/multiline-v3.0.6.asciidoc deleted file mode 100644 index cf15d0e8f..000000000 --- a/docs/versioned-plugins/codecs/multiline-v3.0.6.asciidoc +++ /dev/null @@ -1,222 +0,0 @@ -:plugin: multiline -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.6 -:release_date: 2017-08-07 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-multiline/blob/v3.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Multiline codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The multiline codec will collapse multiline messages and merge them into a -single event. - -IMPORTANT: If you are using a Logstash input plugin that supports multiple -hosts, such as the <> input plugin, you should not use -the multiline codec to handle multiline events. Doing so may result in the -mixing of streams and corrupted event data. In this situation, you need to -handle multiline events before sending the event data to Logstash. - -The original goal of this codec was to allow joining of multiline messages -from files into a single event. For example, joining Java exception and -stacktrace messages into a single event. - -The config looks like this: -[source,ruby] - input { - stdin { - codec => multiline { - pattern => "pattern, a regexp" - negate => "true" or "false" - what => "previous" or "next" - } - } - } - -The `pattern` should match what you believe to be an indicator that the field -is part of a multi-line event. - -The `what` must be `previous` or `next` and indicates the relation -to the multi-line event. - -The `negate` can be `true` or `false` (defaults to `false`). If `true`, a -message not matching the pattern will constitute a match of the multiline -filter and the `what` will be applied. (vice-versa is also true) - -For example, Java stack traces are multiline and usually have the message -starting at the far-left, with each subsequent line indented. 
Do this: -[source,ruby] - input { - stdin { - codec => multiline { - pattern => "^\s" - what => "previous" - } - } - } - -This says that any line starting with whitespace belongs to the previous line. - -Another example is to merge lines not starting with a date up to the previous -line.. -[source,ruby] - input { - file { - path => "/var/log/someapp.log" - codec => multiline { - # Grok pattern names are valid! :) - pattern => "^%{TIMESTAMP_ISO8601} " - negate => true - what => "previous" - } - } - } - -This says that any line not starting with a timestamp should be merged with the previous line. - -One more common example is C line continuations (backslash). Here's how to do that: -[source,ruby] - input { - stdin { - codec => multiline { - pattern => "\\$" - what => "next" - } - } - } - -This says that any line ending with a backslash should be combined with the -following line. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Multiline Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-auto_flush_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -| <<{version}-plugins-{type}s-{plugin}-max_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_lines>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-multiline_tag>> |<>|No -| 
<<{version}-plugins-{type}s-{plugin}-negate>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pattern>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-what>> |<>, one of `["previous", "next"]`|Yes -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-auto_flush_interval"] -===== `auto_flush_interval` - - * Value type is <> - * There is no default value for this setting. - -The accumulation of multiple lines will be converted to an event when either a -matching new line is seen or there has been no new data appended for this many -seconds. No default. If unset, no auto_flush. Units: seconds - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this input. Examples include `UTF-8` -and `cp1252` - -This setting is useful if your log files are in `Latin-1` (aka `cp1252`) -or in another character set other than `UTF-8`. - -This only affects "plain" format logs since JSON is `UTF-8` already. - -[id="{version}-plugins-{type}s-{plugin}-max_bytes"] -===== `max_bytes` - - * Value type is <> - * Default value is `"10 MiB"` - -The accumulation of events can make logstash exit with an out of memory error -if event boundaries are not correctly defined. 
This setting makes sure to flush
-multiline events after reaching a number of bytes; it is used in combination
-with `max_lines`.
-
-[id="{version}-plugins-{type}s-{plugin}-max_lines"]
-===== `max_lines`
-
- * Value type is <>
- * Default value is `500`
-
-The accumulation of events can make logstash exit with an out of memory error
-if event boundaries are not correctly defined. This setting makes sure to flush
-multiline events after reaching a number of lines; it is used in combination
-with `max_bytes`.
-
-[id="{version}-plugins-{type}s-{plugin}-multiline_tag"]
-===== `multiline_tag`
-
- * Value type is <>
- * Default value is `"multiline"`
-
-Tag multiline events with a given tag. This tag will only be added
-to events that actually have multiple lines in them.
-
-[id="{version}-plugins-{type}s-{plugin}-negate"]
-===== `negate`
-
- * Value type is <>
- * Default value is `false`
-
-Negate the regexp pattern ('if not matched').
-
-[id="{version}-plugins-{type}s-{plugin}-pattern"]
-===== `pattern`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The regular expression to match.
-
-[id="{version}-plugins-{type}s-{plugin}-patterns_dir"]
-===== `patterns_dir`
-
- * Value type is <>
- * Default value is `[]`
-
-Logstash ships by default with a bunch of patterns, so you don't
-necessarily need to define this yourself unless you are adding additional
-patterns.
-
-Pattern files are plain text with format:
-[source,ruby]
-    NAME PATTERN
-
-For example:
-[source,ruby]
-    NUMBER \d+
-
-[id="{version}-plugins-{type}s-{plugin}-what"]
-===== `what`
-
- * This is a required setting.
- * Value can be any of: `previous`, `next`
- * There is no default value for this setting.
-
-If the pattern matched, does the event belong to the next or previous event?
-
-
diff --git a/docs/versioned-plugins/codecs/multiline-v3.0.7.asciidoc b/docs/versioned-plugins/codecs/multiline-v3.0.7.asciidoc
deleted file mode 100644
index 83af451cf..000000000
--- a/docs/versioned-plugins/codecs/multiline-v3.0.7.asciidoc
+++ /dev/null
@@ -1,222 +0,0 @@
-:plugin: multiline
-:type: codec
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.7
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-codec-multiline/blob/v3.0.7/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Multiline codec plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The multiline codec will collapse multiline messages and merge them into a
-single event.
-
-IMPORTANT: If you are using a Logstash input plugin that supports multiple
-hosts, such as the <> input plugin, you should not use
-the multiline codec to handle multiline events. Doing so may result in the
-mixing of streams and corrupted event data. In this situation, you need to
-handle multiline events before sending the event data to Logstash.
-
-The original goal of this codec was to allow joining of multiline messages
-from files into a single event. For example, joining Java exception and
-stacktrace messages into a single event.
- -The config looks like this: -[source,ruby] - input { - stdin { - codec => multiline { - pattern => "pattern, a regexp" - negate => "true" or "false" - what => "previous" or "next" - } - } - } - -The `pattern` should match what you believe to be an indicator that the field -is part of a multi-line event. - -The `what` must be `previous` or `next` and indicates the relation -to the multi-line event. - -The `negate` can be `true` or `false` (defaults to `false`). If `true`, a -message not matching the pattern will constitute a match of the multiline -filter and the `what` will be applied. (vice-versa is also true) - -For example, Java stack traces are multiline and usually have the message -starting at the far-left, with each subsequent line indented. Do this: -[source,ruby] - input { - stdin { - codec => multiline { - pattern => "^\s" - what => "previous" - } - } - } - -This says that any line starting with whitespace belongs to the previous line. - -Another example is to merge lines not starting with a date up to the previous -line.. -[source,ruby] - input { - file { - path => "/var/log/someapp.log" - codec => multiline { - # Grok pattern names are valid! :) - pattern => "^%{TIMESTAMP_ISO8601} " - negate => true - what => "previous" - } - } - } - -This says that any line not starting with a timestamp should be merged with the previous line. - -One more common example is C line continuations (backslash). Here's how to do that: -[source,ruby] - input { - stdin { - codec => multiline { - pattern => "\\$" - what => "next" - } - } - } - -This says that any line ending with a backslash should be combined with the -following line. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Multiline Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-auto_flush_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", 
"EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -| <<{version}-plugins-{type}s-{plugin}-max_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_lines>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-multiline_tag>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-negate>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pattern>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-what>> |<>, one of `["previous", "next"]`|Yes -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-auto_flush_interval"] -===== `auto_flush_interval` - - * Value type is <> - * There is no default value for this setting. - -The accumulation of multiple lines will be converted to an event when either a -matching new line is seen or there has been no new data appended for this many -seconds. No default. If unset, no auto_flush. Units: seconds - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, 
-`csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
- * Default value is `"UTF-8"`
-
-The character encoding used in this input. Examples include `UTF-8`
-and `cp1252`.
-
-This setting is useful if your log files are in `Latin-1` (aka `cp1252`)
-or in a character set other than `UTF-8`.
-
-This only affects "plain" format logs since JSON is `UTF-8` already.
-
-[id="{version}-plugins-{type}s-{plugin}-max_bytes"]
-===== `max_bytes`
-
- * Value type is <>
- * Default value is `"10 MiB"`
-
-The accumulation of events can make logstash exit with an out of memory error
-if event boundaries are not correctly defined. This setting makes sure to flush
-multiline events after reaching a number of bytes; it is used in combination
-with max_lines.
-
-[id="{version}-plugins-{type}s-{plugin}-max_lines"]
-===== `max_lines`
-
- * Value type is <>
- * Default value is `500`
-
-The accumulation of events can make logstash exit with an out of memory error
-if event boundaries are not correctly defined. This setting makes sure to flush
-multiline events after reaching a number of lines; it is used in combination
-with max_bytes.
-
-[id="{version}-plugins-{type}s-{plugin}-multiline_tag"]
-===== `multiline_tag`
-
- * Value type is <>
- * Default value is `"multiline"`
-
-Tag multiline events with a given tag. This tag will only be added
-to events that actually have multiple lines in them.
-
-[id="{version}-plugins-{type}s-{plugin}-negate"]
-===== `negate`
-
- * Value type is <>
- * Default value is `false`
-
-Negate the regexp pattern, so that `what` is applied to lines that do 'not' match.
-
-[id="{version}-plugins-{type}s-{plugin}-pattern"]
-===== `pattern`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The regular expression to match.
-
-[id="{version}-plugins-{type}s-{plugin}-patterns_dir"]
-===== `patterns_dir`
-
- * Value type is <>
- * Default value is `[]`
-
-Logstash ships by default with a bunch of patterns, so you don't
-necessarily need to define this yourself unless you are adding additional
-patterns.
-
-Pattern files are plain text with the format:
-[source,ruby]
-    NAME PATTERN
-
-For example:
-[source,ruby]
-    NUMBER \d+
-
-[id="{version}-plugins-{type}s-{plugin}-what"]
-===== `what`
-
- * This is a required setting.
- * Value can be any of: `previous`, `next`
- * There is no default value for this setting.
-
-If the pattern matches, does the event belong to the next or previous event?
-
-
diff --git a/docs/versioned-plugins/codecs/multiline-v3.0.8.asciidoc b/docs/versioned-plugins/codecs/multiline-v3.0.8.asciidoc
deleted file mode 100644
index a67f3675b..000000000
--- a/docs/versioned-plugins/codecs/multiline-v3.0.8.asciidoc
+++ /dev/null
@@ -1,222 +0,0 @@
-:plugin: multiline
-:type: codec
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.8
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-codec-multiline/blob/v3.0.8/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Multiline codec plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The multiline codec will collapse multiline messages and merge them into a
-single event.
-
-IMPORTANT: If you are using a Logstash input plugin that supports multiple
-hosts, such as the <> input plugin, you should not use
-the multiline codec to handle multiline events. Doing so may result in the
-mixing of streams and corrupted event data. In this situation, you need to
-handle multiline events before sending the event data to Logstash.
-
-The original goal of this codec was to allow joining of multiline messages
-from files into a single event. For example, joining Java exception and
-stacktrace messages into a single event.
-
-The config looks like this:
-[source,ruby]
-    input {
-      stdin {
-        codec => multiline {
-          pattern => "pattern, a regexp"
-          negate => "true" or "false"
-          what => "previous" or "next"
-        }
-      }
-    }
-
-The `pattern` should match what you believe to be an indicator that the field
-is part of a multi-line event.
-
-The `what` must be `previous` or `next` and indicates the relation
-to the multi-line event.
-
-The `negate` can be `true` or `false` (defaults to `false`). If `true`, a
-message not matching the pattern will constitute a match of the multiline
-filter and the `what` will be applied. (The reverse also holds.)
-
-For example, Java stack traces are multiline and usually have the message
-starting at the far-left, with each subsequent line indented. Do this:
-[source,ruby]
-    input {
-      stdin {
-        codec => multiline {
-          pattern => "^\s"
-          what => "previous"
-        }
-      }
-    }
-
-This says that any line starting with whitespace belongs to the previous line.
-
-Another example is to merge lines not starting with a date up to the previous
-line:
-[source,ruby]
-    input {
-      file {
-        path => "/var/log/someapp.log"
-        codec => multiline {
-          # Grok pattern names are valid! :)
-          pattern => "^%{TIMESTAMP_ISO8601} "
-          negate => true
-          what => "previous"
-        }
-      }
-    }
-
-This says that any line not starting with a timestamp should be merged with
-the previous line.
-
-One more common example is C line continuations (backslash). Here's how to do
-that:
-[source,ruby]
-    input {
-      stdin {
-        codec => multiline {
-          pattern => "\\$"
-          what => "next"
-        }
-      }
-    }
-
-This says that any line ending with a backslash should be combined with the
-following line.
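-
-For illustration, here is a sketch that combines the stack-trace pattern above
-with the `auto_flush_interval` option described below, so that a trailing event
-is not held indefinitely when no further lines arrive. The 5-second value is an
-arbitrary assumption, not a recommended default:
-[source,ruby]
-    input {
-      stdin {
-        codec => multiline {
-          pattern => "^\s"
-          what => "previous"
-          # assumed value: emit a pending event after 5 seconds without new data
-          auto_flush_interval => 5
-        }
-      }
-    }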
- - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Multiline Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-auto_flush_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -| <<{version}-plugins-{type}s-{plugin}-max_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_lines>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-multiline_tag>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-negate>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pattern>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-what>> |<>, one of `["previous", "next"]`|Yes -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-auto_flush_interval"] -===== `auto_flush_interval` - - * Value type is <> - * There is no default value for this setting. - -The accumulation of multiple lines will be converted to an event when either a -matching new line is seen or there has been no new data appended for this many -seconds. No default. If unset, no auto_flush. 
Units: seconds - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this input. Examples include `UTF-8` -and `cp1252` - -This setting is useful if your log files are in `Latin-1` (aka `cp1252`) -or in another character set other than `UTF-8`. - -This only affects "plain" format logs since JSON is `UTF-8` already. - -[id="{version}-plugins-{type}s-{plugin}-max_bytes"] -===== `max_bytes` - - * Value type is <> - * Default value is `"10 MiB"` - -The accumulation of events can make logstash exit with an out of memory error -if event boundaries are not correctly defined. This settings make sure to flush -multiline events after reaching a number of bytes, it is used in combination -max_lines. - -[id="{version}-plugins-{type}s-{plugin}-max_lines"] -===== `max_lines` - - * Value type is <> - * Default value is `500` - -The accumulation of events can make logstash exit with an out of memory error -if event boundaries are not correctly defined. This settings make sure to flush -multiline events after reaching a number of lines, it is used in combination -max_bytes. - -[id="{version}-plugins-{type}s-{plugin}-multiline_tag"] -===== `multiline_tag` - - * Value type is <> - * Default value is `"multiline"` - -Tag multiline events with a given tag. This tag will only be added -to events that actually have multiple lines in them. 
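-
-As a brief sketch (the tag name `stacktrace` is an arbitrary assumption), a
-codec that tags only the events it actually merged might look like this:
-[source,ruby]
-    input {
-      stdin {
-        codec => multiline {
-          pattern => "^\s"
-          what => "previous"
-          # assumed tag name; added only to events built from multiple lines
-          multiline_tag => "stacktrace"
-        }
-      }
-    }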
- -[id="{version}-plugins-{type}s-{plugin}-negate"] -===== `negate` - - * Value type is <> - * Default value is `false` - -Negate the regexp pattern ('if not matched'). - -[id="{version}-plugins-{type}s-{plugin}-pattern"] -===== `pattern` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The regular expression to match. - -[id="{version}-plugins-{type}s-{plugin}-patterns_dir"] -===== `patterns_dir` - - * Value type is <> - * Default value is `[]` - -Logstash ships by default with a bunch of patterns, so you don't -necessarily need to define this yourself unless you are adding additional -patterns. - -Pattern files are plain text with format: -[source,ruby] - NAME PATTERN - -For example: -[source,ruby] - NUMBER \d+ - -[id="{version}-plugins-{type}s-{plugin}-what"] -===== `what` - - * This is a required setting. - * Value can be any of: `previous`, `next` - * There is no default value for this setting. - -If the pattern matched, does event belong to the next or previous event? - - diff --git a/docs/versioned-plugins/codecs/netflow-index.asciidoc b/docs/versioned-plugins/codecs/netflow-index.asciidoc deleted file mode 100644 index c0f1a626f..000000000 --- a/docs/versioned-plugins/codecs/netflow-index.asciidoc +++ /dev/null @@ -1,34 +0,0 @@ -:plugin: netflow -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-12-30 -| <> | 2017-12-03 -| <> | 2017-11-23 -| <> | 2017-11-19 -| <> | 2017-11-11 -| <> | 2017-11-07 -| <> | 2017-09-29 -| <> | 2017-09-28 -| <> | 2017-08-15 -| <> | 2017-07-18 -| <> | 2017-06-23 -| <> | 2017-06-23 -|======================================================================= - -include::netflow-v3.9.1.asciidoc[] -include::netflow-v3.9.0.asciidoc[] -include::netflow-v3.8.3.asciidoc[] -include::netflow-v3.8.1.asciidoc[] -include::netflow-v3.8.0.asciidoc[] -include::netflow-v3.7.1.asciidoc[] -include::netflow-v3.7.0.asciidoc[] -include::netflow-v3.6.0.asciidoc[] -include::netflow-v3.5.2.asciidoc[] -include::netflow-v3.5.1.asciidoc[] -include::netflow-v3.5.0.asciidoc[] -include::netflow-v3.4.1.asciidoc[] - diff --git a/docs/versioned-plugins/codecs/netflow-v3.10.0.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.10.0.asciidoc deleted file mode 100644 index cce6a4612..000000000 --- a/docs/versioned-plugins/codecs/netflow-v3.10.0.asciidoc +++ /dev/null @@ -1,210 +0,0 @@ -:plugin: netflow -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.10.0 -:release_date: 2017-12-30 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.10.0/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Netflow codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows. 
-
-==== Supported Netflow/IPFIX exporters
-
-This codec supports:
-
-* Netflow v5
-* Netflow v9
-* IPFIX
-
-The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec:
-
-[cols="6,^2,^2,^2,12",options="header"]
-|===========================================================================================
-|Netflow exporter | v5 | v9 | IPFIX | Remarks
-|Barracuda Firewall |    |    | y  |
-|Cisco ASA          |    | y  |    |
-|Cisco ASR 1k       |    |    | n  | Fails because of duplicate fields
-|Cisco ASR 9k       |    | y  |    |
-|Cisco IOS 12.x     |    | y  |    |
-|Cisco ISR w/ HSL   |    | n  |    | Fails because of duplicate fields, see: https://github.com/logstash-plugins/logstash-codec-netflow/issues/93
-|Cisco WLC          |    | y  |    |
-|Citrix Netscaler   |    |    | y  | Still some unknown fields, labeled netscalerUnknown
-|fprobe             | y  |    |    |
-|Fortigate FortiOS  |    | y  |    |
-|ipt_NETFLOW        | y  | y  | y  |
-|Juniper MX80       | y  |    |    | SW > 12.3R8
-|Mikrotik           | y  |    | y  | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow
-|nProbe             | y  | y  | y  | L7 DPI fields now also supported
-|Nokia BRAS         |    |    | y  |
-|OpenBSD pflow      | y  | n  | y  | http://man.openbsd.org/OpenBSD-current/man4/pflow.4
-|Softflowd          | y  | y  | y  | IPFIX supported in https://github.com/djmdjm/softflowd
-|Streamcore Streamgroomer |    | y  |    |
-|Palo Alto PAN-OS   |    | y  |    |
-|Ubiquiti Edgerouter X |    | y  |    | With MPLS labels
-|VMware VDS         |    |    | y  | Still some unknown fields
-|YAF                |    |    | y  | With silk and applabel, but no DPI plugin support
-|vIPtela            |    |    | y  |
-|===========================================================================================
-
-==== Usage
-
-Example Logstash configuration that will listen on 2055/udp for Netflow v5, v9, and IPFIX:
-
-[source, ruby]
---------------------------
-input {
-  udp {
-    port => 2055
-    codec => netflow
-  }
-}
---------------------------
-
-For high-performance production environments, the configuration below will decode up to 15000 flows/sec on a dedicated 16 CPU instance. If your total flowrate exceeds 15000 flows/sec, you should use multiple Logstash instances.
-
-[source, ruby]
---------------------------
-input {
-  udp {
-    port => 2055
-    codec => netflow
-    receive_buffer_bytes => 16777216
-    workers => 16
-  }
-}
---------------------------
-
-To mitigate dropped packets, make sure to increase the Linux kernel receive buffer limit:
-
-    # sysctl -w net.core.rmem_max=$((1024*1024*16))
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Netflow Codec Configuration Options
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-versions>> |<>|No
-|=======================================================================
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-cache_save_path"]
-===== `cache_save_path`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Enables the template cache and saves it in the specified directory.
-This minimizes data loss after Logstash restarts, because the codec doesn't
-have to wait for templates to arrive again; it can instead reload templates
-received during previous runs.
-
-Template caches are saved as:
-
-* <>/netflow_templates.cache for Netflow v9 templates.
-* <>/ipfix_templates.cache for IPFIX templates.
-
-[id="{version}-plugins-{type}s-{plugin}-cache_ttl"]
-===== `cache_ttl`
-
- * Value type is <>
- * Default value is `4000`
-
-Netflow v9/v10 template cache TTL, in seconds.
-
-[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"]
-===== `include_flowset_id`
-
- * Value type is <>
- * Default value is `false`
-
-This option only makes sense for IPFIX; Netflow v9 events already include the
-flowset ID. Setting it to `true` includes the `flowset_id` in events, which
-allows you to work with sequences, for instance with the aggregate filter.
-
-[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"]
-===== `ipfix_definitions`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to a YAML file that overrides the IPFIX field definitions.
-
-The format is very similar to the Netflow version, except that a top-level
-Private Enterprise Number (PEN) key is added:
-
-[source,yaml]
---------------------------
-pen:
-  id:
-  - :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string
-  - :name
-  id:
-  - :skip
---------------------------
-
-There is an implicit PEN 0 for the standard fields.
-
-See for the base set.
-
-[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"]
-===== `netflow_definitions`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to a YAML file that overrides the Netflow field definitions.
-
-Each Netflow field is defined like so:
-
-[source,yaml]
---------------------------
-id:
-- default length in bytes
-- :name
-id:
-- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string
-- :name
-id:
-- :skip
---------------------------
-
-See for the base set.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * Default value is `"netflow"`
-
-Specify the field in which to store the decoded Netflow data.
-
-[id="{version}-plugins-{type}s-{plugin}-versions"]
-===== `versions`
-
- * Value type is <>
- * Default value is `[5, 9, 10]`
-
-Specify which Netflow versions you will accept.
-
-
diff --git a/docs/versioned-plugins/codecs/netflow-v3.4.1.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.4.1.asciidoc
deleted file mode 100644
index e453dbfda..000000000
--- a/docs/versioned-plugins/codecs/netflow-v3.4.1.asciidoc
+++ /dev/null
@@ -1,192 +0,0 @@
-:plugin: netflow
-:type: codec
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.4.1
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.4.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="plugins-{type}-{plugin}"]
-
-=== Netflow codec plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows.
- -==== Supported Netflow/IPFIX exporters - -The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec: - -[cols="6,^2,^2,^2,12",options="header"] -|=========================================================================================== -|Netflow exporter | v5 | v9 | IPFIX | Remarks -|Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd -|nProbe | y | y | y | -|ipt_NETFLOW | y | y | y | -|Cisco ASA | | y | | -|Cisco IOS 12.x | | y | | -|fprobe | y | | | -|Juniper MX80 | y | | | SW > 12.3R8 -|OpenBSD pflow | y | n | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4 -|Mikrotik 6.35.4 | y | | n | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow -|Ubiquiti Edgerouter X | | y | | With MPLS labels -|Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown -|=========================================================================================== - -==== Usage - -Example Logstash configuration: - -[source, ruby] --------------------------- -input { - udp { - host => localhost - port => 2055 - codec => netflow { - versions => [5, 9] - } - type => netflow - } - udp { - host => localhost - port => 4739 - codec => netflow { - versions => [10] - target => ipfix - } - type => ipfix - } - tcp { - host => localhost - port => 4739 - codec => netflow { - versions => [10] - target => ipfix - } - type => ipfix - } -} --------------------------- - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Netflow Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-versions>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-cache_save_path"] -===== `cache_save_path` - - * Value type is <> - * There is no default value for this setting. - -Where to save the template cache -This helps speed up processing when restarting logstash -(So you don't have to await the arrival of templates) -cache will save as path/netflow_templates.cache and/or path/ipfix_templates.cache - -[id="{version}-plugins-{type}s-{plugin}-cache_ttl"] -===== `cache_ttl` - - * Value type is <> - * Default value is `4000` - -Netflow v9/v10 template cache TTL (minutes) - -[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"] -===== `include_flowset_id` - - * Value type is <> - * Default value is `false` - -Only makes sense for ipfix, v9 already includes this -Setting to true will include the flowset_id in events -Allows you to work with sequences, for instance with the aggregate filter - -[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"] -===== `ipfix_definitions` - - * Value type is <> - * There is no default value for this setting. 
- -Override YAML file containing IPFIX field definitions - -Very similar to the Netflow version except there is a top level Private -Enterprise Number (PEN) key added: - -[source,yaml] --------------------------- -pen: -id: -- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string -- :name -id: -- :skip --------------------------- - -There is an implicit PEN 0 for the standard fields. - -See for the base set. - -[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"] -===== `netflow_definitions` - - * Value type is <> - * There is no default value for this setting. - -Override YAML file containing Netflow field definitions - -Each Netflow field is defined like so: - -[source,yaml] --------------------------- -id: -- default length in bytes -- :name -id: -- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string -- :name -id: -- :skip --------------------------- - -See for the base set. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"netflow"` - -Specify into what field you want the Netflow data. - -[id="{version}-plugins-{type}s-{plugin}-versions"] -===== `versions` - - * Value type is <> - * Default value is `[5, 9, 10]` - -Specify which Netflow versions you will accept. - - diff --git a/docs/versioned-plugins/codecs/netflow-v3.5.0.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.5.0.asciidoc deleted file mode 100644 index f7d2b9fa9..000000000 --- a/docs/versioned-plugins/codecs/netflow-v3.5.0.asciidoc +++ /dev/null @@ -1,192 +0,0 @@ -:plugin: netflow -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.5.0 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.5.0/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Netflow codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows. 
- -==== Supported Netflow/IPFIX exporters - -The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec: - -[cols="6,^2,^2,^2,12",options="header"] -|=========================================================================================== -|Netflow exporter | v5 | v9 | IPFIX | Remarks -|Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd -|nProbe | y | y | y | -|ipt_NETFLOW | y | y | y | -|Cisco ASA | | y | | -|Cisco IOS 12.x | | y | | -|fprobe | y | | | -|Juniper MX80 | y | | | SW > 12.3R8 -|OpenBSD pflow | y | n | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4 -|Mikrotik 6.35.4 | y | | n | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow -|Ubiquiti Edgerouter X | | y | | With MPLS labels -|Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown -|=========================================================================================== - -==== Usage - -Example Logstash configuration: - -[source, ruby] --------------------------- -input { - udp { - host => localhost - port => 2055 - codec => netflow { - versions => [5, 9] - } - type => netflow - } - udp { - host => localhost - port => 4739 - codec => netflow { - versions => [10] - target => ipfix - } - type => ipfix - } - tcp { - host => localhost - port => 4739 - codec => netflow { - versions => [10] - target => ipfix - } - type => ipfix - } -} --------------------------- - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Netflow Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-versions>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-cache_save_path"] -===== `cache_save_path` - - * Value type is <> - * There is no default value for this setting. - -Where to save the template cache -This helps speed up processing when restarting logstash -(So you don't have to await the arrival of templates) -cache will save as path/netflow_templates.cache and/or path/ipfix_templates.cache - -[id="{version}-plugins-{type}s-{plugin}-cache_ttl"] -===== `cache_ttl` - - * Value type is <> - * Default value is `4000` - -Netflow v9/v10 template cache TTL (minutes) - -[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"] -===== `include_flowset_id` - - * Value type is <> - * Default value is `false` - -Only makes sense for ipfix, v9 already includes this -Setting to true will include the flowset_id in events -Allows you to work with sequences, for instance with the aggregate filter - -[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"] -===== `ipfix_definitions` - - * Value type is <> - * There is no default value for this setting. 
- -Override YAML file containing IPFIX field definitions - -Very similar to the Netflow version except there is a top level Private -Enterprise Number (PEN) key added: - -[source,yaml] --------------------------- -pen: -id: -- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string -- :name -id: -- :skip --------------------------- - -There is an implicit PEN 0 for the standard fields. - -See for the base set. - -[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"] -===== `netflow_definitions` - - * Value type is <> - * There is no default value for this setting. - -Override YAML file containing Netflow field definitions - -Each Netflow field is defined like so: - -[source,yaml] --------------------------- -id: -- default length in bytes -- :name -id: -- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string -- :name -id: -- :skip --------------------------- - -See for the base set. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"netflow"` - -Specify into what field you want the Netflow data. - -[id="{version}-plugins-{type}s-{plugin}-versions"] -===== `versions` - - * Value type is <> - * Default value is `[5, 9, 10]` - -Specify which Netflow versions you will accept. - - diff --git a/docs/versioned-plugins/codecs/netflow-v3.5.1.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.5.1.asciidoc deleted file mode 100644 index f3bef014b..000000000 --- a/docs/versioned-plugins/codecs/netflow-v3.5.1.asciidoc +++ /dev/null @@ -1,187 +0,0 @@ -:plugin: netflow -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.5.1 -:release_date: 2017-07-18 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.5.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Netflow codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows. 
- -==== Supported Netflow/IPFIX exporters - -This codec supports: - -* Netflow v5 -* Netflow v9 -* IPFIX - -The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec: - -[cols="6,^2,^2,^2,12",options="header"] -|=========================================================================================== -|Netflow exporter | v5 | v9 | IPFIX | Remarks -|Barracuda Firewall | | | y | -|Cisco ASA | | y | | -|Cisco ASR | | y | | -|Cisco IOS 12.x | | y | | -|Cisco WLC | | y | | -|Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown -|fprobe | y | | | -|Fortigate FortiOS 5.2 | | y | | -|ipt_NETFLOW | y | y | y | -|Juniper MX80 | y | | | SW > 12.3R8 -|Mikrotik 6.35.4 | y | | n | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow -|nProbe | y | y | y | -|OpenBSD pflow | y | n | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4 -|Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd -|Streamcore Streamgroomer | | y | | -|Ubiquiti Edgerouter X | | y | | With MPLS labels -|VMware VDS | | | y | Still some unknown fields -|=========================================================================================== - -==== Usage - -Example Logstash configuration that will listen on 2055/udp for Netflow v5,v9 and IPFIX: - -[source, ruby] --------------------------- -input { - udp { - port => 2055 - codec => netflow - } -} --------------------------- - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Netflow Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-versions>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-cache_save_path"] -===== `cache_save_path` - - * Value type is <> - * There is no default value for this setting. - -Enables the template cache and saves it in the specified directory. This -minimizes data loss after Logstash restarts because the codec doesn't have to -wait for the arrival of templates, but instead reload already received -templates received during previous runs. - -Template caches are saved as: - -* <>/netflow_templates.cache for Netflow v9 templates. -* <>/ipfix_templates.cache for IPFIX templates. - -[id="{version}-plugins-{type}s-{plugin}-cache_ttl"] -===== `cache_ttl` - - * Value type is <> - * Default value is `4000` - -Netflow v9/v10 template cache TTL (minutes) - -[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"] -===== `include_flowset_id` - - * Value type is <> - * Default value is `false` - -Only makes sense for ipfix, v9 already includes this -Setting to true will include the flowset_id in events -Allows you to work with sequences, for instance with the aggregate filter - -[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"] -===== `ipfix_definitions` - - * Value type is <> - * There is no default value for this setting. 
- -Override YAML file containing IPFIX field definitions - -Very similar to the Netflow version except there is a top level Private -Enterprise Number (PEN) key added: - -[source,yaml] --------------------------- -pen: -id: -- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string -- :name -id: -- :skip --------------------------- - -There is an implicit PEN 0 for the standard fields. - -See for the base set. - -[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"] -===== `netflow_definitions` - - * Value type is <> - * There is no default value for this setting. - -Override YAML file containing Netflow field definitions - -Each Netflow field is defined like so: - -[source,yaml] --------------------------- -id: -- default length in bytes -- :name -id: -- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string -- :name -id: -- :skip --------------------------- - -See for the base set. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"netflow"` - -Specify into what field you want the Netflow data. - -[id="{version}-plugins-{type}s-{plugin}-versions"] -===== `versions` - - * Value type is <> - * Default value is `[5, 9, 10]` - -Specify which Netflow versions you will accept. - - diff --git a/docs/versioned-plugins/codecs/netflow-v3.5.2.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.5.2.asciidoc deleted file mode 100644 index 520963634..000000000 --- a/docs/versioned-plugins/codecs/netflow-v3.5.2.asciidoc +++ /dev/null @@ -1,206 +0,0 @@ -:plugin: netflow -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.5.2 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.5.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Netflow codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows. 
-
-==== Supported Netflow/IPFIX exporters
-
-This codec supports:
-
-* Netflow v5
-* Netflow v9
-* IPFIX
-
-The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec:
-
-[cols="6,^2,^2,^2,12",options="header"]
-|===========================================================================================
-|Netflow exporter | v5 | v9 | IPFIX | Remarks
-|Barracuda Firewall |    |    | y  |
-|Cisco ASA          |    | y  |    |
-|Cisco ASR          |    | y  |    |
-|Cisco IOS 12.x     |    | y  |    |
-|Cisco WLC          |    | y  |    |
-|Citrix Netscaler   |    |    | y  | Still some unknown fields, labeled netscalerUnknown
-|fprobe             | y  |    |    |
-|Fortigate FortiOS  |    | y  |    |
-|ipt_NETFLOW        | y  | y  | y  |
-|Juniper MX80       | y  |    |    | SW > 12.3R8
-|Mikrotik           | y  |    | y  | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow
-|nProbe             | y  | y  | y  |
-|OpenBSD pflow      | y  | n  | y  | http://man.openbsd.org/OpenBSD-current/man4/pflow.4
-|Softflowd          | y  | y  | y  | IPFIX supported in https://github.com/djmdjm/softflowd
-|Streamcore Streamgroomer |    | y  |    |
-|Ubiquiti Edgerouter X |    | y  |    | With MPLS labels
-|VMware VDS         |    |    | y  | Still some unknown fields
-|===========================================================================================
-
-==== Usage
-
-Example Logstash configuration that will listen on 2055/udp for Netflow v5, v9, and IPFIX:
-
-[source, ruby]
---------------------------
-input {
-  udp {
-    port => 2055
-    codec => netflow
-  }
-}
---------------------------
-
-For high-performance production environments, the configuration below will decode up to 6000 flows/sec on an 8 CPU instance. If your total flowrate exceeds 6000 flows/sec, you should use multiple Logstash instances.
-
-[source, ruby]
---------------------------
-input {
-  udp {
-    port => 2055
-    receive_buffer_bytes => 16777216
-    codec => netflow
-    workers => 6
-  }
-}
---------------------------
-
-Make sure to increase the Linux kernel receive buffer limit:
-
-    # sysctl -w net.core.rmem_max=$((1024*1024*16))
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Netflow Codec Configuration Options
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-versions>> |<>|No
-|=======================================================================
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-cache_save_path"]
-===== `cache_save_path`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Enables the template cache and saves it in the specified directory. This
-minimizes data loss after Logstash restarts, because the codec doesn't have to
-wait for templates to arrive again; it can instead reload templates received
-during previous runs.
-
-Template caches are saved as:
-
-* <>/netflow_templates.cache for Netflow v9 templates.
-* <>/ipfix_templates.cache for IPFIX templates.
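-
-As a minimal sketch, assuming `/var/lib/logstash` is a directory writable by
-the Logstash user (the path itself is a placeholder), enabling the template
-cache looks like this:
-
-[source, ruby]
---------------------------
-input {
-  udp {
-    port => 2055
-    codec => netflow {
-      # placeholder directory; both cache files are written here
-      cache_save_path => "/var/lib/logstash"
-    }
-  }
-}
---------------------------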
- -[id="{version}-plugins-{type}s-{plugin}-cache_ttl"] -===== `cache_ttl` - - * Value type is <> - * Default value is `4000` - -Netflow v9/v10 template cache TTL (seconds) - -[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"] -===== `include_flowset_id` - - * Value type is <> - * Default value is `false` - -Only makes sense for ipfix, v9 already includes this -Setting to true will include the flowset_id in events -Allows you to work with sequences, for instance with the aggregate filter - -[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"] -===== `ipfix_definitions` - - * Value type is <> - * There is no default value for this setting. - -Override YAML file containing IPFIX field definitions - -Very similar to the Netflow version except there is a top level Private -Enterprise Number (PEN) key added: - -[source,yaml] --------------------------- -pen: -id: -- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string -- :name -id: -- :skip --------------------------- - -There is an implicit PEN 0 for the standard fields. - -See for the base set. - -[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"] -===== `netflow_definitions` - - * Value type is <> - * There is no default value for this setting. - -Override YAML file containing Netflow field definitions - -Each Netflow field is defined like so: - -[source,yaml] --------------------------- -id: -- default length in bytes -- :name -id: -- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string -- :name -id: -- :skip --------------------------- - -See for the base set. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"netflow"` - -Specify into what field you want the Netflow data. - -[id="{version}-plugins-{type}s-{plugin}-versions"] -===== `versions` - - * Value type is <> - * Default value is `[5, 9, 10]` - -Specify which Netflow versions you will accept. - - diff --git a/docs/versioned-plugins/codecs/netflow-v3.6.0.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.6.0.asciidoc deleted file mode 100644 index aa1fd859a..000000000 --- a/docs/versioned-plugins/codecs/netflow-v3.6.0.asciidoc +++ /dev/null @@ -1,205 +0,0 @@ -:plugin: netflow -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.6.0 -:release_date: 2017-09-28 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.6.0/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Netflow codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows. 
- -==== Supported Netflow/IPFIX exporters - -This codec supports: - -* Netflow v5 -* Netflow v9 -* IPFIX - -The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec: - -[cols="6,^2,^2,^2,12",options="header"] -|=========================================================================================== -|Netflow exporter | v5 | v9 | IPFIX | Remarks -|Barracuda Firewall | | | y | -|Cisco ASA | | y | | -|Cisco ASR | | y | | -|Cisco IOS 12.x | | y | | -|Cisco ISR w/ HSL | | n | | https://github.com/logstash-plugins/logstash-codec-netflow/issues/93 -|Cisco WLC | | y | | -|Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown -|fprobe | y | | | -|Fortigate FortiOS | | y | | -|ipt_NETFLOW | y | y | y | -|Juniper MX80 | y | | | SW > 12.3R8 -|Mikrotik | y | | y | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow -|nProbe | y | y | y | L7 DPI fields now also supported -|OpenBSD pflow | y | n | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4 -|Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd -|Streamcore Streamgroomer | | y | | -|Ubiquiti Edgerouter X | | y | | With MPLS labels -|VMware VDS | | | y | Still some unknown fields -|=========================================================================================== - -==== Usage - -Example Logstash configuration that will listen on 2055/udp for Netflow v5,v9 and IPFIX: - -[source, ruby] --------------------------- -input { - udp { - port => 2055 - codec => netflow - } -} --------------------------- - -For high-performance production environments the configuration below will decode up to 15000 flows/sec on a dedicated 16 CPU instance. If your total flowrate exceeds 15000 flows/sec, you should use multiple Logstash instances. - -[source, ruby] --------------------------- -input { - udp { - port => 2055 - codec => netflow - receive_buffer_bytes => 16777216 - workers => 16 - } --------------------------- - -To mitigate dropped packets, make sure to increase the Linux kernel receive buffer limit: - - # sysctl -w net.core.rmem_max=$((1024*1024*16)) - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Netflow Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-versions>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-cache_save_path"] -===== `cache_save_path` - - * Value type is <> - * There is no default value for this setting. - -Enables the template cache and saves it in the specified directory. This -minimizes data loss after Logstash restarts because the codec doesn't have to -wait for the arrival of templates, but instead reload already received -templates received during previous runs. - -Template caches are saved as: - -* <>/netflow_templates.cache for Netflow v9 templates. -* <>/ipfix_templates.cache for IPFIX templates. 
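-
-As a hedged example of the `include_flowset_id` option described below, the
-following listens for IPFIX on its registered port and adds the `flowset_id`
-field to each event, which can then be used to correlate sequences, for
-instance with the aggregate filter:
-
-[source, ruby]
---------------------------
-input {
-  udp {
-    port => 4739
-    codec => netflow {
-      versions => [10]
-      # only meaningful for IPFIX; v9 events already carry this field
-      include_flowset_id => true
-    }
-  }
-}
---------------------------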
- -[id="{version}-plugins-{type}s-{plugin}-cache_ttl"] -===== `cache_ttl` - - * Value type is <> - * Default value is `4000` - -Netflow v9/v10 template cache TTL (seconds) - -[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"] -===== `include_flowset_id` - - * Value type is <> - * Default value is `false` - -Only makes sense for ipfix, v9 already includes this -Setting to true will include the flowset_id in events -Allows you to work with sequences, for instance with the aggregate filter - -[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"] -===== `ipfix_definitions` - - * Value type is <> - * There is no default value for this setting. - -Override YAML file containing IPFIX field definitions - -Very similar to the Netflow version except there is a top level Private -Enterprise Number (PEN) key added: - -[source,yaml] --------------------------- -pen: -id: -- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string -- :name -id: -- :skip --------------------------- - -There is an implicit PEN 0 for the standard fields. - -See for the base set. - -[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"] -===== `netflow_definitions` - - * Value type is <> - * There is no default value for this setting. - -Override YAML file containing Netflow field definitions - -Each Netflow field is defined like so: - -[source,yaml] --------------------------- -id: -- default length in bytes -- :name -id: -- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string -- :name -id: -- :skip --------------------------- - -See for the base set. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"netflow"` - -Specify into what field you want the Netflow data. - -[id="{version}-plugins-{type}s-{plugin}-versions"] -===== `versions` - - * Value type is <> - * Default value is `[5, 9, 10]` - -Specify which Netflow versions you will accept. - - diff --git a/docs/versioned-plugins/codecs/netflow-v3.7.0.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.7.0.asciidoc deleted file mode 100644 index 7a8f2deff..000000000 --- a/docs/versioned-plugins/codecs/netflow-v3.7.0.asciidoc +++ /dev/null @@ -1,205 +0,0 @@ -:plugin: netflow -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.7.0 -:release_date: 2017-09-29 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.7.0/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Netflow codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows. 
- -==== Supported Netflow/IPFIX exporters - -This codec supports: - -* Netflow v5 -* Netflow v9 -* IPFIX - -The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec: - -[cols="6,^2,^2,^2,12",options="header"] -|=========================================================================================== -|Netflow exporter | v5 | v9 | IPFIX | Remarks -|Barracuda Firewall | | | y | -|Cisco ASA | | y | | -|Cisco ASR | | y | | -|Cisco IOS 12.x | | y | | -|Cisco ISR w/ HSL | | n | | https://github.com/logstash-plugins/logstash-codec-netflow/issues/93 -|Cisco WLC | | y | | -|Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown -|fprobe | y | | | -|Fortigate FortiOS | | y | | -|ipt_NETFLOW | y | y | y | -|Juniper MX80 | y | | | SW > 12.3R8 -|Mikrotik | y | | y | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow -|nProbe | y | y | y | L7 DPI fields now also supported -|OpenBSD pflow | y | n | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4 -|Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd -|Streamcore Streamgroomer | | y | | -|Ubiquiti Edgerouter X | | y | | With MPLS labels -|VMware VDS | | | y | Still some unknown fields -|=========================================================================================== - -==== Usage - -Example Logstash configuration that will listen on 2055/udp for Netflow v5,v9 and IPFIX: - -[source, ruby] --------------------------- -input { - udp { - port => 2055 - codec => netflow - } -} --------------------------- - -For high-performance production environments the configuration below will decode up to 15000 flows/sec on a dedicated 16 CPU instance. If your total flowrate exceeds 15000 flows/sec, you should use multiple Logstash instances. - -[source, ruby] --------------------------- -input { - udp { - port => 2055 - codec => netflow - receive_buffer_bytes => 16777216 - workers => 16 - } --------------------------- - -To mitigate dropped packets, make sure to increase the Linux kernel receive buffer limit: - - # sysctl -w net.core.rmem_max=$((1024*1024*16)) - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Netflow Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-versions>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-cache_save_path"] -===== `cache_save_path` - - * Value type is <> - * There is no default value for this setting. - -Enables the template cache and saves it in the specified directory. This -minimizes data loss after Logstash restarts because the codec doesn't have to -wait for the arrival of templates, but instead reload already received -templates received during previous runs. - -Template caches are saved as: - -* <>/netflow_templates.cache for Netflow v9 templates. -* <>/ipfix_templates.cache for IPFIX templates. 
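-
-For instance, to accept only IPFIX on its registered port and store the decoded
-data under a separate field, the `versions` and `target` options described
-below can be combined (a sketch; port and field name follow the examples used
-elsewhere in these docs):
-
-[source, ruby]
---------------------------
-input {
-  udp {
-    port => 4739
-    codec => netflow {
-      versions => [10]
-      target => "ipfix"
-    }
-  }
-}
---------------------------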
- -[id="{version}-plugins-{type}s-{plugin}-cache_ttl"] -===== `cache_ttl` - - * Value type is <> - * Default value is `4000` - -Netflow v9/v10 template cache TTL (seconds) - -[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"] -===== `include_flowset_id` - - * Value type is <> - * Default value is `false` - -Only makes sense for ipfix, v9 already includes this -Setting to true will include the flowset_id in events -Allows you to work with sequences, for instance with the aggregate filter - -[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"] -===== `ipfix_definitions` - - * Value type is <> - * There is no default value for this setting. - -Override YAML file containing IPFIX field definitions - -Very similar to the Netflow version except there is a top level Private -Enterprise Number (PEN) key added: - -[source,yaml] --------------------------- -pen: -id: -- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string -- :name -id: -- :skip --------------------------- - -There is an implicit PEN 0 for the standard fields. - -See for the base set. - -[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"] -===== `netflow_definitions` - - * Value type is <> - * There is no default value for this setting. - -Override YAML file containing Netflow field definitions - -Each Netflow field is defined like so: - -[source,yaml] --------------------------- -id: -- default length in bytes -- :name -id: -- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string -- :name -id: -- :skip --------------------------- - -See for the base set. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"netflow"` - -Specify into what field you want the Netflow data. - -[id="{version}-plugins-{type}s-{plugin}-versions"] -===== `versions` - - * Value type is <> - * Default value is `[5, 9, 10]` - -Specify which Netflow versions you will accept. - - diff --git a/docs/versioned-plugins/codecs/netflow-v3.7.1.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.7.1.asciidoc deleted file mode 100644 index 64993ba08..000000000 --- a/docs/versioned-plugins/codecs/netflow-v3.7.1.asciidoc +++ /dev/null @@ -1,205 +0,0 @@ -:plugin: netflow -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.7.1 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.7.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Netflow codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows. 
- -==== Supported Netflow/IPFIX exporters - -This codec supports: - -* Netflow v5 -* Netflow v9 -* IPFIX - -The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec: - -[cols="6,^2,^2,^2,12",options="header"] -|=========================================================================================== -|Netflow exporter | v5 | v9 | IPFIX | Remarks -|Barracuda Firewall | | | y | -|Cisco ASA | | y | | -|Cisco ASR | | y | | -|Cisco IOS 12.x | | y | | -|Cisco ISR w/ HSL | | n | | https://github.com/logstash-plugins/logstash-codec-netflow/issues/93 -|Cisco WLC | | y | | -|Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown -|fprobe | y | | | -|Fortigate FortiOS | | y | | -|ipt_NETFLOW | y | y | y | -|Juniper MX80 | y | | | SW > 12.3R8 -|Mikrotik | y | | y | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow -|nProbe | y | y | y | L7 DPI fields now also supported -|OpenBSD pflow | y | n | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4 -|Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd -|Streamcore Streamgroomer | | y | | -|Ubiquiti Edgerouter X | | y | | With MPLS labels -|VMware VDS | | | y | Still some unknown fields -|=========================================================================================== - -==== Usage - -Example Logstash configuration that will listen on 2055/udp for Netflow v5,v9 and IPFIX: - -[source, ruby] --------------------------- -input { - udp { - port => 2055 - codec => netflow - } -} --------------------------- - -For high-performance production environments the configuration below will decode up to 15000 flows/sec on a dedicated 16 CPU instance. If your total flowrate exceeds 15000 flows/sec, you should use multiple Logstash instances. - -[source, ruby] --------------------------- -input { - udp { - port => 2055 - codec => netflow - receive_buffer_bytes => 16777216 - workers => 16 - } --------------------------- - -To mitigate dropped packets, make sure to increase the Linux kernel receive buffer limit: - - # sysctl -w net.core.rmem_max=$((1024*1024*16)) - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Netflow Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-versions>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-cache_save_path"] -===== `cache_save_path` - - * Value type is <> - * There is no default value for this setting. - -Enables the template cache and saves it in the specified directory. This -minimizes data loss after Logstash restarts because the codec doesn't have to -wait for the arrival of templates, but instead reload already received -templates received during previous runs. - -Template caches are saved as: - -* <>/netflow_templates.cache for Netflow v9 templates. -* <>/ipfix_templates.cache for IPFIX templates. 
- -[id="{version}-plugins-{type}s-{plugin}-cache_ttl"] -===== `cache_ttl` - - * Value type is <> - * Default value is `4000` - -Netflow v9/v10 template cache TTL (seconds) - -[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"] -===== `include_flowset_id` - - * Value type is <> - * Default value is `false` - -Only makes sense for ipfix, v9 already includes this -Setting to true will include the flowset_id in events -Allows you to work with sequences, for instance with the aggregate filter - -[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"] -===== `ipfix_definitions` - - * Value type is <> - * There is no default value for this setting. - -Override YAML file containing IPFIX field definitions - -Very similar to the Netflow version except there is a top level Private -Enterprise Number (PEN) key added: - -[source,yaml] --------------------------- -pen: -id: -- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string -- :name -id: -- :skip --------------------------- - -There is an implicit PEN 0 for the standard fields. - -See for the base set. - -[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"] -===== `netflow_definitions` - - * Value type is <> - * There is no default value for this setting. - -Override YAML file containing Netflow field definitions - -Each Netflow field is defined like so: - -[source,yaml] --------------------------- -id: -- default length in bytes -- :name -id: -- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string -- :name -id: -- :skip --------------------------- - -See for the base set. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"netflow"` - -Specify into what field you want the Netflow data. - -[id="{version}-plugins-{type}s-{plugin}-versions"] -===== `versions` - - * Value type is <> - * Default value is `[5, 9, 10]` - -Specify which Netflow versions you will accept. - - diff --git a/docs/versioned-plugins/codecs/netflow-v3.8.0.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.8.0.asciidoc deleted file mode 100644 index 1b24368d1..000000000 --- a/docs/versioned-plugins/codecs/netflow-v3.8.0.asciidoc +++ /dev/null @@ -1,205 +0,0 @@ -:plugin: netflow -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.8.0 -:release_date: 2017-11-11 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.8.0/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Netflow codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows. 
- -==== Supported Netflow/IPFIX exporters - -This codec supports: - -* Netflow v5 -* Netflow v9 -* IPFIX - -The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec: - -[cols="6,^2,^2,^2,12",options="header"] -|=========================================================================================== -|Netflow exporter | v5 | v9 | IPFIX | Remarks -|Barracuda Firewall | | | y | -|Cisco ASA | | y | | -|Cisco ASR | | y | | -|Cisco IOS 12.x | | y | | -|Cisco ISR w/ HSL | | n | | https://github.com/logstash-plugins/logstash-codec-netflow/issues/93 -|Cisco WLC | | y | | -|Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown -|fprobe | y | | | -|Fortigate FortiOS | | y | | -|ipt_NETFLOW | y | y | y | -|Juniper MX80 | y | | | SW > 12.3R8 -|Mikrotik | y | | y | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow -|nProbe | y | y | y | L7 DPI fields now also supported -|OpenBSD pflow | y | n | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4 -|Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd -|Streamcore Streamgroomer | | y | | -|Ubiquiti Edgerouter X | | y | | With MPLS labels -|VMware VDS | | | y | Still some unknown fields -|=========================================================================================== - -==== Usage - -Example Logstash configuration that will listen on 2055/udp for Netflow v5,v9 and IPFIX: - -[source, ruby] --------------------------- -input { - udp { - port => 2055 - codec => netflow - } -} --------------------------- - -For high-performance production environments the configuration below will decode up to 15000 flows/sec on a dedicated 16 CPU instance. If your total flowrate exceeds 15000 flows/sec, you should use multiple Logstash instances. - -[source, ruby] --------------------------- -input { - udp { - port => 2055 - codec => netflow - receive_buffer_bytes => 16777216 - workers => 16 - } --------------------------- - -To mitigate dropped packets, make sure to increase the Linux kernel receive buffer limit: - - # sysctl -w net.core.rmem_max=$((1024*1024*16)) - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Netflow Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-versions>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-cache_save_path"] -===== `cache_save_path` - - * Value type is <> - * There is no default value for this setting. - -Enables the template cache and saves it in the specified directory. This -minimizes data loss after Logstash restarts because the codec doesn't have to -wait for the arrival of templates, but instead reload already received -templates received during previous runs. - -Template caches are saved as: - -* <>/netflow_templates.cache for Netflow v9 templates. -* <>/ipfix_templates.cache for IPFIX templates. 
- -[id="{version}-plugins-{type}s-{plugin}-cache_ttl"] -===== `cache_ttl` - - * Value type is <> - * Default value is `4000` - -Netflow v9/v10 template cache TTL (seconds) - -[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"] -===== `include_flowset_id` - - * Value type is <> - * Default value is `false` - -Only makes sense for ipfix, v9 already includes this -Setting to true will include the flowset_id in events -Allows you to work with sequences, for instance with the aggregate filter - -[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"] -===== `ipfix_definitions` - - * Value type is <> - * There is no default value for this setting. - -Override YAML file containing IPFIX field definitions - -Very similar to the Netflow version except there is a top level Private -Enterprise Number (PEN) key added: - -[source,yaml] --------------------------- -pen: -id: -- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string -- :name -id: -- :skip --------------------------- - -There is an implicit PEN 0 for the standard fields. - -See for the base set. - -[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"] -===== `netflow_definitions` - - * Value type is <> - * There is no default value for this setting. - -Override YAML file containing Netflow field definitions - -Each Netflow field is defined like so: - -[source,yaml] --------------------------- -id: -- default length in bytes -- :name -id: -- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string -- :name -id: -- :skip --------------------------- - -See for the base set. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"netflow"` - -Specify into what field you want the Netflow data. - -[id="{version}-plugins-{type}s-{plugin}-versions"] -===== `versions` - - * Value type is <> - * Default value is `[5, 9, 10]` - -Specify which Netflow versions you will accept. - - diff --git a/docs/versioned-plugins/codecs/netflow-v3.8.1.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.8.1.asciidoc deleted file mode 100644 index 849eb662c..000000000 --- a/docs/versioned-plugins/codecs/netflow-v3.8.1.asciidoc +++ /dev/null @@ -1,207 +0,0 @@ -:plugin: netflow -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.8.1 -:release_date: 2017-11-19 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.8.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Netflow codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows. 
- -==== Supported Netflow/IPFIX exporters - -This codec supports: - -* Netflow v5 -* Netflow v9 -* IPFIX - -The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec: - -[cols="6,^2,^2,^2,12",options="header"] -|=========================================================================================== -|Netflow exporter | v5 | v9 | IPFIX | Remarks -|Barracuda Firewall | | | y | -|Cisco ASA | | y | | -|Cisco ASR | | y | | -|Cisco IOS 12.x | | y | | -|Cisco ISR w/ HSL | | n | | https://github.com/logstash-plugins/logstash-codec-netflow/issues/93 -|Cisco WLC | | y | | -|Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown -|fprobe | y | | | -|Fortigate FortiOS | | y | | -|ipt_NETFLOW | y | y | y | -|Juniper MX80 | y | | | SW > 12.3R8 -|Mikrotik | y | | y | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow -|nProbe | y | y | y | L7 DPI fields now also supported -|OpenBSD pflow | y | n | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4 -|Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd -|Streamcore Streamgroomer | | y | | -|Palo Alto PAN-OS | | y | | -|Ubiquiti Edgerouter X | | y | | With MPLS labels -|VMware VDS | | | y | Still some unknown fields -|YAF | | | y | With silk and applabel, but no DPI plugin support -|=========================================================================================== - -==== Usage - -Example Logstash configuration that will listen on 2055/udp for Netflow v5,v9 and IPFIX: - -[source, ruby] --------------------------- -input { - udp { - port => 2055 - codec => netflow - } -} --------------------------- - -For high-performance production environments the configuration below will decode up to 15000 flows/sec on a dedicated 16 CPU instance. If your total flowrate exceeds 15000 flows/sec, you should use multiple Logstash instances. - -[source, ruby] --------------------------- -input { - udp { - port => 2055 - codec => netflow - receive_buffer_bytes => 16777216 - workers => 16 - } --------------------------- - -To mitigate dropped packets, make sure to increase the Linux kernel receive buffer limit: - - # sysctl -w net.core.rmem_max=$((1024*1024*16)) - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Netflow Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-versions>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-cache_save_path"] -===== `cache_save_path` - - * Value type is <> - * There is no default value for this setting. - -Enables the template cache and saves it in the specified directory. This -minimizes data loss after Logstash restarts because the codec doesn't have to -wait for the arrival of templates, but instead reload already received -templates received during previous runs. 
- -Template caches are saved as: - -* <>/netflow_templates.cache for Netflow v9 templates. -* <>/ipfix_templates.cache for IPFIX templates. - -[id="{version}-plugins-{type}s-{plugin}-cache_ttl"] -===== `cache_ttl` - - * Value type is <> - * Default value is `4000` - -Netflow v9/v10 template cache TTL (seconds) - -[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"] -===== `include_flowset_id` - - * Value type is <> - * Default value is `false` - -Only makes sense for ipfix, v9 already includes this -Setting to true will include the flowset_id in events -Allows you to work with sequences, for instance with the aggregate filter - -[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"] -===== `ipfix_definitions` - - * Value type is <> - * There is no default value for this setting. - -Override YAML file containing IPFIX field definitions - -Very similar to the Netflow version except there is a top level Private -Enterprise Number (PEN) key added: - -[source,yaml] --------------------------- -pen: -id: -- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string -- :name -id: -- :skip --------------------------- - -There is an implicit PEN 0 for the standard fields. - -See for the base set. - -[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"] -===== `netflow_definitions` - - * Value type is <> - * There is no default value for this setting. - -Override YAML file containing Netflow field definitions - -Each Netflow field is defined like so: - -[source,yaml] --------------------------- -id: -- default length in bytes -- :name -id: -- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string -- :name -id: -- :skip --------------------------- - -See for the base set. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"netflow"` - -Specify into what field you want the Netflow data. - -[id="{version}-plugins-{type}s-{plugin}-versions"] -===== `versions` - - * Value type is <> - * Default value is `[5, 9, 10]` - -Specify which Netflow versions you will accept. - - diff --git a/docs/versioned-plugins/codecs/netflow-v3.8.3.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.8.3.asciidoc deleted file mode 100644 index 5a38c6952..000000000 --- a/docs/versioned-plugins/codecs/netflow-v3.8.3.asciidoc +++ /dev/null @@ -1,207 +0,0 @@ -:plugin: netflow -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.8.3 -:release_date: 2017-11-23 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.8.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Netflow codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows. 
- -==== Supported Netflow/IPFIX exporters - -This codec supports: - -* Netflow v5 -* Netflow v9 -* IPFIX - -The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec: - -[cols="6,^2,^2,^2,12",options="header"] -|=========================================================================================== -|Netflow exporter | v5 | v9 | IPFIX | Remarks -|Barracuda Firewall | | | y | -|Cisco ASA | | y | | -|Cisco ASR | | y | | -|Cisco IOS 12.x | | y | | -|Cisco ISR w/ HSL | | n | | https://github.com/logstash-plugins/logstash-codec-netflow/issues/93 -|Cisco WLC | | y | | -|Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown -|fprobe | y | | | -|Fortigate FortiOS | | y | | -|ipt_NETFLOW | y | y | y | -|Juniper MX80 | y | | | SW > 12.3R8 -|Mikrotik | y | | y | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow -|nProbe | y | y | y | L7 DPI fields now also supported -|OpenBSD pflow | y | n | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4 -|Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd -|Streamcore Streamgroomer | | y | | -|Palo Alto PAN-OS | | y | | -|Ubiquiti Edgerouter X | | y | | With MPLS labels -|VMware VDS | | | y | Still some unknown fields -|YAF | | | y | With silk and applabel, but no DPI plugin support -|=========================================================================================== - -==== Usage - -Example Logstash configuration that will listen on 2055/udp for Netflow v5,v9 and IPFIX: - -[source, ruby] --------------------------- -input { - udp { - port => 2055 - codec => netflow - } -} --------------------------- - -For high-performance production environments the configuration below will decode up to 15000 flows/sec on a dedicated 16 CPU instance. If your total flowrate exceeds 15000 flows/sec, you should use multiple Logstash instances. - -[source, ruby] --------------------------- -input { - udp { - port => 2055 - codec => netflow - receive_buffer_bytes => 16777216 - workers => 16 - } --------------------------- - -To mitigate dropped packets, make sure to increase the Linux kernel receive buffer limit: - - # sysctl -w net.core.rmem_max=$((1024*1024*16)) - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Netflow Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-versions>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-cache_save_path"] -===== `cache_save_path` - - * Value type is <> - * There is no default value for this setting. - -Enables the template cache and saves it in the specified directory. This -minimizes data loss after Logstash restarts because the codec doesn't have to -wait for the arrival of templates, but instead reload already received -templates received during previous runs. 
- -Template caches are saved as: - -* <>/netflow_templates.cache for Netflow v9 templates. -* <>/ipfix_templates.cache for IPFIX templates. - -[id="{version}-plugins-{type}s-{plugin}-cache_ttl"] -===== `cache_ttl` - - * Value type is <> - * Default value is `4000` - -Netflow v9/v10 template cache TTL (seconds) - -[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"] -===== `include_flowset_id` - - * Value type is <> - * Default value is `false` - -Only makes sense for ipfix, v9 already includes this -Setting to true will include the flowset_id in events -Allows you to work with sequences, for instance with the aggregate filter - -[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"] -===== `ipfix_definitions` - - * Value type is <> - * There is no default value for this setting. - -Override YAML file containing IPFIX field definitions - -Very similar to the Netflow version except there is a top level Private -Enterprise Number (PEN) key added: - -[source,yaml] --------------------------- -pen: -id: -- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string -- :name -id: -- :skip --------------------------- - -There is an implicit PEN 0 for the standard fields. - -See for the base set. - -[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"] -===== `netflow_definitions` - - * Value type is <> - * There is no default value for this setting. - -Override YAML file containing Netflow field definitions - -Each Netflow field is defined like so: - -[source,yaml] --------------------------- -id: -- default length in bytes -- :name -id: -- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string -- :name -id: -- :skip --------------------------- - -See for the base set. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"netflow"` - -Specify into what field you want the Netflow data. - -[id="{version}-plugins-{type}s-{plugin}-versions"] -===== `versions` - - * Value type is <> - * Default value is `[5, 9, 10]` - -Specify which Netflow versions you will accept. - - diff --git a/docs/versioned-plugins/codecs/netflow-v3.9.0.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.9.0.asciidoc deleted file mode 100644 index 6a0af3dc4..000000000 --- a/docs/versioned-plugins/codecs/netflow-v3.9.0.asciidoc +++ /dev/null @@ -1,209 +0,0 @@ -:plugin: netflow -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.9.0 -:release_date: 2017-12-03 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.9.0/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Netflow codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows. 
- -==== Supported Netflow/IPFIX exporters - -This codec supports: - -* Netflow v5 -* Netflow v9 -* IPFIX - -The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec: - -[cols="6,^2,^2,^2,12",options="header"] -|=========================================================================================== -|Netflow exporter | v5 | v9 | IPFIX | Remarks -|Barracuda Firewall | | | y | -|Cisco ASA | | y | | -|Cisco ASR 1k | | | n | Fails because of duplicate fields -|Cisco ASR 9k | | y | | -|Cisco IOS 12.x | | y | | -|Cisco ISR w/ HSL | | n | | Fails because of duplicate fields, see: https://github.com/logstash-plugins/logstash-codec-netflow/issues/93 -|Cisco WLC | | y | | -|Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown -|fprobe | y | | | -|Fortigate FortiOS | | y | | -|ipt_NETFLOW | y | y | y | -|Juniper MX80 | y | | | SW > 12.3R8 -|Mikrotik | y | | y | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow -|nProbe | y | y | y | L7 DPI fields now also supported -|OpenBSD pflow | y | n | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4 -|Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd -|Streamcore Streamgroomer | | y | | -|Palo Alto PAN-OS | | y | | -|Ubiquiti Edgerouter X | | y | | With MPLS labels -|VMware VDS | | | y | Still some unknown fields -|YAF | | | y | With silk and applabel, but no DPI plugin support -|vIPtela | | | y | -|=========================================================================================== - -==== Usage - -Example Logstash configuration that will listen on 2055/udp for Netflow v5,v9 and IPFIX: - -[source, ruby] --------------------------- -input { - udp { - port => 2055 - codec => netflow - } -} --------------------------- - -For high-performance production environments the configuration below will decode up to 15000 flows/sec on a dedicated 16 CPU instance. If your total flowrate exceeds 15000 flows/sec, you should use multiple Logstash instances. - -[source, ruby] --------------------------- -input { - udp { - port => 2055 - codec => netflow - receive_buffer_bytes => 16777216 - workers => 16 - } --------------------------- - -To mitigate dropped packets, make sure to increase the Linux kernel receive buffer limit: - - # sysctl -w net.core.rmem_max=$((1024*1024*16)) - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Netflow Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-versions>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-cache_save_path"] -===== `cache_save_path` - - * Value type is <> - * There is no default value for this setting. - -Enables the template cache and saves it in the specified directory. 
This -minimizes data loss after Logstash restarts because the codec doesn't have to -wait for the arrival of templates, but instead reload already received -templates received during previous runs. - -Template caches are saved as: - -* <>/netflow_templates.cache for Netflow v9 templates. -* <>/ipfix_templates.cache for IPFIX templates. - -[id="{version}-plugins-{type}s-{plugin}-cache_ttl"] -===== `cache_ttl` - - * Value type is <> - * Default value is `4000` - -Netflow v9/v10 template cache TTL (seconds) - -[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"] -===== `include_flowset_id` - - * Value type is <> - * Default value is `false` - -Only makes sense for ipfix, v9 already includes this -Setting to true will include the flowset_id in events -Allows you to work with sequences, for instance with the aggregate filter - -[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"] -===== `ipfix_definitions` - - * Value type is <> - * There is no default value for this setting. - -Override YAML file containing IPFIX field definitions - -Very similar to the Netflow version except there is a top level Private -Enterprise Number (PEN) key added: - -[source,yaml] --------------------------- -pen: -id: -- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string -- :name -id: -- :skip --------------------------- - -There is an implicit PEN 0 for the standard fields. - -See for the base set. - -[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"] -===== `netflow_definitions` - - * Value type is <> - * There is no default value for this setting. - -Override YAML file containing Netflow field definitions - -Each Netflow field is defined like so: - -[source,yaml] --------------------------- -id: -- default length in bytes -- :name -id: -- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string -- :name -id: -- :skip --------------------------- - -See for the base set. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"netflow"` - -Specify into what field you want the Netflow data. - -[id="{version}-plugins-{type}s-{plugin}-versions"] -===== `versions` - - * Value type is <> - * Default value is `[5, 9, 10]` - -Specify which Netflow versions you will accept. - - diff --git a/docs/versioned-plugins/codecs/netflow-v3.9.1.asciidoc b/docs/versioned-plugins/codecs/netflow-v3.9.1.asciidoc deleted file mode 100644 index 0352129a1..000000000 --- a/docs/versioned-plugins/codecs/netflow-v3.9.1.asciidoc +++ /dev/null @@ -1,209 +0,0 @@ -:plugin: netflow -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.9.1 -:release_date: 2017-12-30 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-netflow/blob/v3.9.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Netflow codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The "netflow" codec is used for decoding Netflow v5/v9/v10 (IPFIX) flows. 
- -==== Supported Netflow/IPFIX exporters - -This codec supports: - -* Netflow v5 -* Netflow v9 -* IPFIX - -The following Netflow/IPFIX exporters are known to work with the most recent version of the netflow codec: - -[cols="6,^2,^2,^2,12",options="header"] -|=========================================================================================== -|Netflow exporter | v5 | v9 | IPFIX | Remarks -|Barracuda Firewall | | | y | -|Cisco ASA | | y | | -|Cisco ASR 1k | | | n | Fails because of duplicate fields -|Cisco ASR 9k | | y | | -|Cisco IOS 12.x | | y | | -|Cisco ISR w/ HSL | | n | | Fails because of duplicate fields, see: https://github.com/logstash-plugins/logstash-codec-netflow/issues/93 -|Cisco WLC | | y | | -|Citrix Netscaler | | | y | Still some unknown fields, labeled netscalerUnknown -|fprobe | y | | | -|Fortigate FortiOS | | y | | -|ipt_NETFLOW | y | y | y | -|Juniper MX80 | y | | | SW > 12.3R8 -|Mikrotik | y | | y | http://wiki.mikrotik.com/wiki/Manual:IP/Traffic_Flow -|nProbe | y | y | y | L7 DPI fields now also supported -|OpenBSD pflow | y | n | y | http://man.openbsd.org/OpenBSD-current/man4/pflow.4 -|Softflowd | y | y | y | IPFIX supported in https://github.com/djmdjm/softflowd -|Streamcore Streamgroomer | | y | | -|Palo Alto PAN-OS | | y | | -|Ubiquiti Edgerouter X | | y | | With MPLS labels -|VMware VDS | | | y | Still some unknown fields -|YAF | | | y | With silk and applabel, but no DPI plugin support -|vIPtela | | | y | -|=========================================================================================== - -==== Usage - -Example Logstash configuration that will listen on 2055/udp for Netflow v5,v9 and IPFIX: - -[source, ruby] --------------------------- -input { - udp { - port => 2055 - codec => netflow - } -} --------------------------- - -For high-performance production environments the configuration below will decode up to 15000 flows/sec on a dedicated 16 CPU instance. If your total flowrate exceeds 15000 flows/sec, you should use multiple Logstash instances. - -[source, ruby] --------------------------- -input { - udp { - port => 2055 - codec => netflow - receive_buffer_bytes => 16777216 - workers => 16 - } --------------------------- - -To mitigate dropped packets, make sure to increase the Linux kernel receive buffer limit: - - # sysctl -w net.core.rmem_max=$((1024*1024*16)) - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Netflow Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cache_save_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-cache_ttl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_flowset_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ipfix_definitions>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-netflow_definitions>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-versions>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-cache_save_path"] -===== `cache_save_path` - - * Value type is <> - * There is no default value for this setting. - -Enables the template cache and saves it in the specified directory. 
This -minimizes data loss after Logstash restarts because the codec doesn't have to -wait for the arrival of templates, but instead reload already received -templates received during previous runs. - -Template caches are saved as: - -* <>/netflow_templates.cache for Netflow v9 templates. -* <>/ipfix_templates.cache for IPFIX templates. - -[id="{version}-plugins-{type}s-{plugin}-cache_ttl"] -===== `cache_ttl` - - * Value type is <> - * Default value is `4000` - -Netflow v9/v10 template cache TTL (seconds) - -[id="{version}-plugins-{type}s-{plugin}-include_flowset_id"] -===== `include_flowset_id` - - * Value type is <> - * Default value is `false` - -Only makes sense for ipfix, v9 already includes this -Setting to true will include the flowset_id in events -Allows you to work with sequences, for instance with the aggregate filter - -[id="{version}-plugins-{type}s-{plugin}-ipfix_definitions"] -===== `ipfix_definitions` - - * Value type is <> - * There is no default value for this setting. - -Override YAML file containing IPFIX field definitions - -Very similar to the Netflow version except there is a top level Private -Enterprise Number (PEN) key added: - -[source,yaml] --------------------------- -pen: -id: -- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string -- :name -id: -- :skip --------------------------- - -There is an implicit PEN 0 for the standard fields. - -See for the base set. - -[id="{version}-plugins-{type}s-{plugin}-netflow_definitions"] -===== `netflow_definitions` - - * Value type is <> - * There is no default value for this setting. - -Override YAML file containing Netflow field definitions - -Each Netflow field is defined like so: - -[source,yaml] --------------------------- -id: -- default length in bytes -- :name -id: -- :uintN or :ip4_addr or :ip6_addr or :mac_addr or :string -- :name -id: -- :skip --------------------------- - -See for the base set. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"netflow"` - -Specify into what field you want the Netflow data. - -[id="{version}-plugins-{type}s-{plugin}-versions"] -===== `versions` - - * Value type is <> - * Default value is `[5, 9, 10]` - -Specify which Netflow versions you will accept. - - diff --git a/docs/versioned-plugins/codecs/nmap-index.asciidoc b/docs/versioned-plugins/codecs/nmap-index.asciidoc deleted file mode 100644 index 9826c0a6e..000000000 --- a/docs/versioned-plugins/codecs/nmap-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: nmap -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::nmap-v0.0.21.asciidoc[] -include::nmap-v0.0.20.asciidoc[] -include::nmap-v0.0.19.asciidoc[] - diff --git a/docs/versioned-plugins/codecs/nmap-v0.0.19.asciidoc b/docs/versioned-plugins/codecs/nmap-v0.0.19.asciidoc deleted file mode 100644 index 04ae86079..000000000 --- a/docs/versioned-plugins/codecs/nmap-v0.0.19.asciidoc +++ /dev/null @@ -1,81 +0,0 @@ -:plugin: nmap -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-///////////////////////////////////////////
-:version: v0.0.19
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-codec-nmap/blob/v0.0.19/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Nmap codec plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This codec is used to parse https://nmap.org/[nmap] output data, which is serialized in XML format. Nmap ("Network Mapper") is a free and open source utility for network discovery and security auditing.
-For more information on nmap, see https://nmap.org/.
-
-This codec can only be used for decoding data.
-
-The event types are listed below:
-
-`nmap_scan_metadata`: An object containing top-level information about the scan, including how many hosts were up and how many were down. Useful when you need to check whether a DNS-based hostname resolves, because both of those numbers will be zero if it does not.
-`nmap_host`: One event is created per host. The full data covering an individual host, including open ports and traceroute information, as a nested structure.
-`nmap_port`: One event is created per host/port. This duplicates data already in `nmap_host`, and exists for the case where you want to model ports as separate documents in Elasticsearch (which Kibana prefers).
-`nmap_traceroute_link`: One of these is output per traceroute 'connection', with a `from` and a `to` object describing each hop. Note that traceroute hop data is not always correct because each tracing ICMP packet may take a different route. Also very useful for Kibana visualizations.
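-
-A minimal usage sketch, assuming an `http` input on port `8000` as the
-transport (both are illustrative choices; any input that hands the codec a
-complete nmap XML document works):
-
-[source, ruby]
---------------------------
-input {
-  http {
-    host => "0.0.0.0"
-    port => 8000
-    # Decode the POSTed nmap XML into the event types listed above
-    codec => nmap
-  }
-}
--------------------------- 
-
-With a pipeline like this you could, for example, feed it a scan with
-something like `nmap -oX - scanme.nmap.org | curl -XPOST http://localhost:8000 --data-binary @-`.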
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Nmap Codec Configuration Options
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-emit_hosts>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-emit_ports>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-emit_scan_metadata>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-emit_traceroute_links>> |<>|No
-|=======================================================================
-
-  
-
-[id="{version}-plugins-{type}s-{plugin}-emit_hosts"]
-===== `emit_hosts`
-
- * Value type is <>
- * Default value is `true`
-
-Emit all host data as a nested document (including ports + traceroutes) with the type 'nmap_fullscan'.
-
-[id="{version}-plugins-{type}s-{plugin}-emit_ports"]
-===== `emit_ports`
-
- * Value type is <>
- * Default value is `true`
-
-Emit each port as a separate document with type 'nmap_port'.
-
-[id="{version}-plugins-{type}s-{plugin}-emit_scan_metadata"]
-===== `emit_scan_metadata`
-
- * Value type is <>
- * Default value is `true`
-
-Emit scan metadata.
-
-[id="{version}-plugins-{type}s-{plugin}-emit_traceroute_links"]
-===== `emit_traceroute_links`
-
- * Value type is <>
- * Default value is `true`
-
-Emit each hop_tuple of the traceroute with type 'nmap_traceroute_link'.
-
-
diff --git a/docs/versioned-plugins/codecs/nmap-v0.0.20.asciidoc b/docs/versioned-plugins/codecs/nmap-v0.0.20.asciidoc
deleted file mode 100644
index 6537a5a58..000000000
--- a/docs/versioned-plugins/codecs/nmap-v0.0.20.asciidoc
+++ /dev/null
@@ -1,81 +0,0 @@
-:plugin: nmap
-:type: codec
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v0.0.20
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-codec-nmap/blob/v0.0.20/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Nmap codec plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This codec is used to parse https://nmap.org/[nmap] output data, which is serialized in XML format. Nmap ("Network Mapper") is a free and open source utility for network discovery and security auditing.
-For more information on nmap, see https://nmap.org/.
-
-This codec can only be used for decoding data.
-
-The event types are listed below:
-
-`nmap_scan_metadata`: An object containing top-level information about the scan, including how many hosts were up and how many were down. Useful when you need to check whether a DNS-based hostname resolves, because both of those numbers will be zero if it does not.
-`nmap_host`: One event is created per host. The full data covering an individual host, including open ports and traceroute information, as a nested structure.
-`nmap_port`: One event is created per host/port. This duplicates data already in `nmap_host`, and exists for the case where you want to model ports as separate documents in Elasticsearch (which Kibana prefers).
-`nmap_traceroute_link`: One of these is output per traceroute 'connection', with a `from` and a `to` object describing each hop. Note that traceroute hop data is not always correct because each tracing ICMP packet may take a different route.
-Also very useful for Kibana visualizations.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Nmap Codec Configuration Options
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-emit_hosts>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-emit_ports>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-emit_scan_metadata>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-emit_traceroute_links>> |<>|No
-|=======================================================================
-
-  
-
-[id="{version}-plugins-{type}s-{plugin}-emit_hosts"]
-===== `emit_hosts`
-
- * Value type is <>
- * Default value is `true`
-
-Emit all host data as a nested document (including ports + traceroutes) with the type 'nmap_fullscan'.
-
-[id="{version}-plugins-{type}s-{plugin}-emit_ports"]
-===== `emit_ports`
-
- * Value type is <>
- * Default value is `true`
-
-Emit each port as a separate document with type 'nmap_port'.
-
-[id="{version}-plugins-{type}s-{plugin}-emit_scan_metadata"]
-===== `emit_scan_metadata`
-
- * Value type is <>
- * Default value is `true`
-
-Emit scan metadata.
-
-[id="{version}-plugins-{type}s-{plugin}-emit_traceroute_links"]
-===== `emit_traceroute_links`
-
- * Value type is <>
- * Default value is `true`
-
-Emit each hop_tuple of the traceroute with type 'nmap_traceroute_link'.
-
-
diff --git a/docs/versioned-plugins/codecs/nmap-v0.0.21.asciidoc b/docs/versioned-plugins/codecs/nmap-v0.0.21.asciidoc
deleted file mode 100644
index d8199d52d..000000000
--- a/docs/versioned-plugins/codecs/nmap-v0.0.21.asciidoc
+++ /dev/null
@@ -1,81 +0,0 @@
-:plugin: nmap
-:type: codec
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v0.0.21
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-codec-nmap/blob/v0.0.21/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Nmap codec plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This codec is used to parse https://nmap.org/[nmap] output data, which is serialized in XML format. Nmap ("Network Mapper") is a free and open source utility for network discovery and security auditing.
-For more information on nmap, see https://nmap.org/.
-
-This codec can only be used for decoding data.
-
-The event types are listed below:
-
-`nmap_scan_metadata`: An object containing top-level information about the scan, including how many hosts were up and how many were down. Useful when you need to check whether a DNS-based hostname resolves, because both of those numbers will be zero if it does not.
-`nmap_host`: One event is created per host. The full data covering an individual host, including open ports and traceroute information, as a nested structure.
-`nmap_port`: One event is created per host/port. This duplicates data already in `nmap_host`, and exists for the case where you want to model ports as separate documents in Elasticsearch (which Kibana prefers).
-`nmap_traceroute_link`: One of these is output per traceroute 'connection', with a `from` and a `to` object describing each hop.
Note that traceroute hop data is not always correct due to the fact that each tracing ICMP packet may take a different route. Also very useful for Kibana visualizations. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Nmap Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-emit_hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-emit_ports>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-emit_scan_metadata>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-emit_traceroute_links>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-emit_hosts"] -===== `emit_hosts` - - * Value type is <> - * Default value is `true` - -Emit all host data as a nested document (including ports + traceroutes) with the type 'nmap_fullscan' - -[id="{version}-plugins-{type}s-{plugin}-emit_ports"] -===== `emit_ports` - - * Value type is <> - * Default value is `true` - -Emit each port as a separate document with type 'nmap_port' - -[id="{version}-plugins-{type}s-{plugin}-emit_scan_metadata"] -===== `emit_scan_metadata` - - * Value type is <> - * Default value is `true` - -Emit scan metadata - -[id="{version}-plugins-{type}s-{plugin}-emit_traceroute_links"] -===== `emit_traceroute_links` - - * Value type is <> - * Default value is `true` - -Emit each hop_tuple of the traceroute with type 'nmap_traceroute_link' - - diff --git a/docs/versioned-plugins/codecs/oldlogstashjson-index.asciidoc b/docs/versioned-plugins/codecs/oldlogstashjson-index.asciidoc deleted file mode 100644 index 49f08435a..000000000 --- a/docs/versioned-plugins/codecs/oldlogstashjson-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: oldlogstashjson -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::oldlogstashjson-v2.0.7.asciidoc[] -include::oldlogstashjson-v2.0.5.asciidoc[] - diff --git a/docs/versioned-plugins/codecs/oldlogstashjson-v2.0.5.asciidoc b/docs/versioned-plugins/codecs/oldlogstashjson-v2.0.5.asciidoc deleted file mode 100644 index 172324e5c..000000000 --- a/docs/versioned-plugins/codecs/oldlogstashjson-v2.0.5.asciidoc +++ /dev/null @@ -1,31 +0,0 @@ -:plugin: oldlogstashjson -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.0.5 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-oldlogstashjson/blob/v2.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Oldlogstashjson codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Oldlogstashjson Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -|======================================================================= diff --git a/docs/versioned-plugins/codecs/oldlogstashjson-v2.0.7.asciidoc b/docs/versioned-plugins/codecs/oldlogstashjson-v2.0.7.asciidoc deleted file mode 100644 index 74a072761..000000000 --- a/docs/versioned-plugins/codecs/oldlogstashjson-v2.0.7.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -:plugin: oldlogstashjson -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.0.7 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-oldlogstashjson/blob/v2.0.7/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Oldlogstashjson codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -deprecated[5.0] - -Reads Logstash JSON in the schema used by Logstash versions earlier than 1.2.0 - diff --git a/docs/versioned-plugins/codecs/plain-index.asciidoc b/docs/versioned-plugins/codecs/plain-index.asciidoc deleted file mode 100644 index 86af7c0e5..000000000 --- a/docs/versioned-plugins/codecs/plain-index.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -:plugin: plain -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-12-19 -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::plain-v3.0.6.asciidoc[] -include::plain-v3.0.5.asciidoc[] -include::plain-v3.0.4.asciidoc[] -include::plain-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/codecs/plain-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/plain-v3.0.3.asciidoc deleted file mode 100644 index 1571305b8..000000000 --- a/docs/versioned-plugins/codecs/plain-v3.0.3.asciidoc +++ /dev/null @@ -1,65 +0,0 @@ -:plugin: plain -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-plain/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Plain codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The "plain" codec is for plain text with no delimiting between events. 
- -This is mainly useful on inputs and outputs that already have a defined -framing in their transport protocol (such as zeromq, rabbitmq, redis, etc) - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Plain Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, 
`macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this input. Examples include `UTF-8` -and `cp1252`. - -This setting is useful if your log files are in `Latin-1` (aka `cp1252`) -or in a character set other than `UTF-8`. - -This only affects "plain" format logs since JSON is already `UTF-8`. - -[id="{version}-plugins-{type}s-{plugin}-format"] -===== `format` - - * Value type is <> - * There is no default value for this setting. - -Set the message you wish to emit for each event. This supports `sprintf` -strings. - -This setting only affects outputs (encoding of events). - - diff --git a/docs/versioned-plugins/codecs/plain-v3.0.4.asciidoc b/docs/versioned-plugins/codecs/plain-v3.0.4.asciidoc deleted file mode 100644 index 35533ee82..000000000 --- a/docs/versioned-plugins/codecs/plain-v3.0.4.asciidoc +++ /dev/null @@ -1,65 +0,0 @@ -:plugin: plain -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-plain/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Plain codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The "plain" codec is for plain text with no delimiting between events.
- -This is mainly useful on inputs and outputs that already have a defined -framing in their transport protocol (such as zeromq, rabbitmq, redis, etc) - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Plain Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, 
`macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this input. Examples include `UTF-8` -and `cp1252`. - -This setting is useful if your log files are in `Latin-1` (aka `cp1252`) -or in a character set other than `UTF-8`. - -This only affects "plain" format logs since JSON is already `UTF-8`. - -[id="{version}-plugins-{type}s-{plugin}-format"] -===== `format` - - * Value type is <> - * There is no default value for this setting. - -Set the message you wish to emit for each event. This supports `sprintf` -strings. - -This setting only affects outputs (encoding of events). - - diff --git a/docs/versioned-plugins/codecs/plain-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/plain-v3.0.5.asciidoc deleted file mode 100644 index a5b1fc125..000000000 --- a/docs/versioned-plugins/codecs/plain-v3.0.5.asciidoc +++ /dev/null @@ -1,65 +0,0 @@ -:plugin: plain -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-plain/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Plain codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The "plain" codec is for plain text with no delimiting between events.
- -This is mainly useful on inputs and outputs that already have a defined -framing in their transport protocol (such as zeromq, rabbitmq, redis, etc) - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Plain Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, 
`macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this input. Examples include `UTF-8` -and `cp1252`. - -This setting is useful if your log files are in `Latin-1` (aka `cp1252`) -or in a character set other than `UTF-8`. - -This only affects "plain" format logs since JSON is already `UTF-8`. - -[id="{version}-plugins-{type}s-{plugin}-format"] -===== `format` - - * Value type is <> - * There is no default value for this setting. - -Set the message you wish to emit for each event. This supports `sprintf` -strings. - -This setting only affects outputs (encoding of events). - - diff --git a/docs/versioned-plugins/codecs/plain-v3.0.6.asciidoc b/docs/versioned-plugins/codecs/plain-v3.0.6.asciidoc deleted file mode 100644 index 4f79e2df5..000000000 --- a/docs/versioned-plugins/codecs/plain-v3.0.6.asciidoc +++ /dev/null @@ -1,65 +0,0 @@ -:plugin: plain -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.6 -:release_date: 2017-12-19 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-plain/blob/v3.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Plain codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The "plain" codec is for plain text with no delimiting between events.
- -This is mainly useful on inputs and outputs that already have a defined -framing in their transport protocol (such as zeromq, rabbitmq, redis, etc) - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Plain Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-charset"] -===== `charset` - - * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, 
`macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale` - * Default value is `"UTF-8"` - -The character encoding used in this input. Examples include `UTF-8` -and `cp1252`. - -This setting is useful if your log files are in `Latin-1` (aka `cp1252`) -or in a character set other than `UTF-8`. - -This only affects "plain" format logs since JSON is already `UTF-8`. - -[id="{version}-plugins-{type}s-{plugin}-format"] -===== `format` - - * Value type is <> - * There is no default value for this setting. - -Set the message you wish to emit for each event. This supports `sprintf` -strings. - -This setting only affects outputs (encoding of events). - - diff --git a/docs/versioned-plugins/codecs/pretty-index.asciidoc b/docs/versioned-plugins/codecs/pretty-index.asciidoc deleted file mode 100644 index 13466401f..000000000 --- a/docs/versioned-plugins/codecs/pretty-index.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -:plugin: pretty -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -|======================================================================= - - diff --git a/docs/versioned-plugins/codecs/protobuf-index.asciidoc b/docs/versioned-plugins/codecs/protobuf-index.asciidoc deleted file mode 100644 index c80ffa368..000000000 --- a/docs/versioned-plugins/codecs/protobuf-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: protobuf -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::protobuf-v1.0.3.asciidoc[] -include::protobuf-v1.0.2.asciidoc[] - diff --git a/docs/versioned-plugins/codecs/protobuf-v1.0.2.asciidoc b/docs/versioned-plugins/codecs/protobuf-v1.0.2.asciidoc deleted file mode 100644 index f92ad5190..000000000 --- a/docs/versioned-plugins/codecs/protobuf-v1.0.2.asciidoc +++ /dev/null @@ -1,106 +0,0 @@ -:plugin: protobuf -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT!
-/////////////////////////////////////////// -:version: v1.0.2 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-protobuf/blob/v1.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Protobuf codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec converts protobuf-encoded messages into Logstash events and vice versa. - -Requires the protobuf definitions as Ruby files. You can create those using the https://github.com/codekitchen/ruby-protocol-buffers[ruby-protoc compiler]. - -The following shows a usage example for decoding events from a Kafka stream: -[source,ruby] -kafka -{ - zk_connect => "127.0.0.1" - topic_id => "your_topic_goes_here" - codec => protobuf - { - class_name => "Animal::Unicorn" - include_path => ['/path/to/protobuf/definitions/UnicornProtobuf.pb.rb'] - } -} - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Protobuf Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-class_name>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-include_path>> |<>|Yes -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-class_name"] -===== `class_name` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Name of the class to decode. -If your protobuf definition contains modules, prepend them to the class name with double colons like so: -[source,ruby] -class_name => "Foods::Dairy::Cheese" - -This corresponds to a protobuf definition starting as follows: -[source,ruby] -module Foods - module Dairy - class Cheese - # here are your field definitions. - -If your class references other definitions, you only have to add the main class here. - -[id="{version}-plugins-{type}s-{plugin}-include_path"] -===== `include_path` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -List of absolute paths to files with protobuf definitions. -When using more than one file, make sure to arrange the files in reverse order of dependency so that each class is loaded before it is -referred to by another. - -Example: a class _Cheese_ referencing another protobuf class _Milk_ -[source,ruby] -module Foods - module Dairy - class Cheese - set_fully_qualified_name "Foods.Dairy.Cheese" - optional ::Foods::Cheese::Milk, :milk, 1 - optional :int64, :unique_id, 2 - # here be more field definitions - -would be configured as -[source,ruby] -include_path => ['/path/to/protobuf/definitions/Milk.pb.rb','/path/to/protobuf/definitions/Cheese.pb.rb'] - -When using the codec in an output plugin: -* make sure to include all the desired fields in the protobuf definition, including timestamp. - Remove fields that are not part of the protobuf definition from the event by using the mutate filter, as shown in the sketch after this list. -* the @ symbol is currently not supported in field names when loading the protobuf definitions for encoding. Make sure to call the timestamp field "timestamp" - instead of "@timestamp" in the protobuf file. Logstash event fields will be stripped of the leading @ before conversion.
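To make those output-side notes concrete, here is a minimal sketch of an encoding pipeline. It is an illustrative assumption rather than part of the plugin's documentation: the `host` field stands in for any field that has no counterpart in the protobuf definition, and the kafka output with `topic_id` simply mirrors the decoding example above.

[source,ruby]
----------------------------------
filter {
  # Drop event fields that have no counterpart in the protobuf definition.
  mutate {
    remove_field => [ "host" ]
  }
}

output {
  kafka {
    topic_id => "your_topic_goes_here"
    # Encode outgoing events with the same class used for decoding.
    codec => protobuf {
      class_name => "Animal::Unicorn"
      include_path => ['/path/to/protobuf/definitions/UnicornProtobuf.pb.rb']
    }
  }
}
----------------------------------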
- - - diff --git a/docs/versioned-plugins/codecs/protobuf-v1.0.3.asciidoc b/docs/versioned-plugins/codecs/protobuf-v1.0.3.asciidoc deleted file mode 100644 index 2db37c2c8..000000000 --- a/docs/versioned-plugins/codecs/protobuf-v1.0.3.asciidoc +++ /dev/null @@ -1,106 +0,0 @@ -:plugin: protobuf -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v1.0.3 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-protobuf/blob/v1.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Protobuf codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This codec converts protobuf-encoded messages into Logstash events and vice versa. - -Requires the protobuf definitions as Ruby files. You can create those using the https://github.com/codekitchen/ruby-protocol-buffers[ruby-protoc compiler]. - -The following shows a usage example for decoding events from a Kafka stream: -[source,ruby] -kafka -{ - zk_connect => "127.0.0.1" - topic_id => "your_topic_goes_here" - codec => protobuf - { - class_name => "Animal::Unicorn" - include_path => ['/path/to/protobuf/definitions/UnicornProtobuf.pb.rb'] - } -} - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Protobuf Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-class_name>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-include_path>> |<>|Yes -|======================================================================= - -  - -[id="{version}-plugins-{type}s-{plugin}-class_name"] -===== `class_name` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Name of the class to decode. -If your protobuf definition contains modules, prepend them to the class name with double colons like so: -[source,ruby] -class_name => "Foods::Dairy::Cheese" - -This corresponds to a protobuf definition starting as follows: -[source,ruby] -module Foods - module Dairy - class Cheese - # here are your field definitions. - -If your class references other definitions, you only have to add the main class here. - -[id="{version}-plugins-{type}s-{plugin}-include_path"] -===== `include_path` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -List of absolute paths to files with protobuf definitions. -When using more than one file, make sure to arrange the files in reverse order of dependency so that each class is loaded before it is -referred to by another. - -Example: a class _Cheese_ referencing another protobuf class _Milk_ -[source,ruby] -module Foods - module Dairy - class Cheese - set_fully_qualified_name "Foods.Dairy.Cheese" - optional ::Foods::Cheese::Milk, :milk, 1 - optional :int64, :unique_id, 2 - # here be more field definitions - -would be configured as -[source,ruby] -include_path => ['/path/to/protobuf/definitions/Milk.pb.rb','/path/to/protobuf/definitions/Cheese.pb.rb'] - -When using the codec in an output plugin: -* make sure to include all the desired fields in the protobuf definition, including timestamp.
- Remove fields that are not part of the protobuf definition from the event by using the mutate filter. -* the @ symbol is currently not supported in field names when loading the protobuf definitions for encoding. Make sure to call the timestamp field "timestamp" - instead of "@timestamp" in the protobuf file. Logstash event fields will be stripped of the leading @ before conversion. - - - diff --git a/docs/versioned-plugins/codecs/s3plain-index.asciidoc b/docs/versioned-plugins/codecs/s3plain-index.asciidoc deleted file mode 100644 index 33ec45b85..000000000 --- a/docs/versioned-plugins/codecs/s3plain-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: s3plain -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-12-19 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::s3plain-v2.0.7.asciidoc[] -include::s3plain-v2.0.6.asciidoc[] -include::s3plain-v2.0.5.asciidoc[] - diff --git a/docs/versioned-plugins/codecs/s3plain-v2.0.5.asciidoc b/docs/versioned-plugins/codecs/s3plain-v2.0.5.asciidoc deleted file mode 100644 index acb946692..000000000 --- a/docs/versioned-plugins/codecs/s3plain-v2.0.5.asciidoc +++ /dev/null @@ -1,32 +0,0 @@ -:plugin: s3_plain -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.0.5 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-s3plain/blob/v2.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== S3_plain codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The "s3_plain" codec is used for backward compatibility with previous versions of the S3 output plugin. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== S3_plain Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -|======================================================================= diff --git a/docs/versioned-plugins/codecs/s3plain-v2.0.6.asciidoc b/docs/versioned-plugins/codecs/s3plain-v2.0.6.asciidoc deleted file mode 100644 index 45c9b2034..000000000 --- a/docs/versioned-plugins/codecs/s3plain-v2.0.6.asciidoc +++ /dev/null @@ -1,32 +0,0 @@ -:plugin: s3_plain -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.0.6 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-s3plain/blob/v2.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT!
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== S3_plain codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The "s3_plain" codec is used for backward compatibility with previous versions of the S3 output plugin. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== S3_plain Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -|======================================================================= diff --git a/docs/versioned-plugins/codecs/s3plain-v2.0.7.asciidoc b/docs/versioned-plugins/codecs/s3plain-v2.0.7.asciidoc deleted file mode 100644 index d1bcbc321..000000000 --- a/docs/versioned-plugins/codecs/s3plain-v2.0.7.asciidoc +++ /dev/null @@ -1,32 +0,0 @@ -:plugin: s3_plain -:type: codec - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.0.7 -:release_date: 2017-12-19 -:changelog_url: https://github.com/logstash-plugins/logstash-codec-s3plain/blob/v2.0.7/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== S3_plain codec plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The "s3_plain" codec is used for backward compatibility with previous versions of the S3 output plugin. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== S3_plain Codec Configuration Options - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -|======================================================================= diff --git a/docs/versioned-plugins/codecs/sflow-index.asciidoc b/docs/versioned-plugins/codecs/sflow-index.asciidoc deleted file mode 100644 index 916b2160c..000000000 --- a/docs/versioned-plugins/codecs/sflow-index.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -:plugin: sflow -:type: codec - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -|======================================================================= - - diff --git a/docs/versioned-plugins/filters-index.asciidoc b/docs/versioned-plugins/filters-index.asciidoc index a571b9941..9158a052d 100644 --- a/docs/versioned-plugins/filters-index.asciidoc +++ b/docs/versioned-plugins/filters-index.asciidoc @@ -3,67 +3,8 @@ include::include/plugin-intro.asciidoc[] -include::filters/age-index.asciidoc[] -include::filters/aggregate-index.asciidoc[] -include::filters/alter-index.asciidoc[] -include::filters/anonymize-index.asciidoc[] -include::filters/bytesize-index.asciidoc[] -include::filters/checksum-index.asciidoc[] -include::filters/cidr-index.asciidoc[] -include::filters/cipher-index.asciidoc[] -include::filters/clone-index.asciidoc[] -include::filters/cloudfoundry-index.asciidoc[] -include::filters/collate-index.asciidoc[] -include::filters/csv-index.asciidoc[] -include::filters/date-index.asciidoc[] -include::filters/de_dot-index.asciidoc[] -include::filters/debug-index.asciidoc[] -include::filters/dissect-index.asciidoc[] -include::filters/dns-index.asciidoc[] -include::filters/drop-index.asciidoc[] -include::filters/elapsed-index.asciidoc[]
-include::filters/elasticsearch-index.asciidoc[] -include::filters/emoji-index.asciidoc[] -include::filters/environment-index.asciidoc[] -include::filters/example-index.asciidoc[] -include::filters/extractnumbers-index.asciidoc[] -include::filters/fingerprint-index.asciidoc[] -include::filters/geoip-index.asciidoc[] include::filters/grok-index.asciidoc[] -include::filters/hashid-index.asciidoc[] -include::filters/i18n-index.asciidoc[] -include::filters/jdbc_static-index.asciidoc[] -include::filters/jdbc_streaming-index.asciidoc[] -include::filters/json-index.asciidoc[] -include::filters/json_encode-index.asciidoc[] -include::filters/kubernetes_metadata-index.asciidoc[] -include::filters/kv-index.asciidoc[] -include::filters/language-index.asciidoc[] -include::filters/lookup-index.asciidoc[] -include::filters/math-index.asciidoc[] -include::filters/metaevent-index.asciidoc[] -include::filters/metricize-index.asciidoc[] -include::filters/metrics-index.asciidoc[] -include::filters/multiline-index.asciidoc[] include::filters/mutate-index.asciidoc[] -include::filters/oui-index.asciidoc[] -include::filters/prune-index.asciidoc[] -include::filters/punct-index.asciidoc[] -include::filters/range-index.asciidoc[] include::filters/ruby-index.asciidoc[] -include::filters/script-index.asciidoc[] include::filters/sleep-index.asciidoc[] -include::filters/split-index.asciidoc[] -include::filters/syslog_pri-index.asciidoc[] -include::filters/throttle-index.asciidoc[] -include::filters/tld-index.asciidoc[] -include::filters/translate-index.asciidoc[] -include::filters/truncate-index.asciidoc[] -include::filters/unique-index.asciidoc[] -include::filters/urldecode-index.asciidoc[] -include::filters/useragent-index.asciidoc[] -include::filters/uuid-index.asciidoc[] -include::filters/xml-index.asciidoc[] -include::filters/yaml-index.asciidoc[] -include::filters/zeromq-index.asciidoc[] diff --git a/docs/versioned-plugins/filters/age-index.asciidoc b/docs/versioned-plugins/filters/age-index.asciidoc deleted file mode 100644 index d04a41129..000000000 --- a/docs/versioned-plugins/filters/age-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: age -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::age-v1.0.2.asciidoc[] -include::age-v1.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/filters/age-v1.0.1.asciidoc b/docs/versioned-plugins/filters/age-v1.0.1.asciidoc deleted file mode 100644 index bc61d99d0..000000000 --- a/docs/versioned-plugins/filters/age-v1.0.1.asciidoc +++ /dev/null @@ -1,66 +0,0 @@ -:plugin: age -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v1.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-age/blob/v1.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Age filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -A simple filter for calculating the age of an event. 
- -This filter calculates the age of an event by subtracting the event timestamp -from the current timestamp. This allows you to drop Logstash events that are -older than some threshold. - -[source,ruby] -filter { - age {} - - if [@metadata][age] > 86400 { - drop {} - } -} - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Age Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"[@metadata][age]"` - -Define the target field for the event age, in seconds. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/age-v1.0.2.asciidoc b/docs/versioned-plugins/filters/age-v1.0.2.asciidoc deleted file mode 100644 index 2fab6d4f1..000000000 --- a/docs/versioned-plugins/filters/age-v1.0.2.asciidoc +++ /dev/null @@ -1,66 +0,0 @@ -:plugin: age -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v1.0.2 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-age/blob/v1.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Age filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -A simple filter for calculating the age of an event. - -This filter calculates the age of an event by subtracting the event timestamp -from the current timestamp. This allows you to drop Logstash events that are -older than some threshold. - -[source,ruby] -filter { - age {} - - if [@metadata][age] > 86400 { - drop {} - } -} - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Age Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"[@metadata][age]"` - -Define the target field for the event age, in seconds. 
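As a quick illustration, here is a minimal sketch that writes the age to a regular event field instead of event metadata, so the value is kept in the output (the field name `event_age` is an arbitrary choice for this example):

[source,ruby]
----------------------------------
filter {
  # Store the event age (in seconds) in a first-class field.
  age {
    target => "event_age"
  }
}
----------------------------------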
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/aggregate-index.asciidoc b/docs/versioned-plugins/filters/aggregate-index.asciidoc deleted file mode 100644 index b9ff863d6..000000000 --- a/docs/versioned-plugins/filters/aggregate-index.asciidoc +++ /dev/null @@ -1,24 +0,0 @@ -:plugin: aggregate -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-16 -| <> | 2017-11-07 -| <> | 2017-11-03 -| <> | 2017-10-10 -| <> | 2017-08-15 -| <> | 2017-06-23 -| <> | 2017-06-11 -|======================================================================= - -include::aggregate-v2.7.2.asciidoc[] -include::aggregate-v2.7.1.asciidoc[] -include::aggregate-v2.7.0.asciidoc[] -include::aggregate-v2.6.4.asciidoc[] -include::aggregate-v2.6.3.asciidoc[] -include::aggregate-v2.6.1.asciidoc[] -include::aggregate-v2.6.0.asciidoc[] - diff --git a/docs/versioned-plugins/filters/aggregate-v2.6.0.asciidoc b/docs/versioned-plugins/filters/aggregate-v2.6.0.asciidoc deleted file mode 100644 index 0b581f621..000000000 --- a/docs/versioned-plugins/filters/aggregate-v2.6.0.asciidoc +++ /dev/null @@ -1,552 +0,0 @@ -:plugin: aggregate -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.6.0 -:release_date: 2017-06-11 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-aggregate/blob/v2.6.0/CHANGELOG.md -:include_path: ../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Aggregate - -include::{include_path}/plugin_header.asciidoc[] - - -<<{version}-plugins-{type}s-{plugin}-description>> + -<<{version}-plugins-{type}s-{plugin}-example1>> + -<<{version}-plugins-{type}s-{plugin}-example2>> + -<<{version}-plugins-{type}s-{plugin}-example3>> + -<<{version}-plugins-{type}s-{plugin}-example4>> + -<<{version}-plugins-{type}s-{plugin}-example5>> + -<<{version}-plugins-{type}s-{plugin}-howitworks>> + -<<{version}-plugins-{type}s-{plugin}-usecases>> + -<<{version}-plugins-{type}s-{plugin}-options>> + - - -[id="{version}-plugins-{type}s-{plugin}-description"] -==== Description - - -The aim of this filter is to aggregate information available among several events (typically log lines) belonging to a same task, -and finally push aggregated information into final task event. - -You should be very careful to set Logstash filter workers to 1 (`-w 1` flag) for this filter to work correctly -otherwise events may be processed out of sequence and unexpected results will occur. - - -[id="{version}-plugins-{type}s-{plugin}-example1"] -==== Example #1 - -* with these given logs : - -[source,ruby] ----------------------------------- - INFO - 12345 - TASK_START - start - INFO - 12345 - SQL - sqlQuery1 - 12 - INFO - 12345 - SQL - sqlQuery2 - 34 - INFO - 12345 - TASK_END - end ----------------------------------- - -* you can aggregate "sql duration" for the whole task with this configuration : - -[source,ruby] ----------------------------------- - filter { - grok { - match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" 
] - } - - if [logger] == "TASK_START" { - aggregate { - task_id => "%{taskid}" - code => "map['sql_duration'] = 0" - map_action => "create" - } - } - - if [logger] == "SQL" { - aggregate { - task_id => "%{taskid}" - code => "map['sql_duration'] += event.get('duration')" - map_action => "update" - } - } - - if [logger] == "TASK_END" { - aggregate { - task_id => "%{taskid}" - code => "event.set('sql_duration', map['sql_duration'])" - map_action => "update" - end_of_task => true - timeout => 120 - } - } - } ----------------------------------- - -* the final event then looks like: - -[source,ruby] ----------------------------------- -{ - "message" => "INFO - 12345 - TASK_END - end message", - "sql_duration" => 46 -} ----------------------------------- - -The field `sql_duration` is added and contains the sum of all SQL query durations. - - -[id="{version}-plugins-{type}s-{plugin}-example2"] -==== Example #2: no start event - -* If you have the same logs as in example #1, but without a start log: - -[source,ruby] ----------------------------------- - INFO - 12345 - SQL - sqlQuery1 - 12 - INFO - 12345 - SQL - sqlQuery2 - 34 - INFO - 12345 - TASK_END - end ----------------------------------- - -* you can also aggregate "sql duration" with a slightly different configuration: - -[source,ruby] ----------------------------------- - filter { - grok { - match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ] - } - - if [logger] == "SQL" { - aggregate { - task_id => "%{taskid}" - code => "map['sql_duration'] ||= 0 ; map['sql_duration'] += event.get('duration')" - } - } - - if [logger] == "TASK_END" { - aggregate { - task_id => "%{taskid}" - code => "event.set('sql_duration', map['sql_duration'])" - end_of_task => true - timeout => 120 - } - } - } ----------------------------------- - -* the final event is exactly the same as in example #1 -* the key point is the `||=` Ruby operator, which initializes the 'sql_duration' map entry to 0 only if it is not already initialized - - -[id="{version}-plugins-{type}s-{plugin}-example3"] -==== Example #3: no end event - -Third use case: You have no specific end event. - -A typical case is aggregating or tracking user behaviour. We can track a user by its ID through the events; however, once the user stops interacting, the events stop coming in. There is no specific event indicating the end of the user's interaction. - -In this case, we can enable the option 'push_map_as_event_on_timeout' to enable pushing the aggregation map as a new event when a timeout occurs. -In addition, we can enable 'timeout_code' to execute code on the populated timeout event. -We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID.
- -* Given these logs: - -[source,ruby] ----------------------------------- -INFO - 12345 - Clicked One -INFO - 12345 - Clicked Two -INFO - 12345 - Clicked Three ----------------------------------- - -* You can aggregate the number of clicks the user did like this: - -[source,ruby] ----------------------------------- -filter { - grok { - match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ] - } - - aggregate { - task_id => "%{user_id}" - code => "map['clicks'] ||= 0; map['clicks'] += 1;" - push_map_as_event_on_timeout => true - timeout_task_id_field => "user_id" - timeout => 600 # 10 minutes timeout - timeout_tags => ['_aggregatetimeout'] - timeout_code => "event.set('several_clicks', event.get('clicks') > 1)" - } -} ----------------------------------- - -* After ten minutes, this will yield an event like: - -[source,json] ----------------------------------- -{ - "user_id": "12345", - "clicks": 3, - "several_clicks": true, - "tags": [ - "_aggregatetimeout" - ] -} ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-example4"] -==== Example #4: no end event and tasks come one after the other - -Fourth use case: like example #3, you have no specific end event, but tasks also come one after the other. + -That is to say: tasks are not interlaced. All task1 events come, then all task2 events come, and so on. + -In that case, you don't want to wait for the task timeout to flush the aggregation map. + - -* A typical case is aggregating results from the jdbc input plugin. -* Given that you have this SQL query: `SELECT country_name, town_name FROM town` -* Using the jdbc input plugin, you get these 3 events: - -[source,json] ----------------------------------- - { "country_name": "France", "town_name": "Paris" } - { "country_name": "France", "town_name": "Marseille" } - { "country_name": "USA", "town_name": "New-York" } ----------------------------------- - -* And you would like to push these 2 aggregated events into Elasticsearch: - -[source,json] ----------------------------------- - { "country_name": "France", "towns": [ {"town_name": "Paris"}, {"town_name": "Marseille"} ] } - { "country_name": "USA", "towns": [ {"town_name": "New-York"} ] } ----------------------------------- - -* You can do that using the `push_previous_map_as_event` aggregate plugin option: - -[source,ruby] ----------------------------------- - filter { - aggregate { - task_id => "%{country_name}" - code => " - map['country_name'] = event.get('country_name') - map['towns'] ||= [] - map['towns'] << {'town_name' => event.get('town_name')} - event.cancel() - " - push_previous_map_as_event => true - timeout => 3 - } - } ----------------------------------- - -* The key point is that each time the aggregate plugin detects a new `country_name`, it pushes the previous aggregate map as a new Logstash event, and then creates a new empty map for the next country -* When the 3-second timeout expires, the last aggregate map is pushed as a new event -* Finally, the initial events (which are not aggregated) are dropped because they are useless (thanks to `event.cancel()`) - - -[id="{version}-plugins-{type}s-{plugin}-example5"] -==== Example #5: no end event and push events as soon as possible - -Fifth use case: like example #3, there is no end event. + -Events keep coming for an indefinite time and you want to push the aggregation map as soon as possible after the last user interaction without waiting for the `timeout`. + -This allows the aggregated events to be pushed closer to real time.
+ - -A typical case is aggregating or tracking user behaviour. + -We can track a user by its ID through the events; however, once the user stops interacting, the events stop coming in. + -There is no specific event indicating the end of the user's interaction. + -The user interaction will be considered ended when no events for the specified user (task_id) arrive within the specified `inactivity_timeout`. + -If the user continues interacting for longer than `timeout` seconds (since the first event), the aggregation map will still be deleted and pushed as a new event when the timeout occurs. + -The difference with example #3 is that the events will be pushed as soon as the user stops interacting for `inactivity_timeout` seconds instead of waiting for the end of `timeout` seconds since the first event. - -In this case, we can enable the option 'push_map_as_event_on_timeout' to enable pushing the aggregation map as a new event when the inactivity timeout occurs. + -In addition, we can enable 'timeout_code' to execute code on the populated timeout event. + -We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID. + - -* Given these logs: - -[source,ruby] ----------------------------------- -INFO - 12345 - Clicked One -INFO - 12345 - Clicked Two -INFO - 12345 - Clicked Three ----------------------------------- - -* You can aggregate the number of clicks the user did like this: - -[source,ruby] ----------------------------------- -filter { - grok { - match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ] - } - aggregate { - task_id => "%{user_id}" - code => "map['clicks'] ||= 0; map['clicks'] += 1;" - push_map_as_event_on_timeout => true - timeout_task_id_field => "user_id" - timeout => 3600 # 1 hour timeout, user activity will be considered finished one hour after the first event, even if events keep coming - inactivity_timeout => 300 # 5 minutes timeout, user activity will be considered finished if no new events arrive 5 minutes after the last event - timeout_tags => ['_aggregatetimeout'] - timeout_code => "event.set('several_clicks', event.get('clicks') > 1)" - } -} ----------------------------------- - -* After five minutes of inactivity or one hour since the first event, this will yield an event like: - -[source,json] ----------------------------------- -{ - "user_id": "12345", - "clicks": 3, - "several_clicks": true, - "tags": [ - "_aggregatetimeout" - ] -} ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-howitworks"] -==== How it works -* the filter needs a "task_id" to correlate events (log lines) of the same task -* at the task beginning, the filter creates a map attached to the task_id -* for each event, you can execute code using 'event' and 'map' (for instance, copy an event field to the map) -* in the final event, you can execute a final piece of code (for instance, add map data to the final event) -* after the final event, the map attached to the task is deleted (thanks to `end_of_task => true`) -* an aggregate map is tied to one task_id value which is tied to one task_id pattern. So if you have 2 filters with different task_id patterns, even if you have the same task_id value, they won't share the same aggregate map. -* in one filter configuration, it is recommended to define a timeout option to protect the feature against unterminated tasks.
It tells the filter to delete expired maps. -* if no timeout is defined, by default, all maps older than 1800 seconds are automatically deleted -* all timeout options have to be defined in only one aggregate filter per task_id pattern. Timeout options are: timeout, inactivity_timeout, timeout_code, push_map_as_event_on_timeout, push_previous_map_as_event, timeout_task_id_field, timeout_tags -* if `code` execution raises an exception, the error is logged and the event is tagged '_aggregateexception' - - -[id="{version}-plugins-{type}s-{plugin}-usecases"] -==== Use Cases -* extract some cool metrics from task logs and push them into the final task log event (like in examples #1 and #2) -* extract error information in any task log line, and push it in the final task event (to get a final event with all error information if any) -* extract all back-end calls as a list, and push this list in the final task event (to get a task profile) -* extract all http headers logged in several lines to push this list in the final task event (complete http request info) -* for every back-end call, collect call details available on several lines, analyse it and finally tag the final back-end call log line (error, timeout, business-warning, ...) -* Finally, the task id can be any correlation id matching your needs: it can be a session id, a file path, ... - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Aggregate Filter Configuration Options - -This plugin supports the following configuration options plus the <> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-aggregate_maps_path>> |<>, a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-code>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-end_of_task>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-inactivity_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-map_action>> |<>, one of `["create", "update", "create_or_update"]`|No -| <<{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-push_previous_map_as_event>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-task_id>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout_code>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout_tags>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout_task_id_field>> |<>|No -|======================================================================= - -Also see <> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-aggregate_maps_path"] -===== `aggregate_maps_path` - - * Value type is <> - * There is no default value for this setting. - -The path to the file where aggregate maps are stored when Logstash stops -and are loaded from when Logstash starts. - -If not defined, aggregate maps will not be stored when Logstash stops and will be lost. -Must be defined in only one aggregate filter (as aggregate maps are global). - -Example: -[source,ruby] - filter { - aggregate { - aggregate_maps_path => "/path/to/.aggregate_maps" - } - } - -[id="{version}-plugins-{type}s-{plugin}-code"] -===== `code` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The code to execute to update the map, using the current event. - -Or, conversely, the code to execute to update the event, using the current map.
- -You will have a 'map' variable and an 'event' variable available (that is the event itself). - -Example: -[source,ruby] - filter { - aggregate { - code => "map['sql_duration'] += event.get('duration')" - } - } - -[id="{version}-plugins-{type}s-{plugin}-end_of_task"] -===== `end_of_task` - - * Value type is <> - * Default value is `false` - -Tell the filter that task is ended, and therefore, to delete aggregate map after code execution. - -[id="{version}-plugins-{type}s-{plugin}-inactivity_timeout"] -===== `inactivity_timeout` - - * Value type is <> - * There is no default value for this setting. - -The amount of seconds (since the last event) after which a task is considered as expired. - -When timeout occurs for a task, its aggregate map is evicted. - -If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event. - -`inactivity_timeout` can be defined for each "task_id" pattern. - -`inactivity_timeout` must be lower than `timeout`. - -[id="{version}-plugins-{type}s-{plugin}-map_action"] -===== `map_action` - - * Value type is <> - * Default value is `"create_or_update"` - -Tell the filter what to do with aggregate map. - -`"create"`: create the map, and execute the code only if map wasn't created before - -`"update"`: doesn't create the map, and execute the code only if map was created before - -`"create_or_update"`: create the map if it wasn't created before, execute the code in all cases - -[id="{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout"] -===== `push_map_as_event_on_timeout` - - * Value type is <> - * Default value is `false` - -When this option is enabled, each time a task timeout is detected, it pushes task aggregation map as a new Logstash event. -This enables to detect and process task timeouts in Logstash, but also to manage tasks that have no explicit end event. - -[id="{version}-plugins-{type}s-{plugin}-push_previous_map_as_event"] -===== `push_previous_map_as_event` - - * Value type is <> - * Default value is `false` - -When this option is enabled, each time aggregate plugin detects a new task id, it pushes previous aggregate map as a new Logstash event, -and then creates a new empty map for the next task. - -WARNING: this option works fine only if tasks come one after the other. It means : all task1 events, then all task2 events, etc... - -[id="{version}-plugins-{type}s-{plugin}-task_id"] -===== `task_id` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The expression defining task ID to correlate logs. - -This value must uniquely identify the task. - -Example: -[source,ruby] - filter { - aggregate { - task_id => "%{type}%{my_task_id}" - } - } - -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * Default value is `1800` - -The amount of seconds (since the first event) after which a task is considered as expired. - -When timeout occurs for a task, its aggregate map is evicted. - -If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event. - -Timeout can be defined for each "task_id" pattern. - -[id="{version}-plugins-{type}s-{plugin}-timeout_code"] -===== `timeout_code` - - * Value type is <> - * There is no default value for this setting. - -The code to execute to complete timeout generated event, when `'push_map_as_event_on_timeout'` or `'push_previous_map_as_event'` is set to true. 
-The code block will have access to the newly generated timeout event that is pre-populated with the aggregation map. - -If `'timeout_task_id_field'` is set, the event is also populated with the task_id value - -Example: -[source,ruby] - filter { - aggregate { - timeout_code => "event.set('state', 'timeout')" - } - } - -[id="{version}-plugins-{type}s-{plugin}-timeout_tags"] -===== `timeout_tags` - - * Value type is <> - * Default value is `[]` - -Defines tags to add when a timeout event is generated and yield - -Example: -[source,ruby] - filter { - aggregate { - timeout_tags => ["aggregate_timeout'] - } - } - -[id="{version}-plugins-{type}s-{plugin}-timeout_task_id_field"] -===== `timeout_task_id_field` - - * Value type is <> - * There is no default value for this setting. - -This option indicates the timeout generated event's field for the "task_id" value. -The task id will then be set into the timeout event. This can help correlate which tasks have been timed out. - -For example, with option `timeout_task_id_field => "my_id"` ,when timeout task id is `"12345"`, the generated timeout event will contain `'my_id' => '12345'`. - -By default, if this option is not set, task id value won't be set into timeout generated event. - - - -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/aggregate-v2.6.1.asciidoc b/docs/versioned-plugins/filters/aggregate-v2.6.1.asciidoc deleted file mode 100644 index 606a63376..000000000 --- a/docs/versioned-plugins/filters/aggregate-v2.6.1.asciidoc +++ /dev/null @@ -1,553 +0,0 @@ -:plugin: aggregate -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.6.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-aggregate/blob/v2.6.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Aggregate filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - - -<<{version}-plugins-{type}s-{plugin}-description>> + -<<{version}-plugins-{type}s-{plugin}-example1>> + -<<{version}-plugins-{type}s-{plugin}-example2>> + -<<{version}-plugins-{type}s-{plugin}-example3>> + -<<{version}-plugins-{type}s-{plugin}-example4>> + -<<{version}-plugins-{type}s-{plugin}-example5>> + -<<{version}-plugins-{type}s-{plugin}-howitworks>> + -<<{version}-plugins-{type}s-{plugin}-usecases>> + -<<{version}-plugins-{type}s-{plugin}-options>> + - - -[id="{version}-plugins-{type}s-{plugin}-description"] -==== Description - - -The aim of this filter is to aggregate information available among several events (typically log lines) belonging to a same task, -and finally push aggregated information into final task event. - -You should be very careful to set Logstash filter workers to 1 (`-w 1` flag) for this filter to work correctly -otherwise events may be processed out of sequence and unexpected results will occur. 
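-
-Before the full examples, here is a compact sketch of the three `map_action` values described under the configuration options below. It is illustrative only: the `taskid` field is assumed to exist in your events.
-
-[source,ruby]
-----------------------------------
-filter {
-  aggregate {
-    task_id => "%{taskid}"
-    map_action => "create"            # runs the code only when no map exists yet for this task_id
-    code => "map['started_at'] = event.get('@timestamp').to_s"
-  }
-  aggregate {
-    task_id => "%{taskid}"
-    map_action => "update"            # runs the code only when a map already exists
-    code => "map['events'] ||= 0; map['events'] += 1"
-  }
-  aggregate {
-    task_id => "%{taskid}"
-    map_action => "create_or_update"  # the default: runs the code in all cases
-    code => "event.set('events_so_far', map['events'])"
-  }
-}
-----------------------------------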
-
-
-[id="{version}-plugins-{type}s-{plugin}-example1"]
-==== Example #1
-
-* given these logs:
-
-[source,ruby]
-----------------------------------
- INFO - 12345 - TASK_START - start
- INFO - 12345 - SQL - sqlQuery1 - 12
- INFO - 12345 - SQL - sqlQuery2 - 34
- INFO - 12345 - TASK_END - end
-----------------------------------
-
-* you can aggregate the "sql duration" for the whole task with this configuration:
-
-[source,ruby]
-----------------------------------
- filter {
-   grok {
-     match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
-   }
-
-   if [logger] == "TASK_START" {
-     aggregate {
-       task_id => "%{taskid}"
-       code => "map['sql_duration'] = 0"
-       map_action => "create"
-     }
-   }
-
-   if [logger] == "SQL" {
-     aggregate {
-       task_id => "%{taskid}"
-       code => "map['sql_duration'] += event.get('duration')"
-       map_action => "update"
-     }
-   }
-
-   if [logger] == "TASK_END" {
-     aggregate {
-       task_id => "%{taskid}"
-       code => "event.set('sql_duration', map['sql_duration'])"
-       map_action => "update"
-       end_of_task => true
-       timeout => 120
-     }
-   }
- }
-----------------------------------
-
-* the final event then looks like:
-
-[source,ruby]
-----------------------------------
-{
-  "message" => "INFO - 12345 - TASK_END - end message",
-  "sql_duration" => 46
-}
-----------------------------------
-
-The field `sql_duration` is added and contains the sum of all SQL query durations.
-
-
-[id="{version}-plugins-{type}s-{plugin}-example2"]
-==== Example #2: no start event
-
-* If you have the same logs as in example #1, but without a start log:
-
-[source,ruby]
-----------------------------------
- INFO - 12345 - SQL - sqlQuery1 - 12
- INFO - 12345 - SQL - sqlQuery2 - 34
- INFO - 12345 - TASK_END - end
-----------------------------------
-
-* you can also aggregate the "sql duration" with a slightly different configuration:
-
-[source,ruby]
-----------------------------------
- filter {
-   grok {
-     match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
-   }
-
-   if [logger] == "SQL" {
-     aggregate {
-       task_id => "%{taskid}"
-       code => "map['sql_duration'] ||= 0 ; map['sql_duration'] += event.get('duration')"
-     }
-   }
-
-   if [logger] == "TASK_END" {
-     aggregate {
-       task_id => "%{taskid}"
-       code => "event.set('sql_duration', map['sql_duration'])"
-       end_of_task => true
-       timeout => 120
-     }
-   }
- }
-----------------------------------
-
-* the final event is exactly the same as in example #1
-* the key point is the "||=" ruby operator. It initializes the 'sql_duration' map entry to 0 only if this map entry is not already initialized
-
-
-[id="{version}-plugins-{type}s-{plugin}-example3"]
-==== Example #3: no end event
-
-Third use case: you have no specific end event.
-
-A typical case is aggregating or tracking user behaviour. We can track a user by its ID through the events; however, once the user stops interacting, the events stop coming in. There is no specific event indicating the end of the user's interaction.
-
-In this case, we can enable the option 'push_map_as_event_on_timeout' to push the aggregation map as a new event when a timeout occurs.
-In addition, we can enable 'timeout_code' to execute code on the populated timeout event.
-We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID.
-
-* Given these logs:
-
-[source,ruby]
-----------------------------------
-INFO - 12345 - Clicked One
-INFO - 12345 - Clicked Two
-INFO - 12345 - Clicked Three
-----------------------------------
-
-* You can aggregate the number of clicks the user made like this:
-
-[source,ruby]
-----------------------------------
-filter {
-  grok {
-    match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ]
-  }
-
-  aggregate {
-    task_id => "%{user_id}"
-    code => "map['clicks'] ||= 0; map['clicks'] += 1;"
-    push_map_as_event_on_timeout => true
-    timeout_task_id_field => "user_id"
-    timeout => 600 # 10 minutes timeout
-    timeout_tags => ['_aggregatetimeout']
-    timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
-  }
-}
-----------------------------------
-
-* After ten minutes, this will yield an event like:
-
-[source,json]
-----------------------------------
-{
-  "user_id": "12345",
-  "clicks": 3,
-  "several_clicks": true,
-  "tags": [
-    "_aggregatetimeout"
-  ]
-}
-----------------------------------
-
-
-[id="{version}-plugins-{type}s-{plugin}-example4"]
-==== Example #4: no end event and tasks come one after the other
-
-Fourth use case: like example #3, you have no specific end event, but in addition, tasks come one after the other. +
-That is to say: tasks are not interleaved. All task1 events come, then all task2 events come, and so on. +
-In that case, you don't want to wait for the task timeout to flush the aggregation map. +
-
-* A typical case is aggregating results from the jdbc input plugin.
-* Given that you have this SQL query: `SELECT country_name, town_name FROM town`
-* Using the jdbc input plugin, you get these 3 events:
-
-[source,json]
-----------------------------------
- { "country_name": "France", "town_name": "Paris" }
- { "country_name": "France", "town_name": "Marseille" }
- { "country_name": "USA", "town_name": "New-York" }
-----------------------------------
-
-* And you would like to push these 2 result events into elasticsearch:
-
-[source,json]
-----------------------------------
- { "country_name": "France", "towns": [ {"town_name": "Paris"}, {"town_name": "Marseille"} ] }
- { "country_name": "USA", "towns": [ {"town_name": "New-York"} ] }
-----------------------------------
-
-* You can do that using the `push_previous_map_as_event` aggregate plugin option:
-
-[source,ruby]
-----------------------------------
- filter {
-   aggregate {
-     task_id => "%{country_name}"
-     code => "
-       map['country_name'] = event.get('country_name')
-       map['towns'] ||= []
-       map['towns'] << {'town_name' => event.get('town_name')}
-       event.cancel()
-     "
-     push_previous_map_as_event => true
-     timeout => 3
-   }
- }
-----------------------------------
-
-* The key point is that each time the aggregate plugin detects a new `country_name`, it pushes the previous aggregate map as a new Logstash event, and then creates a new empty map for the next country
-* When the 3-second timeout expires, the last aggregate map is pushed as a new event
-* Finally, the initial events (which are not aggregated) are dropped because they are no longer useful (thanks to `event.cancel()`)
-
-
-[id="{version}-plugins-{type}s-{plugin}-example5"]
-==== Example #5: no end event and push events as soon as possible
-
-Fifth use case: like example #3, there is no end event. +
-Events keep coming for an indefinite time and you want to push the aggregation map as soon as possible after the last user interaction, without waiting for the `timeout`. +
-This allows the aggregated events to be pushed closer to real time. +
-
-A typical case is aggregating or tracking user behaviour. +
-We can track a user by its ID through the events; however, once the user stops interacting, the events stop coming in. +
-There is no specific event indicating the end of the user's interaction. +
-The user interaction is considered ended when no events for the specified user (task_id) arrive within the specified `inactivity_timeout`. +
-If the user continues interacting for longer than `timeout` seconds (since the first event), the aggregation map will still be deleted and pushed as a new event when the timeout occurs. +
-The difference from example #3 is that the events are pushed as soon as the user stops interacting for `inactivity_timeout` seconds, instead of waiting for the end of `timeout` seconds since the first event.
-
-In this case, we can enable the option 'push_map_as_event_on_timeout' to push the aggregation map as a new event when the inactivity timeout occurs. +
-In addition, we can enable 'timeout_code' to execute code on the populated timeout event. +
-We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID. +
-
-* Given these logs:
-
-[source,ruby]
-----------------------------------
-INFO - 12345 - Clicked One
-INFO - 12345 - Clicked Two
-INFO - 12345 - Clicked Three
-----------------------------------
-
-* You can aggregate the number of clicks the user made like this:
-
-[source,ruby]
-----------------------------------
-filter {
-  grok {
-    match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ]
-  }
-  aggregate {
-    task_id => "%{user_id}"
-    code => "map['clicks'] ||= 0; map['clicks'] += 1;"
-    push_map_as_event_on_timeout => true
-    timeout_task_id_field => "user_id"
-    timeout => 3600 # 1 hour timeout, user activity will be considered finished one hour after the first event, even if events keep coming
-    inactivity_timeout => 300 # 5 minutes timeout, user activity will be considered finished if no new events arrive 5 minutes after the last event
-    timeout_tags => ['_aggregatetimeout']
-    timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
-  }
-}
-----------------------------------
-
-* After five minutes of inactivity or one hour since the first event, this will yield an event like:
-
-[source,json]
-----------------------------------
-{
-  "user_id": "12345",
-  "clicks": 3,
-  "several_clicks": true,
-  "tags": [
-    "_aggregatetimeout"
-  ]
-}
-----------------------------------
-
-
-[id="{version}-plugins-{type}s-{plugin}-howitworks"]
-==== How it works
-* the filter needs a "task_id" to correlate events (log lines) of the same task
-* at the task beginning, the filter creates a map attached to the task_id
-* for each event, you can execute code using 'event' and 'map' (for instance, copy an event field to the map)
-* on the final event, you can execute final code (for instance, add map data to the final event)
-* after the final event, the map attached to the task is deleted (thanks to `end_of_task => true`)
-* an aggregate map is tied to one task_id value, which is tied to one task_id pattern. So if you have 2 filters with different task_id patterns, they won't share the same aggregate map, even for the same task_id value
-* in one filter configuration, it is recommended to define a timeout option to protect the feature against unterminated tasks. It tells the filter to delete expired maps
-* if no timeout is defined, by default, all maps older than 1800 seconds are automatically deleted
-* all timeout options have to be defined in only one aggregate filter per task_id pattern. Timeout options are: timeout, inactivity_timeout, timeout_code, push_map_as_event_on_timeout, push_previous_map_as_event, timeout_task_id_field, timeout_tags
-* if `code` execution raises an exception, the error is logged and the event is tagged '_aggregateexception'
-
-
-[id="{version}-plugins-{type}s-{plugin}-usecases"]
-==== Use Cases
-* extract some useful metrics from task logs and push them into the final task event (like in examples #1 and #2)
-* extract error information from any task log line, and push it into the final task event (to get a final event with all error information, if any)
-* extract all back-end calls as a list, and push this list into the final task event (to get a task profile)
-* extract all http headers logged across several lines, and push this list into the final task event (complete http request info)
-* for every back-end call, collect call details available on several lines, analyse them, and finally tag the final back-end call log line (error, timeout, business-warning, ...)
-* Finally, the task id can be any correlation id matching your needs: it can be a session id, a file path, ...
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Aggregate Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-aggregate_maps_path>> |<<string,string>>, a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-code>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-end_of_task>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-inactivity_timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-map_action>> |<<string,string>>, one of `["create", "update", "create_or_update"]`|No
-| <<{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-push_previous_map_as_event>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-task_id>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-timeout_code>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-timeout_tags>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-timeout_task_id_field>> |<<string,string>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-aggregate_maps_path"]
-===== `aggregate_maps_path`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The path to the file where aggregate maps are stored when Logstash stops,
-and from which they are loaded when Logstash starts.
-
-If not defined, aggregate maps will not be stored when Logstash stops and will be lost.
-Must be defined in only one aggregate filter (as aggregate maps are global).
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        aggregate_maps_path => "/path/to/.aggregate_maps"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-code"]
-===== `code`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The code to execute to update the map, using the current event.
-
-Or conversely, the code to execute to update the event, using the current map.
-
-You will have a 'map' variable and an 'event' variable available (that is, the event itself).
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        code => "map['sql_duration'] += event.get('duration')"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-end_of_task"]
-===== `end_of_task`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-Tell the filter that the task is ended, and therefore to delete the aggregate map after code execution.
-
-[id="{version}-plugins-{type}s-{plugin}-inactivity_timeout"]
-===== `inactivity_timeout`
-
- * Value type is <<number,number>>
- * There is no default value for this setting.
-
-The number of seconds (since the last event) after which a task is considered as expired.
-
-When a timeout occurs for a task, its aggregate map is evicted.
-
-If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event.
-
-`inactivity_timeout` can be defined for each "task_id" pattern.
-
-`inactivity_timeout` must be lower than `timeout`.
-
-[id="{version}-plugins-{type}s-{plugin}-map_action"]
-===== `map_action`
-
- * Value type is <<string,string>>
- * Default value is `"create_or_update"`
-
-Tell the filter what to do with the aggregate map.
-
-`"create"`: create the map, and execute the code only if the map wasn't created before
-
-`"update"`: doesn't create the map, and executes the code only if the map was created before
-
-`"create_or_update"`: create the map if it wasn't created before, and execute the code in all cases
-
-[id="{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout"]
-===== `push_map_as_event_on_timeout`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-When this option is enabled, each time a task timeout is detected, the filter pushes the task aggregation map as a new Logstash event.
-This lets you detect and process task timeouts in Logstash, and also manage tasks that have no explicit end event.
-
-[id="{version}-plugins-{type}s-{plugin}-push_previous_map_as_event"]
-===== `push_previous_map_as_event`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-When this option is enabled, each time the aggregate plugin detects a new task id, it pushes the previous aggregate map as a new Logstash event,
-and then creates a new empty map for the next task.
-
-WARNING: this option works fine only if tasks come one after the other. That means: all task1 events, then all task2 events, and so on.
-
-[id="{version}-plugins-{type}s-{plugin}-task_id"]
-===== `task_id`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The expression defining the task ID to correlate logs.
-
-This value must uniquely identify the task.
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        task_id => "%{type}%{my_task_id}"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-timeout"]
-===== `timeout`
-
- * Value type is <<number,number>>
- * Default value is `1800`
-
-The number of seconds (since the first event) after which a task is considered as expired.
-
-When a timeout occurs for a task, its aggregate map is evicted.
-
-If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event.
-
-Timeout can be defined for each "task_id" pattern.
-
-[id="{version}-plugins-{type}s-{plugin}-timeout_code"]
-===== `timeout_code`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The code to execute to complete the timeout-generated event, when 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true.
-The code block will have access to the newly generated timeout event that is pre-populated with the aggregation map.
-
-If 'timeout_task_id_field' is set, the event is also populated with the task_id value.
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        timeout_code => "event.set('state', 'timeout')"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-timeout_tags"]
-===== `timeout_tags`
-
- * Value type is <<array,array>>
- * Default value is `[]`
-
-Defines tags to add when a timeout event is generated and yielded.
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        timeout_tags => ["aggregate_timeout"]
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-timeout_task_id_field"]
-===== `timeout_task_id_field`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-This option names the field of the timeout-generated event in which to store the "task_id" value.
-The task id will then be set in the timeout event. This can help correlate which tasks have timed out.
-
-For example, with the option `timeout_task_id_field => "my_id"`, when the timeout task id is `"12345"`, the generated timeout event will contain `'my_id' => '12345'`.
-
-By default, if this option is not set, the task id value won't be set in the timeout-generated event.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/aggregate-v2.6.3.asciidoc b/docs/versioned-plugins/filters/aggregate-v2.6.3.asciidoc
deleted file mode 100644
index 6c50f8c70..000000000
--- a/docs/versioned-plugins/filters/aggregate-v2.6.3.asciidoc
+++ /dev/null
@@ -1,542 +0,0 @@
-:plugin: aggregate
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v2.6.3
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-aggregate/blob/v2.6.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Aggregate filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-
-[id="{version}-plugins-{type}s-{plugin}-description"]
-==== Description
-
-
-The aim of this filter is to aggregate information available among several events (typically log lines) belonging to the same task,
-and finally push the aggregated information into the final task event.
-
-You should be very careful to set Logstash filter workers to 1 (`-w 1` flag) for this filter to work correctly,
-otherwise events may be processed out of sequence and unexpected results will occur.
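-
-One behaviour worth illustrating up front: the `timeout` option (detailed below) counts from the first event of a task, while `inactivity_timeout` counts from the last one, so the two can be combined to flush quiet tasks early. A minimal sketch, with an assumed `user_id` field:
-
-[source,ruby]
-----------------------------------
-filter {
-  aggregate {
-    task_id => "%{user_id}"
-    code => "map['events'] ||= 0; map['events'] += 1"
-    push_map_as_event_on_timeout => true
-    timeout => 7200             # hard cap: flush 2 hours after the first event
-    inactivity_timeout => 180   # flush 3 minutes after the last event
-    timeout_tags => ['_aggregatetimeout']
-  }
-}
-----------------------------------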
-
-
-[id="{version}-plugins-{type}s-{plugin}-example1"]
-==== Example #1
-
-* given these logs:
-
-[source,ruby]
-----------------------------------
- INFO - 12345 - TASK_START - start
- INFO - 12345 - SQL - sqlQuery1 - 12
- INFO - 12345 - SQL - sqlQuery2 - 34
- INFO - 12345 - TASK_END - end
-----------------------------------
-
-* you can aggregate the "sql duration" for the whole task with this configuration:
-
-[source,ruby]
-----------------------------------
- filter {
-   grok {
-     match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
-   }
-
-   if [logger] == "TASK_START" {
-     aggregate {
-       task_id => "%{taskid}"
-       code => "map['sql_duration'] = 0"
-       map_action => "create"
-     }
-   }
-
-   if [logger] == "SQL" {
-     aggregate {
-       task_id => "%{taskid}"
-       code => "map['sql_duration'] += event.get('duration')"
-       map_action => "update"
-     }
-   }
-
-   if [logger] == "TASK_END" {
-     aggregate {
-       task_id => "%{taskid}"
-       code => "event.set('sql_duration', map['sql_duration'])"
-       map_action => "update"
-       end_of_task => true
-       timeout => 120
-     }
-   }
- }
-----------------------------------
-
-* the final event then looks like:
-
-[source,ruby]
-----------------------------------
-{
-  "message" => "INFO - 12345 - TASK_END - end message",
-  "sql_duration" => 46
-}
-----------------------------------
-
-The field `sql_duration` is added and contains the sum of all SQL query durations.
-
-
-[id="{version}-plugins-{type}s-{plugin}-example2"]
-==== Example #2: no start event
-
-* If you have the same logs as in example #1, but without a start log:
-
-[source,ruby]
-----------------------------------
- INFO - 12345 - SQL - sqlQuery1 - 12
- INFO - 12345 - SQL - sqlQuery2 - 34
- INFO - 12345 - TASK_END - end
-----------------------------------
-
-* you can also aggregate the "sql duration" with a slightly different configuration:
-
-[source,ruby]
-----------------------------------
- filter {
-   grok {
-     match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
-   }
-
-   if [logger] == "SQL" {
-     aggregate {
-       task_id => "%{taskid}"
-       code => "map['sql_duration'] ||= 0 ; map['sql_duration'] += event.get('duration')"
-     }
-   }
-
-   if [logger] == "TASK_END" {
-     aggregate {
-       task_id => "%{taskid}"
-       code => "event.set('sql_duration', map['sql_duration'])"
-       end_of_task => true
-       timeout => 120
-     }
-   }
- }
-----------------------------------
-
-* the final event is exactly the same as in example #1
-* the key point is the "||=" ruby operator. It initializes the 'sql_duration' map entry to 0 only if this map entry is not already initialized
-
-
-[id="{version}-plugins-{type}s-{plugin}-example3"]
-==== Example #3: no end event
-
-Third use case: you have no specific end event.
-
-A typical case is aggregating or tracking user behaviour. We can track a user by its ID through the events; however, once the user stops interacting, the events stop coming in. There is no specific event indicating the end of the user's interaction.
-
-In this case, we can enable the option 'push_map_as_event_on_timeout' to push the aggregation map as a new event when a timeout occurs.
-In addition, we can enable 'timeout_code' to execute code on the populated timeout event.
-We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID.
-
-* Given these logs:
-
-[source,ruby]
-----------------------------------
-INFO - 12345 - Clicked One
-INFO - 12345 - Clicked Two
-INFO - 12345 - Clicked Three
-----------------------------------
-
-* You can aggregate the number of clicks the user made like this:
-
-[source,ruby]
-----------------------------------
-filter {
-  grok {
-    match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ]
-  }
-
-  aggregate {
-    task_id => "%{user_id}"
-    code => "map['clicks'] ||= 0; map['clicks'] += 1;"
-    push_map_as_event_on_timeout => true
-    timeout_task_id_field => "user_id"
-    timeout => 600 # 10 minutes timeout
-    timeout_tags => ['_aggregatetimeout']
-    timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
-  }
-}
-----------------------------------
-
-* After ten minutes, this will yield an event like:
-
-[source,json]
-----------------------------------
-{
-  "user_id": "12345",
-  "clicks": 3,
-  "several_clicks": true,
-  "tags": [
-    "_aggregatetimeout"
-  ]
-}
-----------------------------------
-
-
-[id="{version}-plugins-{type}s-{plugin}-example4"]
-==== Example #4: no end event and tasks come one after the other
-
-Fourth use case: like example #3, you have no specific end event, but in addition, tasks come one after the other. +
-That is to say: tasks are not interleaved. All task1 events come, then all task2 events come, and so on. +
-In that case, you don't want to wait for the task timeout to flush the aggregation map. +
-
-* A typical case is aggregating results from the jdbc input plugin.
-* Given that you have this SQL query: `SELECT country_name, town_name FROM town`
-* Using the jdbc input plugin, you get these 3 events:
-
-[source,json]
-----------------------------------
- { "country_name": "France", "town_name": "Paris" }
- { "country_name": "France", "town_name": "Marseille" }
- { "country_name": "USA", "town_name": "New-York" }
-----------------------------------
-
-* And you would like to push these 2 result events into elasticsearch:
-
-[source,json]
-----------------------------------
- { "country_name": "France", "towns": [ {"town_name": "Paris"}, {"town_name": "Marseille"} ] }
- { "country_name": "USA", "towns": [ {"town_name": "New-York"} ] }
-----------------------------------
-
-* You can do that using the `push_previous_map_as_event` aggregate plugin option:
-
-[source,ruby]
-----------------------------------
- filter {
-   aggregate {
-     task_id => "%{country_name}"
-     code => "
-       map['country_name'] = event.get('country_name')
-       map['towns'] ||= []
-       map['towns'] << {'town_name' => event.get('town_name')}
-       event.cancel()
-     "
-     push_previous_map_as_event => true
-     timeout => 3
-   }
- }
-----------------------------------
-
-* The key point is that each time the aggregate plugin detects a new `country_name`, it pushes the previous aggregate map as a new Logstash event, and then creates a new empty map for the next country
-* When the 3-second timeout expires, the last aggregate map is pushed as a new event
-* Finally, the initial events (which are not aggregated) are dropped because they are no longer useful (thanks to `event.cancel()`)
-
-
-[id="{version}-plugins-{type}s-{plugin}-example5"]
-==== Example #5: no end event and push events as soon as possible
-
-Fifth use case: like example #3, there is no end event. +
-Events keep coming for an indefinite time and you want to push the aggregation map as soon as possible after the last user interaction, without waiting for the `timeout`. +
-This allows the aggregated events to be pushed closer to real time. +
-
-A typical case is aggregating or tracking user behaviour. +
-We can track a user by its ID through the events; however, once the user stops interacting, the events stop coming in. +
-There is no specific event indicating the end of the user's interaction. +
-The user interaction is considered ended when no events for the specified user (task_id) arrive within the specified `inactivity_timeout`. +
-If the user continues interacting for longer than `timeout` seconds (since the first event), the aggregation map will still be deleted and pushed as a new event when the timeout occurs. +
-The difference from example #3 is that the events are pushed as soon as the user stops interacting for `inactivity_timeout` seconds, instead of waiting for the end of `timeout` seconds since the first event.
-
-In this case, we can enable the option 'push_map_as_event_on_timeout' to push the aggregation map as a new event when the inactivity timeout occurs. +
-In addition, we can enable 'timeout_code' to execute code on the populated timeout event. +
-We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID. +
-
-* Given these logs:
-
-[source,ruby]
-----------------------------------
-INFO - 12345 - Clicked One
-INFO - 12345 - Clicked Two
-INFO - 12345 - Clicked Three
-----------------------------------
-
-* You can aggregate the number of clicks the user made like this:
-
-[source,ruby]
-----------------------------------
-filter {
-  grok {
-    match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ]
-  }
-  aggregate {
-    task_id => "%{user_id}"
-    code => "map['clicks'] ||= 0; map['clicks'] += 1;"
-    push_map_as_event_on_timeout => true
-    timeout_task_id_field => "user_id"
-    timeout => 3600 # 1 hour timeout, user activity will be considered finished one hour after the first event, even if events keep coming
-    inactivity_timeout => 300 # 5 minutes timeout, user activity will be considered finished if no new events arrive 5 minutes after the last event
-    timeout_tags => ['_aggregatetimeout']
-    timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
-  }
-}
-----------------------------------
-
-* After five minutes of inactivity or one hour since the first event, this will yield an event like:
-
-[source,json]
-----------------------------------
-{
-  "user_id": "12345",
-  "clicks": 3,
-  "several_clicks": true,
-  "tags": [
-    "_aggregatetimeout"
-  ]
-}
-----------------------------------
-
-
-[id="{version}-plugins-{type}s-{plugin}-howitworks"]
-==== How it works
-* the filter needs a "task_id" to correlate events (log lines) of the same task
-* at the task beginning, the filter creates a map attached to the task_id
-* for each event, you can execute code using 'event' and 'map' (for instance, copy an event field to the map)
-* on the final event, you can execute final code (for instance, add map data to the final event)
-* after the final event, the map attached to the task is deleted (thanks to `end_of_task => true`)
-* an aggregate map is tied to one task_id value, which is tied to one task_id pattern. So if you have 2 filters with different task_id patterns, they won't share the same aggregate map, even for the same task_id value
-* in one filter configuration, it is recommended to define a timeout option to protect the feature against unterminated tasks. It tells the filter to delete expired maps
-* if no timeout is defined, by default, all maps older than 1800 seconds are automatically deleted
-* all timeout options have to be defined in only one aggregate filter per task_id pattern. Timeout options are: timeout, inactivity_timeout, timeout_code, push_map_as_event_on_timeout, push_previous_map_as_event, timeout_task_id_field, timeout_tags
-* if `code` execution raises an exception, the error is logged and the event is tagged '_aggregateexception'
-
-
-[id="{version}-plugins-{type}s-{plugin}-usecases"]
-==== Use Cases
-* extract some useful metrics from task logs and push them into the final task event (like in examples #1 and #2)
-* extract error information from any task log line, and push it into the final task event (to get a final event with all error information, if any)
-* extract all back-end calls as a list, and push this list into the final task event (to get a task profile)
-* extract all http headers logged across several lines, and push this list into the final task event (complete http request info)
-* for every back-end call, collect call details available on several lines, analyse them, and finally tag the final back-end call log line (error, timeout, business-warning, ...)
-* Finally, the task id can be any correlation id matching your needs: it can be a session id, a file path, ...
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Aggregate Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-aggregate_maps_path>> |<<string,string>>, a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-code>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-end_of_task>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-inactivity_timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-map_action>> |<<string,string>>, one of `["create", "update", "create_or_update"]`|No
-| <<{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-push_previous_map_as_event>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-task_id>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-timeout_code>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-timeout_tags>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-timeout_task_id_field>> |<<string,string>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-aggregate_maps_path"]
-===== `aggregate_maps_path`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The path to the file where aggregate maps are stored when Logstash stops,
-and from which they are loaded when Logstash starts.
-
-If not defined, aggregate maps will not be stored when Logstash stops and will be lost.
-Must be defined in only one aggregate filter (as aggregate maps are global).
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        aggregate_maps_path => "/path/to/.aggregate_maps"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-code"]
-===== `code`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The code to execute to update the map, using the current event.
-
-Or conversely, the code to execute to update the event, using the current map.
-
-You will have a 'map' variable and an 'event' variable available (that is, the event itself).
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        code => "map['sql_duration'] += event.get('duration')"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-end_of_task"]
-===== `end_of_task`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-Tell the filter that the task is ended, and therefore to delete the aggregate map after code execution.
-
-[id="{version}-plugins-{type}s-{plugin}-inactivity_timeout"]
-===== `inactivity_timeout`
-
- * Value type is <<number,number>>
- * There is no default value for this setting.
-
-The number of seconds (since the last event) after which a task is considered as expired.
-
-When a timeout occurs for a task, its aggregate map is evicted.
-
-If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event.
-
-`inactivity_timeout` can be defined for each "task_id" pattern.
-
-`inactivity_timeout` must be lower than `timeout`.
-
-[id="{version}-plugins-{type}s-{plugin}-map_action"]
-===== `map_action`
-
- * Value type is <<string,string>>
- * Default value is `"create_or_update"`
-
-Tell the filter what to do with the aggregate map.
-
-`"create"`: create the map, and execute the code only if the map wasn't created before
-
-`"update"`: doesn't create the map, and executes the code only if the map was created before
-
-`"create_or_update"`: create the map if it wasn't created before, and execute the code in all cases
-
-[id="{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout"]
-===== `push_map_as_event_on_timeout`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-When this option is enabled, each time a task timeout is detected, the filter pushes the task aggregation map as a new Logstash event.
-This lets you detect and process task timeouts in Logstash, and also manage tasks that have no explicit end event.
-
-[id="{version}-plugins-{type}s-{plugin}-push_previous_map_as_event"]
-===== `push_previous_map_as_event`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-When this option is enabled, each time the aggregate plugin detects a new task id, it pushes the previous aggregate map as a new Logstash event,
-and then creates a new empty map for the next task.
-
-WARNING: this option works fine only if tasks come one after the other. That means: all task1 events, then all task2 events, and so on.
-
-[id="{version}-plugins-{type}s-{plugin}-task_id"]
-===== `task_id`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The expression defining the task ID to correlate logs.
-
-This value must uniquely identify the task.
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        task_id => "%{type}%{my_task_id}"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-timeout"]
-===== `timeout`
-
- * Value type is <<number,number>>
- * Default value is `1800`
-
-The number of seconds (since the first event) after which a task is considered as expired.
-
-When a timeout occurs for a task, its aggregate map is evicted.
-
-If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event.
-
-Timeout can be defined for each "task_id" pattern.
-
-[id="{version}-plugins-{type}s-{plugin}-timeout_code"]
-===== `timeout_code`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The code to execute to complete the timeout-generated event, when 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true.
-The code block will have access to the newly generated timeout event that is pre-populated with the aggregation map.
-
-If 'timeout_task_id_field' is set, the event is also populated with the task_id value.
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        timeout_code => "event.set('state', 'timeout')"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-timeout_tags"]
-===== `timeout_tags`
-
- * Value type is <<array,array>>
- * Default value is `[]`
-
-Defines tags to add when a timeout event is generated and yielded.
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        timeout_tags => ["aggregate_timeout"]
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-timeout_task_id_field"]
-===== `timeout_task_id_field`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-This option names the field of the timeout-generated event in which to store the "task_id" value.
-The task id will then be set in the timeout event. This can help correlate which tasks have timed out.
-
-For example, with the option `timeout_task_id_field => "my_id"`, when the timeout task id is `"12345"`, the generated timeout event will contain `'my_id' => '12345'`.
-
-By default, if this option is not set, the task id value won't be set in the timeout-generated event.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/aggregate-v2.6.4.asciidoc b/docs/versioned-plugins/filters/aggregate-v2.6.4.asciidoc
deleted file mode 100644
index c4e816ba0..000000000
--- a/docs/versioned-plugins/filters/aggregate-v2.6.4.asciidoc
+++ /dev/null
@@ -1,542 +0,0 @@
-:plugin: aggregate
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v2.6.4
-:release_date: 2017-10-10
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-aggregate/blob/v2.6.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Aggregate filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-
-[id="{version}-plugins-{type}s-{plugin}-description"]
-==== Description
-
-
-The aim of this filter is to aggregate information available among several events (typically log lines) belonging to the same task,
-and finally push the aggregated information into the final task event.
-
-You should be very careful to set Logstash filter workers to 1 (`-w 1` flag) for this filter to work correctly,
-otherwise events may be processed out of sequence and unexpected results will occur.
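-
-A detail that is easy to miss in the options below: a timeout-generated event does not carry the task id unless `timeout_task_id_field` names a target field for it. A minimal sketch, with an assumed `user_id` field:
-
-[source,ruby]
-----------------------------------
-filter {
-  aggregate {
-    task_id => "%{user_id}"
-    code => "map['events'] ||= 0; map['events'] += 1"
-    push_map_as_event_on_timeout => true
-    timeout => 120                        # flush 2 minutes after the first event
-    timeout_task_id_field => "user_id"    # copy the task id into the flushed event
-    timeout_code => "event.set('multiple_events', event.get('events') > 1)"
-  }
-}
-----------------------------------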
] - } - - if [logger] == "TASK_START" { - aggregate { - task_id => "%{taskid}" - code => "map['sql_duration'] = 0" - map_action => "create" - } - } - - if [logger] == "SQL" { - aggregate { - task_id => "%{taskid}" - code => "map['sql_duration'] += event.get('duration')" - map_action => "update" - } - } - - if [logger] == "TASK_END" { - aggregate { - task_id => "%{taskid}" - code => "event.set('sql_duration', map['sql_duration'])" - map_action => "update" - end_of_task => true - timeout => 120 - } - } - } ----------------------------------- - -* the final event then looks like : - -[source,ruby] ----------------------------------- -{ - "message" => "INFO - 12345 - TASK_END - end message", - "sql_duration" => 46 -} ----------------------------------- - -the field `sql_duration` is added and contains the sum of all sql queries durations. - - -[id="{version}-plugins-{type}s-{plugin}-example2"] -==== Example #2 : no start event - -* If you have the same logs than example #1, but without a start log : - -[source,ruby] ----------------------------------- - INFO - 12345 - SQL - sqlQuery1 - 12 - INFO - 12345 - SQL - sqlQuery2 - 34 - INFO - 12345 - TASK_END - end ----------------------------------- - -* you can also aggregate "sql duration" with a slightly different configuration : - -[source,ruby] ----------------------------------- - filter { - grok { - match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ] - } - - if [logger] == "SQL" { - aggregate { - task_id => "%{taskid}" - code => "map['sql_duration'] ||= 0 ; map['sql_duration'] += event.get('duration')" - } - } - - if [logger] == "TASK_END" { - aggregate { - task_id => "%{taskid}" - code => "event.set('sql_duration', map['sql_duration'])" - end_of_task => true - timeout => 120 - } - } - } ----------------------------------- - -* the final event is exactly the same than example #1 -* the key point is the "||=" ruby operator. It allows to initialize 'sql_duration' map entry to 0 only if this map entry is not already initialized - - -[id="{version}-plugins-{type}s-{plugin}-example3"] -==== Example #3 : no end event - -Third use case: You have no specific end event. - -A typical case is aggregating or tracking user behaviour. We can track a user by its ID through the events, however once the user stops interacting, the events stop coming in. There is no specific event indicating the end of the user's interaction. - -In this case, we can enable the option 'push_map_as_event_on_timeout' to enable pushing the aggregation map as a new event when a timeout occurs. -In addition, we can enable 'timeout_code' to execute code on the populated timeout event. -We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID. 
- -* Given these logs: - -[source,ruby] ----------------------------------- -INFO - 12345 - Clicked One -INFO - 12345 - Clicked Two -INFO - 12345 - Clicked Three ----------------------------------- - -* You can aggregate the amount of clicks the user did like this: - -[source,ruby] ----------------------------------- -filter { - grok { - match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ] - } - - aggregate { - task_id => "%{user_id}" - code => "map['clicks'] ||= 0; map['clicks'] += 1;" - push_map_as_event_on_timeout => true - timeout_task_id_field => "user_id" - timeout => 600 # 10 minutes timeout - timeout_tags => ['_aggregatetimeout'] - timeout_code => "event.set('several_clicks', event.get('clicks') > 1)" - } -} ----------------------------------- - -* After ten minutes, this will yield an event like: - -[source,json] ----------------------------------- -{ - "user_id": "12345", - "clicks": 3, - "several_clicks": true, - "tags": [ - "_aggregatetimeout" - ] -} ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-example4"] -==== Example #4 : no end event and tasks come one after the other - -Fourth use case : like example #3, you have no specific end event, but also, tasks come one after the other. + -That is to say : tasks are not interlaced. All task1 events come, then all task2 events come, ... + -In that case, you don't want to wait task timeout to flush aggregation map. + - -* A typical case is aggregating results from jdbc input plugin. -* Given that you have this SQL query : `SELECT country_name, town_name FROM town` -* Using jdbc input plugin, you get these 3 events from : - -[source,json] ----------------------------------- - { "country_name": "France", "town_name": "Paris" } - { "country_name": "France", "town_name": "Marseille" } - { "country_name": "USA", "town_name": "New-York" } ----------------------------------- - -* And you would like these 2 result events to push them into elasticsearch : - -[source,json] ----------------------------------- - { "country_name": "France", "towns": [ {"town_name": "Paris"}, {"town_name": "Marseille"} ] } - { "country_name": "USA", "towns": [ {"town_name": "New-York"} ] } ----------------------------------- - -* You can do that using `push_previous_map_as_event` aggregate plugin option : - -[source,ruby] ----------------------------------- - filter { - aggregate { - task_id => "%{country_name}" - code => " - map['country_name'] = event.get('country_name') - map['towns'] ||= [] - map['towns'] << {'town_name' => event.get('town_name')} - event.cancel() - " - push_previous_map_as_event => true - timeout => 3 - } - } ----------------------------------- - -* The key point is that each time aggregate plugin detects a new `country_name`, it pushes previous aggregate map as a new Logstash event, and then creates a new empty map for the next country -* When 5s timeout comes, the last aggregate map is pushed as a new event -* Finally, initial events (which are not aggregated) are dropped because useless (thanks to `event.cancel()`) - - -[id="{version}-plugins-{type}s-{plugin}-example5"] -==== Example #5 : no end event and push events as soon as possible - -Fifth use case: like example #3, there is no end event. + -Events keep comming for an indefinite time and you want to push the aggregation map as soon as possible after the last user interaction without waiting for the `timeout`. + -This allows to have the aggregated events pushed closer to real time. 
+ - -A typical case is aggregating or tracking user behaviour. + -We can track a user by its ID through the events, however once the user stops interacting, the events stop coming in. + -There is no specific event indicating the end of the user's interaction. + -The user ineraction will be considered as ended when no events for the specified user (task_id) arrive after the specified inactivity_timeout`. + -If the user continues interacting for longer than `timeout` seconds (since first event), the aggregation map will still be deleted and pushed as a new event when timeout occurs. + -The difference with example #3 is that the events will be pushed as soon as the user stops interacting for `inactivity_timeout` seconds instead of waiting for the end of `timeout` seconds since first event. - -In this case, we can enable the option 'push_map_as_event_on_timeout' to enable pushing the aggregation map as a new event when inactivity timeout occurs. + -In addition, we can enable 'timeout_code' to execute code on the populated timeout event. + -We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID. + - -* Given these logs: - -[source,ruby] ----------------------------------- -INFO - 12345 - Clicked One -INFO - 12345 - Clicked Two -INFO - 12345 - Clicked Three ----------------------------------- - -* You can aggregate the amount of clicks the user did like this: - -[source,ruby] ----------------------------------- -filter { - grok { - match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ] - } - aggregate { - task_id => "%{user_id}" - code => "map['clicks'] ||= 0; map['clicks'] += 1;" - push_map_as_event_on_timeout => true - timeout_task_id_field => "user_id" - timeout => 3600 # 1 hour timeout, user activity will be considered finished one hour after the first event, even if events keep comming - inactivity_timeout => 300 # 5 minutes timeout, user activity will be considered finished if no new events arrive 5 minutes after the last event - timeout_tags => ['_aggregatetimeout'] - timeout_code => "event.set('several_clicks', event.get('clicks') > 1)" - } -} ----------------------------------- - -* After five minutes of inactivity or one hour since first event, this will yield an event like: - -[source,json] ----------------------------------- -{ - "user_id": "12345", - "clicks": 3, - "several_clicks": true, - "tags": [ - "_aggregatetimeout" - ] -} ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-howitworks"] -==== How it works -* the filter needs a "task_id" to correlate events (log lines) of a same task -* at the task beginning, filter creates a map, attached to task_id -* for each event, you can execute code using 'event' and 'map' (for instance, copy an event field to map) -* in the final event, you can execute a last code (for instance, add map data to final event) -* after the final event, the map attached to task is deleted (thanks to `end_of_task => true`) -* an aggregate map is tied to one task_id value which is tied to one task_id pattern. So if you have 2 filters with different task_id patterns, even if you have same task_id value, they won't share the same aggregate map. -* in one filter configuration, it is recommanded to define a timeout option to protect the feature against unterminated tasks. 
-
-
-[id="{version}-plugins-{type}s-{plugin}-usecases"]
-==== Use Cases
-* extract some useful metrics from task logs and push them into the final task log event (as in examples #1 and #2)
-* extract error information from any task log line and push it into the final task event (to get a final event with all error information, if any)
-* extract all back-end calls as a list and push this list into the final task event (to get a task profile)
-* extract all HTTP headers logged across several lines and push this list into the final task event (complete HTTP request info)
-* for every back-end call, collect call details available across several lines, analyse them, and finally tag the final back-end call log line (error, timeout, business-warning, ...)
-* finally, the task id can be any correlation id matching your needs: a session id, a file path, ...
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Aggregate Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-aggregate_maps_path>> |<>, a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-code>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-end_of_task>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-inactivity_timeout>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-map_action>> |<>, one of `["create", "update", "create_or_update"]`|No
-| <<{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-push_previous_map_as_event>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-task_id>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-timeout_code>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-timeout_tags>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-timeout_task_id_field>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-aggregate_maps_path"]
-===== `aggregate_maps_path`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The path to the file where aggregate maps are stored when Logstash stops,
-and from which they are loaded when Logstash starts.
-
-If not defined, aggregate maps will not be stored when Logstash stops and will be lost.
-Must be defined in only one aggregate filter (as aggregate maps are global).
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        aggregate_maps_path => "/path/to/.aggregate_maps"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-code"]
-===== `code`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The code to execute to update the map, using the current event.
-
-Or, conversely, the code to execute to update the event, using the current map.
-
-You will have a 'map' variable and an 'event' variable available (the latter being the event itself).
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        code => "map['sql_duration'] += event.get('duration')"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-end_of_task"]
-===== `end_of_task`
-
- * Value type is <>
- * Default value is `false`
-
-Tell the filter that the task is ended and, therefore, to delete the aggregate map after code execution.
-
-[id="{version}-plugins-{type}s-{plugin}-inactivity_timeout"]
-===== `inactivity_timeout`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The number of seconds (since the last event) after which a task is considered expired.
-
-When a timeout occurs for a task, its aggregate map is evicted.
-
-If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event.
-
-`inactivity_timeout` can be defined for each "task_id" pattern.
-
-`inactivity_timeout` must be lower than `timeout`.
-
-[id="{version}-plugins-{type}s-{plugin}-map_action"]
-===== `map_action`
-
- * Value type is <>
- * Default value is `"create_or_update"`
-
-Tell the filter what to do with the aggregate map.
-
-`"create"`: create the map, and execute the code only if the map wasn't created before
-
-`"update"`: don't create the map, and execute the code only if the map was created before
-
-`"create_or_update"`: create the map if it wasn't created before; execute the code in all cases
-
-[id="{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout"]
-===== `push_map_as_event_on_timeout`
-
- * Value type is <>
- * Default value is `false`
-
-When this option is enabled, each time a task timeout is detected, the filter pushes the task aggregation map as a new Logstash event.
-This makes it possible to detect and process task timeouts in Logstash, and also to manage tasks that have no explicit end event.
-
-[id="{version}-plugins-{type}s-{plugin}-push_previous_map_as_event"]
-===== `push_previous_map_as_event`
-
- * Value type is <>
- * Default value is `false`
-
-When this option is enabled, each time the aggregate plugin detects a new task id, it pushes the previous aggregate map as a new Logstash event,
-and then creates a new empty map for the next task.
-
-WARNING: this option works only if tasks come one after the other. That is: all task1 events, then all task2 events, and so on.
-
-[id="{version}-plugins-{type}s-{plugin}-task_id"]
-===== `task_id`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The expression defining the task ID used to correlate logs.
-
-This value must uniquely identify the task.
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        task_id => "%{type}%{my_task_id}"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-timeout"]
-===== `timeout`
-
- * Value type is <>
- * Default value is `1800`
-
-The number of seconds (since the first event) after which a task is considered expired.
-
-When a timeout occurs for a task, its aggregate map is evicted.
-
-If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event.
-
-A timeout can be defined for each "task_id" pattern.
-
-[id="{version}-plugins-{type}s-{plugin}-timeout_code"]
-===== `timeout_code`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The code to execute to complete the timeout-generated event, when `push_map_as_event_on_timeout` or `push_previous_map_as_event` is set to true.
-The code block will have access to the newly generated timeout event, which is pre-populated with the aggregation map.
-
-If `timeout_task_id_field` is set, the event is also populated with the task_id value.
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        timeout_code => "event.set('state', 'timeout')"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-timeout_tags"]
-===== `timeout_tags`
-
- * Value type is <>
- * Default value is `[]`
-
-Defines the tags to add when a timeout event is generated and yielded.
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        timeout_tags => ["aggregate_timeout"]
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-timeout_task_id_field"]
-===== `timeout_task_id_field`
-
- * Value type is <>
- * There is no default value for this setting.
-
-This option indicates the field of the timeout-generated event in which to store the "task_id" value.
-The task id will then be set in the timeout event. This can help correlate which tasks have timed out.
-
-For example, with the option `timeout_task_id_field => "my_id"`, when the timeout task id is `"12345"`, the generated timeout event will contain `'my_id' => '12345'`.
-
-By default, if this option is not set, the task id value won't be set in the timeout-generated event.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/aggregate-v2.7.0.asciidoc b/docs/versioned-plugins/filters/aggregate-v2.7.0.asciidoc
deleted file mode 100644
index 2ed7e5eb5..000000000
--- a/docs/versioned-plugins/filters/aggregate-v2.7.0.asciidoc
+++ /dev/null
@@ -1,555 +0,0 @@
-:plugin: aggregate
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v2.7.0
-:release_date: 2017-11-03
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-aggregate/blob/v2.7.0/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Aggregate filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-
-[id="{version}-plugins-{type}s-{plugin}-description"]
-==== Description
-
-
-The aim of this filter is to aggregate information available among several events (typically log lines) belonging to the same task,
-and finally push the aggregated information into the final task event.
-
-You should be very careful to set Logstash filter workers to 1 (`-w 1` flag) for this filter to work correctly,
-otherwise events may be processed out of sequence and unexpected results will occur.
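-
-For instance, you might start Logstash like this (a minimal sketch; the pipeline file name is a hypothetical placeholder):
-
-[source,shell]
-----------------------------------
-bin/logstash -w 1 -f aggregate-pipeline.conf
-----------------------------------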
-
-
-[id="{version}-plugins-{type}s-{plugin}-example1"]
-==== Example #1
-
-* given these logs:
-
-[source,ruby]
-----------------------------------
-    INFO - 12345 - TASK_START - start
-    INFO - 12345 - SQL - sqlQuery1 - 12
-    INFO - 12345 - SQL - sqlQuery2 - 34
-    INFO - 12345 - TASK_END - end
-----------------------------------
-
-* you can aggregate the "sql duration" for the whole task with this configuration:
-
-[source,ruby]
-----------------------------------
-    filter {
-      grok {
-        match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
-      }
-
-      if [logger] == "TASK_START" {
-        aggregate {
-          task_id => "%{taskid}"
-          code => "map['sql_duration'] = 0"
-          map_action => "create"
-        }
-      }
-
-      if [logger] == "SQL" {
-        aggregate {
-          task_id => "%{taskid}"
-          code => "map['sql_duration'] += event.get('duration')"
-          map_action => "update"
-        }
-      }
-
-      if [logger] == "TASK_END" {
-        aggregate {
-          task_id => "%{taskid}"
-          code => "event.set('sql_duration', map['sql_duration'])"
-          map_action => "update"
-          end_of_task => true
-          timeout => 120
-        }
-      }
-    }
-----------------------------------
-
-* the final event then looks like:
-
-[source,ruby]
-----------------------------------
-{
-  "message" => "INFO - 12345 - TASK_END - end message",
-  "sql_duration" => 46
-}
-----------------------------------
-
-the field `sql_duration` is added and contains the sum of all SQL query durations.
-
-
-[id="{version}-plugins-{type}s-{plugin}-example2"]
-==== Example #2 : no start event
-
-* If you have the same logs as in example #1, but without a start log:
-
-[source,ruby]
-----------------------------------
-    INFO - 12345 - SQL - sqlQuery1 - 12
-    INFO - 12345 - SQL - sqlQuery2 - 34
-    INFO - 12345 - TASK_END - end
-----------------------------------
-
-* you can also aggregate the "sql duration" with a slightly different configuration:
-
-[source,ruby]
-----------------------------------
-    filter {
-      grok {
-        match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
-      }
-
-      if [logger] == "SQL" {
-        aggregate {
-          task_id => "%{taskid}"
-          code => "map['sql_duration'] ||= 0 ; map['sql_duration'] += event.get('duration')"
-        }
-      }
-
-      if [logger] == "TASK_END" {
-        aggregate {
-          task_id => "%{taskid}"
-          code => "event.set('sql_duration', map['sql_duration'])"
-          end_of_task => true
-          timeout => 120
-        }
-      }
-    }
-----------------------------------
-
-* the final event is exactly the same as in example #1
-* the key point is the "||=" ruby operator, which initializes the 'sql_duration' map entry to 0 only if it is not already initialized
-
-
-[id="{version}-plugins-{type}s-{plugin}-example3"]
-==== Example #3 : no end event
-
-Third use case: you have no specific end event.
-
-A typical case is aggregating or tracking user behaviour. We can track a user by its ID through the events; however, once the user stops interacting, the events stop coming in. There is no specific event indicating the end of the user's interaction.
-
-In this case, we can enable the option 'push_map_as_event_on_timeout' to push the aggregation map as a new event when a timeout occurs.
-In addition, we can enable 'timeout_code' to execute code on the populated timeout event.
-We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID.
-
-* Given these logs:
-
-[source,ruby]
-----------------------------------
-INFO - 12345 - Clicked One
-INFO - 12345 - Clicked Two
-INFO - 12345 - Clicked Three
-----------------------------------
-
-* You can aggregate the number of clicks the user made like this:
-
-[source,ruby]
-----------------------------------
-filter {
-  grok {
-    match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ]
-  }
-
-  aggregate {
-    task_id => "%{user_id}"
-    code => "map['clicks'] ||= 0; map['clicks'] += 1;"
-    push_map_as_event_on_timeout => true
-    timeout_task_id_field => "user_id"
-    timeout => 600 # 10 minutes timeout
-    timeout_tags => ['_aggregatetimeout']
-    timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
-  }
-}
-----------------------------------
-
-* After ten minutes, this will yield an event like:
-
-[source,json]
-----------------------------------
-{
-  "user_id": "12345",
-  "clicks": 3,
-  "several_clicks": true,
-  "tags": [
-     "_aggregatetimeout"
-  ]
-}
-----------------------------------
-
-
-[id="{version}-plugins-{type}s-{plugin}-example4"]
-==== Example #4 : no end event and tasks come one after the other
-
-Fourth use case: like example #3, you have no specific end event, but additionally, tasks come one after the other.
-
-That is to say: tasks are not interleaved. All task1 events come, then all task2 events come, and so on.
-
-In that case, you don't want to wait for the task timeout to flush the aggregation map.
-
-* A typical case is aggregating results from the jdbc input plugin.
-* Given that you have this SQL query: `SELECT country_name, town_name FROM town`
-* Using the jdbc input plugin, you get these 3 events:
-
-[source,json]
-----------------------------------
-  { "country_name": "France", "town_name": "Paris" }
-  { "country_name": "France", "town_name": "Marseille" }
-  { "country_name": "USA", "town_name": "New-York" }
-----------------------------------
-
-* And you would like to push these 2 aggregated events into Elasticsearch:
-
-[source,json]
-----------------------------------
-  { "country_name": "France", "towns": [ {"town_name": "Paris"}, {"town_name": "Marseille"} ] }
-  { "country_name": "USA", "towns": [ {"town_name": "New-York"} ] }
-----------------------------------
-
-* You can do that using the `push_previous_map_as_event` aggregate plugin option:
-
-[source,ruby]
-----------------------------------
-  filter {
-    aggregate {
-      task_id => "%{country_name}"
-      code => "
-        map['country_name'] = event.get('country_name')
-        map['towns'] ||= []
-        map['towns'] << {'town_name' => event.get('town_name')}
-        event.cancel()
-      "
-      push_previous_map_as_event => true
-      timeout => 3
-    }
-  }
-----------------------------------
-
-* The key point is that each time the aggregate plugin detects a new `country_name`, it pushes the previous aggregate map as a new Logstash event, and then creates a new empty map for the next country
-* When the 3-second timeout expires, the last aggregate map is pushed as a new event
-* Finally, the initial events (which are not aggregated) are dropped because they are no longer needed (thanks to `event.cancel()`)
-
-
-[id="{version}-plugins-{type}s-{plugin}-example5"]
-==== Example #5 : no end event and push events as soon as possible
-
-Fifth use case: like example #3, there is no end event.
-
-Events keep coming for an indefinite time and you want to push the aggregation map as soon as possible after the last user interaction, without waiting for the `timeout`.
-
-This allows the aggregated events to be pushed closer to real time.
-
-
-A typical case is aggregating or tracking user behaviour.
-
-We can track a user by its ID through the events; however, once the user stops interacting, the events stop coming in.
-
-There is no specific event indicating the end of the user's interaction.
-
-The user interaction is considered ended when no events for the specified user (task_id) arrive after the specified `inactivity_timeout`.
-
-If the user continues interacting for longer than `timeout` seconds (since the first event), the aggregation map will still be deleted and pushed as a new event when the timeout occurs.
-
-The difference with example #3 is that the events are pushed as soon as the user stops interacting for `inactivity_timeout` seconds, instead of waiting for `timeout` seconds to elapse since the first event.
-
-In this case, we can enable the option 'push_map_as_event_on_timeout' to push the aggregation map as a new event when the inactivity timeout occurs.
-
-In addition, we can enable 'timeout_code' to execute code on the populated timeout event.
-
-We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID.
-
-
-* Given these logs:
-
-[source,ruby]
-----------------------------------
-INFO - 12345 - Clicked One
-INFO - 12345 - Clicked Two
-INFO - 12345 - Clicked Three
-----------------------------------
-
-* You can aggregate the number of clicks the user made like this:
-
-[source,ruby]
-----------------------------------
-filter {
-  grok {
-    match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ]
-  }
-  aggregate {
-    task_id => "%{user_id}"
-    code => "map['clicks'] ||= 0; map['clicks'] += 1;"
-    push_map_as_event_on_timeout => true
-    timeout_task_id_field => "user_id"
-    timeout => 3600 # 1 hour timeout, user activity is considered finished one hour after the first event, even if events keep coming
-    inactivity_timeout => 300 # 5 minutes timeout, user activity is considered finished if no new events arrive 5 minutes after the last event
-    timeout_tags => ['_aggregatetimeout']
-    timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
-  }
-}
-----------------------------------
-
-* After five minutes of inactivity, or one hour after the first event, this will yield an event like:
-
-[source,json]
-----------------------------------
-{
-  "user_id": "12345",
-  "clicks": 3,
-  "several_clicks": true,
-  "tags": [
-     "_aggregatetimeout"
-  ]
-}
-----------------------------------
-
-
-[id="{version}-plugins-{type}s-{plugin}-howitworks"]
-==== How it works
-* the filter needs a "task_id" to correlate events (log lines) of the same task
-* at the beginning of a task, the filter creates a map attached to the task_id
-* for each event, you can execute code using 'event' and 'map' (for instance, copy an event field to the map)
-* in the final event, you can execute one last piece of code (for instance, add map data to the final event)
-* after the final event, the map attached to the task is deleted (thanks to `end_of_task => true`)
-* an aggregate map is tied to one task_id value, which is tied to one task_id pattern. So if you have 2 filters with different task_id patterns, they won't share the same aggregate map, even for the same task_id value.
-* in one filter configuration, it is recommended to define a timeout option to protect the feature against unterminated tasks. It tells the filter to delete expired maps
-* if no timeout is defined, by default, all maps older than 1800 seconds are automatically deleted
-* all timeout options have to be defined in only one aggregate filter per task_id pattern (per pipeline). Timeout options are: timeout, inactivity_timeout, timeout_code, push_map_as_event_on_timeout, push_previous_map_as_event, timeout_task_id_field, timeout_tags
-* if `code` execution raises an exception, the error is logged and the event is tagged '_aggregateexception' (one way to route such events is sketched below)
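-
-As a minimal, hypothetical sketch (the file output and its path are placeholders, not options of this plugin), you could route those tagged events to a dedicated file:
-
-[source,ruby]
-----------------------------------
-output {
-  if "_aggregateexception" in [tags] {
-    # write failed aggregations to a separate file for later inspection
-    file {
-      path => "/var/log/logstash/aggregate_failures.log"
-    }
-  }
-}
-----------------------------------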
-
-
-[id="{version}-plugins-{type}s-{plugin}-usecases"]
-==== Use Cases
-* extract some useful metrics from task logs and push them into the final task log event (as in examples #1 and #2)
-* extract error information from any task log line and push it into the final task event (to get a final event with all error information, if any)
-* extract all back-end calls as a list and push this list into the final task event (to get a task profile)
-* extract all HTTP headers logged across several lines and push this list into the final task event (complete HTTP request info)
-* for every back-end call, collect call details available across several lines, analyse them, and finally tag the final back-end call log line (error, timeout, business-warning, ...)
-* finally, the task id can be any correlation id matching your needs: a session id, a file path, ...
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Aggregate Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-aggregate_maps_path>> |<>, a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-code>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-end_of_task>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-inactivity_timeout>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-map_action>> |<>, one of `["create", "update", "create_or_update"]`|No
-| <<{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-push_previous_map_as_event>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-task_id>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-timeout_code>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-timeout_tags>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-timeout_task_id_field>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-aggregate_maps_path"]
-===== `aggregate_maps_path`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The path to the file where aggregate maps are stored when Logstash stops,
-and from which they are loaded when Logstash starts.
-
-If not defined, aggregate maps will not be stored when Logstash stops and will be lost.
-Must be defined in only one aggregate filter per pipeline (as aggregate maps are shared at pipeline level).
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        aggregate_maps_path => "/path/to/.aggregate_maps"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-code"]
-===== `code`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The code to execute to update the map, using the current event.
-
-Or, conversely, the code to execute to update the event, using the current map.
-
-You will have a 'map' variable and an 'event' variable available (the latter being the event itself).
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        code => "map['sql_duration'] += event.get('duration')"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-end_of_task"]
-===== `end_of_task`
-
- * Value type is <>
- * Default value is `false`
-
-Tell the filter that the task is ended and, therefore, to delete the aggregate map after code execution.
-
-[id="{version}-plugins-{type}s-{plugin}-inactivity_timeout"]
-===== `inactivity_timeout`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The number of seconds (since the last event) after which a task is considered expired.
-
-When a timeout occurs for a task, its aggregate map is evicted.
-
-If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event.
-
-`inactivity_timeout` can be defined for each "task_id" pattern.
-
-`inactivity_timeout` must be lower than `timeout`.
-
-[id="{version}-plugins-{type}s-{plugin}-map_action"]
-===== `map_action`
-
- * Value type is <>
- * Default value is `"create_or_update"`
-
-Tell the filter what to do with the aggregate map.
-
-`"create"`: create the map, and execute the code only if the map wasn't created before
-
-`"update"`: don't create the map, and execute the code only if the map was created before
-
-`"create_or_update"`: create the map if it wasn't created before; execute the code in all cases
-
-[id="{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout"]
-===== `push_map_as_event_on_timeout`
-
- * Value type is <>
- * Default value is `false`
-
-When this option is enabled, each time a task timeout is detected, the filter pushes the task aggregation map as a new Logstash event.
-This makes it possible to detect and process task timeouts in Logstash, and also to manage tasks that have no explicit end event.
-
-[id="{version}-plugins-{type}s-{plugin}-push_previous_map_as_event"]
-===== `push_previous_map_as_event`
-
- * Value type is <>
- * Default value is `false`
-
-When this option is enabled, each time the aggregate plugin detects a new task id, it pushes the previous aggregate map as a new Logstash event,
-and then creates a new empty map for the next task.
-
-WARNING: this option works only if tasks come one after the other. That is: all task1 events, then all task2 events, and so on.
-
-[id="{version}-plugins-{type}s-{plugin}-task_id"]
-===== `task_id`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The expression defining the task ID used to correlate logs.
-
-This value must uniquely identify the task.
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        task_id => "%{type}%{my_task_id}"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-timeout"]
-===== `timeout`
-
- * Value type is <>
- * Default value is `1800`
-
-The number of seconds (since the first event) after which a task is considered expired.
-
-When a timeout occurs for a task, its aggregate map is evicted.
-
-If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event.
-
-A timeout can be defined for each "task_id" pattern.
-
-[id="{version}-plugins-{type}s-{plugin}-timeout_code"]
-===== `timeout_code`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The code to execute to complete the timeout-generated event, when `push_map_as_event_on_timeout` or `push_previous_map_as_event` is set to true.
-The code block will have access to the newly generated timeout event, which is pre-populated with the aggregation map.
-
-If `timeout_task_id_field` is set, the event is also populated with the task_id value.
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        timeout_code => "event.set('state', 'timeout')"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-timeout_tags"]
-===== `timeout_tags`
-
- * Value type is <>
- * Default value is `[]`
-
-Defines the tags to add when a timeout event is generated and yielded.
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        timeout_tags => ["aggregate_timeout"]
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-timeout_task_id_field"]
-===== `timeout_task_id_field`
-
- * Value type is <>
- * There is no default value for this setting.
-
-This option indicates the field of the timeout-generated event in which to store the "task_id" value.
-The task id will then be set in the timeout event. This can help correlate which tasks have timed out.
-
-For example, with the option `timeout_task_id_field => "my_id"`, when the timeout task id is `"12345"`, the generated timeout event will contain `'my_id' => '12345'`.
-
-By default, if this option is not set, the task id value won't be set in the timeout-generated event.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/aggregate-v2.7.1.asciidoc b/docs/versioned-plugins/filters/aggregate-v2.7.1.asciidoc
deleted file mode 100644
index b3f64704d..000000000
--- a/docs/versioned-plugins/filters/aggregate-v2.7.1.asciidoc
+++ /dev/null
@@ -1,555 +0,0 @@
-:plugin: aggregate
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v2.7.1
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-aggregate/blob/v2.7.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Aggregate filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-
-[id="{version}-plugins-{type}s-{plugin}-description"]
-==== Description
-
-
-The aim of this filter is to aggregate information available among several events (typically log lines) belonging to the same task,
-and finally push the aggregated information into the final task event.
-
-You should be very careful to set Logstash filter workers to 1 (`-w 1` flag) for this filter to work correctly,
-otherwise events may be processed out of sequence and unexpected results will occur.
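-
-For instance, you might start Logstash like this (a minimal sketch; the pipeline file name is a hypothetical placeholder):
-
-[source,shell]
-----------------------------------
-bin/logstash -w 1 -f aggregate-pipeline.conf
-----------------------------------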
-
-
-[id="{version}-plugins-{type}s-{plugin}-example1"]
-==== Example #1
-
-* given these logs:
-
-[source,ruby]
-----------------------------------
-    INFO - 12345 - TASK_START - start
-    INFO - 12345 - SQL - sqlQuery1 - 12
-    INFO - 12345 - SQL - sqlQuery2 - 34
-    INFO - 12345 - TASK_END - end
-----------------------------------
-
-* you can aggregate the "sql duration" for the whole task with this configuration:
-
-[source,ruby]
-----------------------------------
-    filter {
-      grok {
-        match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
-      }
-
-      if [logger] == "TASK_START" {
-        aggregate {
-          task_id => "%{taskid}"
-          code => "map['sql_duration'] = 0"
-          map_action => "create"
-        }
-      }
-
-      if [logger] == "SQL" {
-        aggregate {
-          task_id => "%{taskid}"
-          code => "map['sql_duration'] += event.get('duration')"
-          map_action => "update"
-        }
-      }
-
-      if [logger] == "TASK_END" {
-        aggregate {
-          task_id => "%{taskid}"
-          code => "event.set('sql_duration', map['sql_duration'])"
-          map_action => "update"
-          end_of_task => true
-          timeout => 120
-        }
-      }
-    }
-----------------------------------
-
-* the final event then looks like:
-
-[source,ruby]
-----------------------------------
-{
-  "message" => "INFO - 12345 - TASK_END - end message",
-  "sql_duration" => 46
-}
-----------------------------------
-
-the field `sql_duration` is added and contains the sum of all SQL query durations.
-
-
-[id="{version}-plugins-{type}s-{plugin}-example2"]
-==== Example #2 : no start event
-
-* If you have the same logs as in example #1, but without a start log:
-
-[source,ruby]
-----------------------------------
-    INFO - 12345 - SQL - sqlQuery1 - 12
-    INFO - 12345 - SQL - sqlQuery2 - 34
-    INFO - 12345 - TASK_END - end
-----------------------------------
-
-* you can also aggregate the "sql duration" with a slightly different configuration:
-
-[source,ruby]
-----------------------------------
-    filter {
-      grok {
-        match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ]
-      }
-
-      if [logger] == "SQL" {
-        aggregate {
-          task_id => "%{taskid}"
-          code => "map['sql_duration'] ||= 0 ; map['sql_duration'] += event.get('duration')"
-        }
-      }
-
-      if [logger] == "TASK_END" {
-        aggregate {
-          task_id => "%{taskid}"
-          code => "event.set('sql_duration', map['sql_duration'])"
-          end_of_task => true
-          timeout => 120
-        }
-      }
-    }
-----------------------------------
-
-* the final event is exactly the same as in example #1
-* the key point is the "||=" ruby operator, which initializes the 'sql_duration' map entry to 0 only if it is not already initialized
-
-
-[id="{version}-plugins-{type}s-{plugin}-example3"]
-==== Example #3 : no end event
-
-Third use case: you have no specific end event.
-
-A typical case is aggregating or tracking user behaviour. We can track a user by its ID through the events; however, once the user stops interacting, the events stop coming in. There is no specific event indicating the end of the user's interaction.
-
-In this case, we can enable the option 'push_map_as_event_on_timeout' to push the aggregation map as a new event when a timeout occurs.
-In addition, we can enable 'timeout_code' to execute code on the populated timeout event.
-We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID.
-
-* Given these logs:
-
-[source,ruby]
-----------------------------------
-INFO - 12345 - Clicked One
-INFO - 12345 - Clicked Two
-INFO - 12345 - Clicked Three
-----------------------------------
-
-* You can aggregate the number of clicks the user made like this:
-
-[source,ruby]
-----------------------------------
-filter {
-  grok {
-    match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ]
-  }
-
-  aggregate {
-    task_id => "%{user_id}"
-    code => "map['clicks'] ||= 0; map['clicks'] += 1;"
-    push_map_as_event_on_timeout => true
-    timeout_task_id_field => "user_id"
-    timeout => 600 # 10 minutes timeout
-    timeout_tags => ['_aggregatetimeout']
-    timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
-  }
-}
-----------------------------------
-
-* After ten minutes, this will yield an event like:
-
-[source,json]
-----------------------------------
-{
-  "user_id": "12345",
-  "clicks": 3,
-  "several_clicks": true,
-  "tags": [
-     "_aggregatetimeout"
-  ]
-}
-----------------------------------
-
-
-[id="{version}-plugins-{type}s-{plugin}-example4"]
-==== Example #4 : no end event and tasks come one after the other
-
-Fourth use case: like example #3, you have no specific end event, but additionally, tasks come one after the other.
-
-That is to say: tasks are not interleaved. All task1 events come, then all task2 events come, and so on.
-
-In that case, you don't want to wait for the task timeout to flush the aggregation map.
-
-* A typical case is aggregating results from the jdbc input plugin.
-* Given that you have this SQL query: `SELECT country_name, town_name FROM town`
-* Using the jdbc input plugin, you get these 3 events:
-
-[source,json]
-----------------------------------
-  { "country_name": "France", "town_name": "Paris" }
-  { "country_name": "France", "town_name": "Marseille" }
-  { "country_name": "USA", "town_name": "New-York" }
-----------------------------------
-
-* And you would like to push these 2 aggregated events into Elasticsearch:
-
-[source,json]
-----------------------------------
-  { "country_name": "France", "towns": [ {"town_name": "Paris"}, {"town_name": "Marseille"} ] }
-  { "country_name": "USA", "towns": [ {"town_name": "New-York"} ] }
-----------------------------------
-
-* You can do that using the `push_previous_map_as_event` aggregate plugin option:
-
-[source,ruby]
-----------------------------------
-  filter {
-    aggregate {
-      task_id => "%{country_name}"
-      code => "
-        map['country_name'] = event.get('country_name')
-        map['towns'] ||= []
-        map['towns'] << {'town_name' => event.get('town_name')}
-        event.cancel()
-      "
-      push_previous_map_as_event => true
-      timeout => 3
-    }
-  }
-----------------------------------
-
-* The key point is that each time the aggregate plugin detects a new `country_name`, it pushes the previous aggregate map as a new Logstash event, and then creates a new empty map for the next country
-* When the 3-second timeout expires, the last aggregate map is pushed as a new event
-* Finally, the initial events (which are not aggregated) are dropped because they are no longer needed (thanks to `event.cancel()`)
-
-
-[id="{version}-plugins-{type}s-{plugin}-example5"]
-==== Example #5 : no end event and push events as soon as possible
-
-Fifth use case: like example #3, there is no end event.
-
-Events keep coming for an indefinite time and you want to push the aggregation map as soon as possible after the last user interaction, without waiting for the `timeout`.
-
-This allows the aggregated events to be pushed closer to real time.
-
-
-A typical case is aggregating or tracking user behaviour.
-
-We can track a user by its ID through the events; however, once the user stops interacting, the events stop coming in.
-
-There is no specific event indicating the end of the user's interaction.
-
-The user interaction is considered ended when no events for the specified user (task_id) arrive after the specified `inactivity_timeout`.
-
-If the user continues interacting for longer than `timeout` seconds (since the first event), the aggregation map will still be deleted and pushed as a new event when the timeout occurs.
-
-The difference with example #3 is that the events are pushed as soon as the user stops interacting for `inactivity_timeout` seconds, instead of waiting for `timeout` seconds to elapse since the first event.
-
-In this case, we can enable the option 'push_map_as_event_on_timeout' to push the aggregation map as a new event when the inactivity timeout occurs.
-
-In addition, we can enable 'timeout_code' to execute code on the populated timeout event.
-
-We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID.
-
-
-* Given these logs:
-
-[source,ruby]
-----------------------------------
-INFO - 12345 - Clicked One
-INFO - 12345 - Clicked Two
-INFO - 12345 - Clicked Three
-----------------------------------
-
-* You can aggregate the number of clicks the user made like this:
-
-[source,ruby]
-----------------------------------
-filter {
-  grok {
-    match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ]
-  }
-  aggregate {
-    task_id => "%{user_id}"
-    code => "map['clicks'] ||= 0; map['clicks'] += 1;"
-    push_map_as_event_on_timeout => true
-    timeout_task_id_field => "user_id"
-    timeout => 3600 # 1 hour timeout, user activity is considered finished one hour after the first event, even if events keep coming
-    inactivity_timeout => 300 # 5 minutes timeout, user activity is considered finished if no new events arrive 5 minutes after the last event
-    timeout_tags => ['_aggregatetimeout']
-    timeout_code => "event.set('several_clicks', event.get('clicks') > 1)"
-  }
-}
-----------------------------------
-
-* After five minutes of inactivity, or one hour after the first event, this will yield an event like:
-
-[source,json]
-----------------------------------
-{
-  "user_id": "12345",
-  "clicks": 3,
-  "several_clicks": true,
-  "tags": [
-     "_aggregatetimeout"
-  ]
-}
-----------------------------------
-
-
-[id="{version}-plugins-{type}s-{plugin}-howitworks"]
-==== How it works
-* the filter needs a "task_id" to correlate events (log lines) of the same task
-* at the beginning of a task, the filter creates a map attached to the task_id
-* for each event, you can execute code using 'event' and 'map' (for instance, copy an event field to the map)
-* in the final event, you can execute one last piece of code (for instance, add map data to the final event)
-* after the final event, the map attached to the task is deleted (thanks to `end_of_task => true`)
-* an aggregate map is tied to one task_id value, which is tied to one task_id pattern. So if you have 2 filters with different task_id patterns, they won't share the same aggregate map, even for the same task_id value.
-* in one filter configuration, it is recommended to define a timeout option to protect the feature against unterminated tasks. It tells the filter to delete expired maps
-* if no timeout is defined, by default, all maps older than 1800 seconds are automatically deleted
-* all timeout options have to be defined in only one aggregate filter per task_id pattern (per pipeline). Timeout options are: timeout, inactivity_timeout, timeout_code, push_map_as_event_on_timeout, push_previous_map_as_event, timeout_task_id_field, timeout_tags
-* if `code` execution raises an exception, the error is logged and the event is tagged '_aggregateexception' (one way to route such events is sketched below)
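-
-As a minimal, hypothetical sketch (the file output and its path are placeholders, not options of this plugin), you could route those tagged events to a dedicated file:
-
-[source,ruby]
-----------------------------------
-output {
-  if "_aggregateexception" in [tags] {
-    # write failed aggregations to a separate file for later inspection
-    file {
-      path => "/var/log/logstash/aggregate_failures.log"
-    }
-  }
-}
-----------------------------------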
-
-
-[id="{version}-plugins-{type}s-{plugin}-usecases"]
-==== Use Cases
-* extract some useful metrics from task logs and push them into the final task log event (as in examples #1 and #2)
-* extract error information from any task log line and push it into the final task event (to get a final event with all error information, if any)
-* extract all back-end calls as a list and push this list into the final task event (to get a task profile)
-* extract all HTTP headers logged across several lines and push this list into the final task event (complete HTTP request info)
-* for every back-end call, collect call details available across several lines, analyse them, and finally tag the final back-end call log line (error, timeout, business-warning, ...)
-* finally, the task id can be any correlation id matching your needs: a session id, a file path, ...
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Aggregate Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-aggregate_maps_path>> |<>, a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-code>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-end_of_task>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-inactivity_timeout>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-map_action>> |<>, one of `["create", "update", "create_or_update"]`|No
-| <<{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-push_previous_map_as_event>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-task_id>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-timeout_code>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-timeout_tags>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-timeout_task_id_field>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-aggregate_maps_path"]
-===== `aggregate_maps_path`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The path to the file where aggregate maps are stored when Logstash stops,
-and from which they are loaded when Logstash starts.
-
-If not defined, aggregate maps will not be stored when Logstash stops and will be lost.
-Must be defined in only one aggregate filter per pipeline (as aggregate maps are shared at pipeline level).
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        aggregate_maps_path => "/path/to/.aggregate_maps"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-code"]
-===== `code`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The code to execute to update the map, using the current event.
-
-Or, conversely, the code to execute to update the event, using the current map.
-
-You will have a 'map' variable and an 'event' variable available (the latter being the event itself).
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        code => "map['sql_duration'] += event.get('duration')"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-end_of_task"]
-===== `end_of_task`
-
- * Value type is <>
- * Default value is `false`
-
-Tell the filter that the task is ended and, therefore, to delete the aggregate map after code execution.
-
-[id="{version}-plugins-{type}s-{plugin}-inactivity_timeout"]
-===== `inactivity_timeout`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The number of seconds (since the last event) after which a task is considered expired.
-
-When a timeout occurs for a task, its aggregate map is evicted.
-
-If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event.
-
-`inactivity_timeout` can be defined for each "task_id" pattern.
-
-`inactivity_timeout` must be lower than `timeout`.
-
-[id="{version}-plugins-{type}s-{plugin}-map_action"]
-===== `map_action`
-
- * Value type is <>
- * Default value is `"create_or_update"`
-
-Tell the filter what to do with the aggregate map.
-
-`"create"`: create the map, and execute the code only if the map wasn't created before
-
-`"update"`: don't create the map, and execute the code only if the map was created before
-
-`"create_or_update"`: create the map if it wasn't created before; execute the code in all cases
-
-[id="{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout"]
-===== `push_map_as_event_on_timeout`
-
- * Value type is <>
- * Default value is `false`
-
-When this option is enabled, each time a task timeout is detected, the filter pushes the task aggregation map as a new Logstash event.
-This makes it possible to detect and process task timeouts in Logstash, and also to manage tasks that have no explicit end event.
-
-[id="{version}-plugins-{type}s-{plugin}-push_previous_map_as_event"]
-===== `push_previous_map_as_event`
-
- * Value type is <>
- * Default value is `false`
-
-When this option is enabled, each time the aggregate plugin detects a new task id, it pushes the previous aggregate map as a new Logstash event,
-and then creates a new empty map for the next task.
-
-WARNING: this option works only if tasks come one after the other. That is: all task1 events, then all task2 events, and so on.
-
-[id="{version}-plugins-{type}s-{plugin}-task_id"]
-===== `task_id`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The expression defining the task ID used to correlate logs.
-
-This value must uniquely identify the task.
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        task_id => "%{type}%{my_task_id}"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-timeout"]
-===== `timeout`
-
- * Value type is <>
- * Default value is `1800`
-
-The number of seconds (since the first event) after which a task is considered expired.
-
-When a timeout occurs for a task, its aggregate map is evicted.
-
-If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event.
-
-A timeout can be defined for each "task_id" pattern.
-
-[id="{version}-plugins-{type}s-{plugin}-timeout_code"]
-===== `timeout_code`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The code to execute to complete the timeout-generated event, when `push_map_as_event_on_timeout` or `push_previous_map_as_event` is set to true.
-The code block will have access to the newly generated timeout event, which is pre-populated with the aggregation map.
-
-If `timeout_task_id_field` is set, the event is also populated with the task_id value.
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        timeout_code => "event.set('state', 'timeout')"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-timeout_tags"]
-===== `timeout_tags`
-
- * Value type is <>
- * Default value is `[]`
-
-Defines the tags to add when a timeout event is generated and yielded.
-
-Example:
-[source,ruby]
-    filter {
-      aggregate {
-        timeout_tags => ["aggregate_timeout"]
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-timeout_task_id_field"]
-===== `timeout_task_id_field`
-
- * Value type is <>
- * There is no default value for this setting.
-
-This option indicates the field of the timeout-generated event in which to store the "task_id" value.
-The task id will then be set in the timeout event. This can help correlate which tasks have timed out.
-
-For example, with the option `timeout_task_id_field => "my_id"`, when the timeout task id is `"12345"`, the generated timeout event will contain `'my_id' => '12345'`.
-
-By default, if this option is not set, the task id value won't be set in the timeout-generated event.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/aggregate-v2.7.2.asciidoc b/docs/versioned-plugins/filters/aggregate-v2.7.2.asciidoc
deleted file mode 100644
index a81646ccc..000000000
--- a/docs/versioned-plugins/filters/aggregate-v2.7.2.asciidoc
+++ /dev/null
@@ -1,555 +0,0 @@
-:plugin: aggregate
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v2.7.2
-:release_date: 2017-11-16
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-aggregate/blob/v2.7.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Aggregate filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-
-[id="{version}-plugins-{type}s-{plugin}-description"]
-==== Description
-
-
-The aim of this filter is to aggregate information available among several events (typically log lines) belonging to the same task,
-and finally push the aggregated information into the final task event.
-
-You should be very careful to set Logstash filter workers to 1 (`-w 1` flag) for this filter to work correctly,
-otherwise events may be processed out of sequence and unexpected results will occur.
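-
-For instance, you might start Logstash like this (a minimal sketch; the pipeline file name is a hypothetical placeholder):
-
-[source,shell]
-----------------------------------
-bin/logstash -w 1 -f aggregate-pipeline.conf
-----------------------------------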
] - } - - if [logger] == "TASK_START" { - aggregate { - task_id => "%{taskid}" - code => "map['sql_duration'] = 0" - map_action => "create" - } - } - - if [logger] == "SQL" { - aggregate { - task_id => "%{taskid}" - code => "map['sql_duration'] += event.get('duration')" - map_action => "update" - } - } - - if [logger] == "TASK_END" { - aggregate { - task_id => "%{taskid}" - code => "event.set('sql_duration', map['sql_duration'])" - map_action => "update" - end_of_task => true - timeout => 120 - } - } - } ----------------------------------- - -* the final event then looks like : - -[source,ruby] ----------------------------------- -{ - "message" => "INFO - 12345 - TASK_END - end message", - "sql_duration" => 46 -} ----------------------------------- - -the field `sql_duration` is added and contains the sum of all sql queries durations. - - -[id="{version}-plugins-{type}s-{plugin}-example2"] -==== Example #2 : no start event - -* If you have the same logs than example #1, but without a start log : - -[source,ruby] ----------------------------------- - INFO - 12345 - SQL - sqlQuery1 - 12 - INFO - 12345 - SQL - sqlQuery2 - 34 - INFO - 12345 - TASK_END - end ----------------------------------- - -* you can also aggregate "sql duration" with a slightly different configuration : - -[source,ruby] ----------------------------------- - filter { - grok { - match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:taskid} - %{NOTSPACE:logger} - %{WORD:label}( - %{INT:duration:int})?" ] - } - - if [logger] == "SQL" { - aggregate { - task_id => "%{taskid}" - code => "map['sql_duration'] ||= 0 ; map['sql_duration'] += event.get('duration')" - } - } - - if [logger] == "TASK_END" { - aggregate { - task_id => "%{taskid}" - code => "event.set('sql_duration', map['sql_duration'])" - end_of_task => true - timeout => 120 - } - } - } ----------------------------------- - -* the final event is exactly the same than example #1 -* the key point is the "||=" ruby operator. It allows to initialize 'sql_duration' map entry to 0 only if this map entry is not already initialized - - -[id="{version}-plugins-{type}s-{plugin}-example3"] -==== Example #3 : no end event - -Third use case: You have no specific end event. - -A typical case is aggregating or tracking user behaviour. We can track a user by its ID through the events, however once the user stops interacting, the events stop coming in. There is no specific event indicating the end of the user's interaction. - -In this case, we can enable the option 'push_map_as_event_on_timeout' to enable pushing the aggregation map as a new event when a timeout occurs. -In addition, we can enable 'timeout_code' to execute code on the populated timeout event. -We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID. 
- -* Given these logs: - -[source,ruby] ----------------------------------- -INFO - 12345 - Clicked One -INFO - 12345 - Clicked Two -INFO - 12345 - Clicked Three ----------------------------------- - -* You can aggregate the amount of clicks the user did like this: - -[source,ruby] ----------------------------------- -filter { - grok { - match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ] - } - - aggregate { - task_id => "%{user_id}" - code => "map['clicks'] ||= 0; map['clicks'] += 1;" - push_map_as_event_on_timeout => true - timeout_task_id_field => "user_id" - timeout => 600 # 10 minutes timeout - timeout_tags => ['_aggregatetimeout'] - timeout_code => "event.set('several_clicks', event.get('clicks') > 1)" - } -} ----------------------------------- - -* After ten minutes, this will yield an event like: - -[source,json] ----------------------------------- -{ - "user_id": "12345", - "clicks": 3, - "several_clicks": true, - "tags": [ - "_aggregatetimeout" - ] -} ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-example4"] -==== Example #4 : no end event and tasks come one after the other - -Fourth use case : like example #3, you have no specific end event, but also, tasks come one after the other. - -That is to say : tasks are not interlaced. All task1 events come, then all task2 events come, ... - -In that case, you don't want to wait task timeout to flush aggregation map. - -* A typical case is aggregating results from jdbc input plugin. -* Given that you have this SQL query : `SELECT country_name, town_name FROM town` -* Using jdbc input plugin, you get these 3 events from : - -[source,json] ----------------------------------- - { "country_name": "France", "town_name": "Paris" } - { "country_name": "France", "town_name": "Marseille" } - { "country_name": "USA", "town_name": "New-York" } ----------------------------------- - -* And you would like these 2 result events to push them into elasticsearch : - -[source,json] ----------------------------------- - { "country_name": "France", "towns": [ {"town_name": "Paris"}, {"town_name": "Marseille"} ] } - { "country_name": "USA", "towns": [ {"town_name": "New-York"} ] } ----------------------------------- - -* You can do that using `push_previous_map_as_event` aggregate plugin option : - -[source,ruby] ----------------------------------- - filter { - aggregate { - task_id => "%{country_name}" - code => " - map['country_name'] = event.get('country_name') - map['towns'] ||= [] - map['towns'] << {'town_name' => event.get('town_name')} - event.cancel() - " - push_previous_map_as_event => true - timeout => 3 - } - } ----------------------------------- - -* The key point is that each time aggregate plugin detects a new `country_name`, it pushes previous aggregate map as a new Logstash event, and then creates a new empty map for the next country -* When 5s timeout comes, the last aggregate map is pushed as a new event -* Finally, initial events (which are not aggregated) are dropped because useless (thanks to `event.cancel()`) - - -[id="{version}-plugins-{type}s-{plugin}-example5"] -==== Example #5 : no end event and push events as soon as possible - -Fifth use case: like example #3, there is no end event. - -Events keep comming for an indefinite time and you want to push the aggregation map as soon as possible after the last user interaction without waiting for the `timeout`. - -This allows to have the aggregated events pushed closer to real time. 
- - -A typical case is aggregating or tracking user behaviour. - -We can track a user by its ID through the events; however, once the user stops interacting, the events stop coming in. - -There is no specific event indicating the end of the user's interaction. - -The user interaction will be considered ended when no events for the specified user (task_id) arrive within the specified `inactivity_timeout`. - -If the user continues interacting for longer than `timeout` seconds (since the first event), the aggregation map will still be deleted and pushed as a new event when the timeout occurs. - -The difference with example #3 is that the events will be pushed as soon as the user stops interacting for `inactivity_timeout` seconds, instead of waiting for the end of `timeout` seconds since the first event. - -In this case, we can enable the 'push_map_as_event_on_timeout' option to push the aggregation map as a new event when the inactivity timeout occurs. - -In addition, we can enable 'timeout_code' to execute code on the populated timeout event. - -We can also add 'timeout_task_id_field' so we can correlate the task_id, which in this case would be the user's ID. - - -* Given these logs: - -[source,ruby] ----------------------------------- -INFO - 12345 - Clicked One -INFO - 12345 - Clicked Two -INFO - 12345 - Clicked Three ----------------------------------- - -* You can aggregate the amount of clicks the user did like this: - -[source,ruby] ----------------------------------- -filter { - grok { - match => [ "message", "%{LOGLEVEL:loglevel} - %{NOTSPACE:user_id} - %{GREEDYDATA:msg_text}" ] - } - aggregate { - task_id => "%{user_id}" - code => "map['clicks'] ||= 0; map['clicks'] += 1;" - push_map_as_event_on_timeout => true - timeout_task_id_field => "user_id" - timeout => 3600 # 1 hour timeout, user activity will be considered finished one hour after the first event, even if events keep coming - inactivity_timeout => 300 # 5 minutes timeout, user activity will be considered finished if no new events arrive 5 minutes after the last event - timeout_tags => ['_aggregatetimeout'] - timeout_code => "event.set('several_clicks', event.get('clicks') > 1)" - } -} ----------------------------------- - -* After five minutes of inactivity or one hour since the first event, this will yield an event like: - -[source,json] ----------------------------------- -{ - "user_id": "12345", - "clicks": 3, - "several_clicks": true, - "tags": [ - "_aggregatetimeout" - ] -} ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-howitworks"] -==== How it works -* the filter needs a "task_id" to correlate events (log lines) of the same task -* at the beginning of the task, the filter creates a map attached to the task_id -* for each event, you can execute code using 'event' and 'map' (for instance, copy an event field to the map) -* in the final event, you can execute one final piece of code (for instance, add map data to the final event) -* after the final event, the map attached to the task is deleted (thanks to `end_of_task => true`) -* an aggregate map is tied to one task_id value, which is tied to one task_id pattern. So if you have 2 filters with different task_id patterns, even if you have the same task_id value, they won't share the same aggregate map. -* in one filter configuration, it is recommended to define a timeout option to protect the feature against unterminated tasks.
It tells the filter to delete expired maps -* if no timeout is defined, by default, all maps older than 1800 seconds are automatically deleted -* all timeout options have to be defined in only one aggregate filter per task_id pattern (per pipeline). Timeout options are: timeout, inactivity_timeout, timeout_code, push_map_as_event_on_timeout, push_previous_map_as_event, timeout_task_id_field, timeout_tags -* if `code` execution raises an exception, the error is logged and the event is tagged '_aggregateexception' - - -[id="{version}-plugins-{type}s-{plugin}-usecases"] -==== Use Cases -* extract useful metrics from task logs and push them into the final task log event (like in examples #1 and #2) -* extract error information from any task log line, and push it into the final task event (to get a final event with all error information, if any) -* extract all back-end calls as a list, and push this list into the final task event (to get a task profile) -* extract all http headers logged across several lines, and push this list into the final task event (complete http request info) -* for every back-end call, collect call details available on several lines, analyse them, and finally tag the final back-end call log line (error, timeout, business-warning, ...) -* Finally, the task id can be any correlation id matching your needs: it can be a session id, a file path, ... - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Aggregate Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-aggregate_maps_path>> |<>, a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-code>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-end_of_task>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-inactivity_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-map_action>> |<>, one of `["create", "update", "create_or_update"]`|No -| <<{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-push_previous_map_as_event>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-task_id>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout_code>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout_tags>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout_task_id_field>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-aggregate_maps_path"] -===== `aggregate_maps_path` - - * Value type is <> - * There is no default value for this setting. - -The path to the file where aggregate maps are stored when Logstash stops -and are loaded from when Logstash starts. - -If not defined, aggregate maps will not be stored when Logstash stops and will be lost. -Must be defined in only one aggregate filter per pipeline (as aggregate maps are shared at pipeline level). - -Example: -[source,ruby] - filter { - aggregate { - aggregate_maps_path => "/path/to/.aggregate_maps" - } - } - -[id="{version}-plugins-{type}s-{plugin}-code"] -===== `code` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting.
- -The code to execute to update the map, using the current event; or, conversely, the code to execute to update the event, using the current map. - -You will have a 'map' variable and an 'event' variable available (that is, the event itself). - -Example: -[source,ruby] - filter { - aggregate { - code => "map['sql_duration'] += event.get('duration')" - } - } - -[id="{version}-plugins-{type}s-{plugin}-end_of_task"] -===== `end_of_task` - - * Value type is <> - * Default value is `false` - -Tell the filter that the task has ended and, therefore, to delete the aggregate map after code execution. - -[id="{version}-plugins-{type}s-{plugin}-inactivity_timeout"] -===== `inactivity_timeout` - - * Value type is <> - * There is no default value for this setting. - -The number of seconds (since the last event) after which a task is considered expired. - -When a timeout occurs for a task, its aggregate map is evicted. - -If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event. - -`inactivity_timeout` can be defined for each "task_id" pattern. - -`inactivity_timeout` must be lower than `timeout`. - -[id="{version}-plugins-{type}s-{plugin}-map_action"] -===== `map_action` - - * Value type is <> - * Default value is `"create_or_update"` - -Tell the filter what to do with the aggregate map. - -`"create"`: creates the map, and executes the code only if the map wasn't created before - -`"update"`: doesn't create the map, and executes the code only if the map was created before - -`"create_or_update"`: creates the map if it wasn't created before, and executes the code in all cases - -[id="{version}-plugins-{type}s-{plugin}-push_map_as_event_on_timeout"] -===== `push_map_as_event_on_timeout` - - * Value type is <> - * Default value is `false` - -When this option is enabled, each time a task timeout is detected, the task aggregation map is pushed as a new Logstash event. -This makes it possible to detect and process task timeouts in Logstash, and also to manage tasks that have no explicit end event. - -[id="{version}-plugins-{type}s-{plugin}-push_previous_map_as_event"] -===== `push_previous_map_as_event` - - * Value type is <> - * Default value is `false` - -When this option is enabled, each time the aggregate plugin detects a new task id, it pushes the previous aggregate map as a new Logstash event, -and then creates a new empty map for the next task. - -WARNING: this option works fine only if tasks come one after the other. It means: all task1 events come first, then all task2 events, etc. - -[id="{version}-plugins-{type}s-{plugin}-task_id"] -===== `task_id` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The expression defining the task ID used to correlate log events. - -This value must uniquely identify the task. - -Example: -[source,ruby] - filter { - aggregate { - task_id => "%{type}%{my_task_id}" - } - } - -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * Default value is `1800` - -The number of seconds (since the first event) after which a task is considered expired. - -When a timeout occurs for a task, its aggregate map is evicted. - -If 'push_map_as_event_on_timeout' or 'push_previous_map_as_event' is set to true, the task aggregation map is pushed as a new Logstash event. - -Timeout can be defined for each "task_id" pattern. - -[id="{version}-plugins-{type}s-{plugin}-timeout_code"] -===== `timeout_code` - - * Value type is <> - * There is no default value for this setting.
- -The code to execute to complete the generated timeout event, when `'push_map_as_event_on_timeout'` or `'push_previous_map_as_event'` is set to true. -The code block will have access to the newly generated timeout event, which is pre-populated with the aggregation map. - -If `'timeout_task_id_field'` is set, the event is also populated with the task_id value. - -Example: -[source,ruby] - filter { - aggregate { - timeout_code => "event.set('state', 'timeout')" - } - } - -[id="{version}-plugins-{type}s-{plugin}-timeout_tags"] -===== `timeout_tags` - - * Value type is <> - * Default value is `[]` - -Defines the tags to add when a timeout event is generated and yielded. - -Example: -[source,ruby] - filter { - aggregate { - timeout_tags => ["aggregate_timeout"] - } - } - -[id="{version}-plugins-{type}s-{plugin}-timeout_task_id_field"] -===== `timeout_task_id_field` - - * Value type is <> - * There is no default value for this setting. - -This option indicates the field of the generated timeout event in which to store the "task_id" value. -The task id will then be set in the timeout event. This can help correlate which tasks have timed out. - -For example, with the option `timeout_task_id_field => "my_id"`, when the timeout task id is `"12345"`, the generated timeout event will contain `'my_id' => '12345'`. - -By default, if this option is not set, the task id value won't be set in the generated timeout event. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/alter-index.asciidoc b/docs/versioned-plugins/filters/alter-index.asciidoc deleted file mode 100644 index 3aaefc801..000000000 --- a/docs/versioned-plugins/filters/alter-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: alter -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::alter-v3.0.3.asciidoc[] -include::alter-v3.0.2.asciidoc[] -include::alter-v3.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/filters/alter-v3.0.1.asciidoc b/docs/versioned-plugins/filters/alter-v3.0.1.asciidoc deleted file mode 100644 index ff8373857..000000000 --- a/docs/versioned-plugins/filters/alter-v3.0.1.asciidoc +++ /dev/null @@ -1,111 +0,0 @@ -:plugin: alter -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-alter/blob/v3.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Alter filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The alter filter allows you to do general alterations to fields -that are not included in the normal mutate filter. - - -NOTE: The functionality provided by this plugin is likely to -be merged into the 'mutate' filter in future versions.
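- -For instance, here is a minimal sketch (the `status` field and its values are hypothetical, not taken from the plugin docs) that rewrites a field only when it holds an expected value: - -[source,ruby] - filter { - alter { - condrewrite => [ "status", "KO", "error" ] - } - } - -This replaces the content of `status` with `error` only when it currently equals `KO`.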
- -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Alter Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-coalesce>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-condrewrite>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-condrewriteother>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-coalesce"] -===== `coalesce` - - * Value type is <> - * There is no default value for this setting. - -Sets the value of field_name to the first nonnull expression among its arguments. - -Example: -[source,ruby] - filter { - alter { - coalesce => [ - "field_name", "value1", "value2", "value3", ... - ] - } - } - -[id="{version}-plugins-{type}s-{plugin}-condrewrite"] -===== `condrewrite` - - * Value type is <> - * There is no default value for this setting. - -Change the content of the field to the specified value -if the actual content is equal to the expected one. - -Example: -[source,ruby] - filter { - alter { - condrewrite => [ - "field_name", "expected_value", "new_value", - "field_name2", "expected_value2", "new_value2", - .... - ] - } - } - -[id="{version}-plugins-{type}s-{plugin}-condrewriteother"] -===== `condrewriteother` - - * Value type is <> - * There is no default value for this setting. - -Change the content of the field to the specified value -if the content of another field is equal to the expected one. - -Example: -[source,ruby] - filter { - alter { - condrewriteother => [ - "field_name", "expected_value", "field_name_to_change", "value", - "field_name2", "expected_value2", "field_name_to_change2", "value2", - .... - ] - } - } - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/alter-v3.0.2.asciidoc b/docs/versioned-plugins/filters/alter-v3.0.2.asciidoc deleted file mode 100644 index 278317b0a..000000000 --- a/docs/versioned-plugins/filters/alter-v3.0.2.asciidoc +++ /dev/null @@ -1,111 +0,0 @@ -:plugin: alter -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.2 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-alter/blob/v3.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Alter filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The alter filter allows you to do general alterations to fields -that are not included in the normal mutate filter. - - -NOTE: The functionality provided by this plugin is likely to -be merged into the 'mutate' filter in future versions. 
- -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Alter Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-coalesce>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-condrewrite>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-condrewriteother>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-coalesce"] -===== `coalesce` - - * Value type is <> - * There is no default value for this setting. - -Sets the value of field_name to the first nonnull expression among its arguments. - -Example: -[source,ruby] - filter { - alter { - coalesce => [ - "field_name", "value1", "value2", "value3", ... - ] - } - } - -[id="{version}-plugins-{type}s-{plugin}-condrewrite"] -===== `condrewrite` - - * Value type is <> - * There is no default value for this setting. - -Change the content of the field to the specified value -if the actual content is equal to the expected one. - -Example: -[source,ruby] - filter { - alter { - condrewrite => [ - "field_name", "expected_value", "new_value", - "field_name2", "expected_value2", "new_value2", - .... - ] - } - } - -[id="{version}-plugins-{type}s-{plugin}-condrewriteother"] -===== `condrewriteother` - - * Value type is <> - * There is no default value for this setting. - -Change the content of the field to the specified value -if the content of another field is equal to the expected one. - -Example: -[source,ruby] - filter { - alter { - condrewriteother => [ - "field_name", "expected_value", "field_name_to_change", "value", - "field_name2", "expected_value2", "field_name_to_change2", "value2", - .... - ] - } - } - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/alter-v3.0.3.asciidoc b/docs/versioned-plugins/filters/alter-v3.0.3.asciidoc deleted file mode 100644 index fe83eb721..000000000 --- a/docs/versioned-plugins/filters/alter-v3.0.3.asciidoc +++ /dev/null @@ -1,111 +0,0 @@ -:plugin: alter -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-alter/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Alter filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The alter filter allows you to do general alterations to fields -that are not included in the normal mutate filter. - - -NOTE: The functionality provided by this plugin is likely to -be merged into the 'mutate' filter in future versions. 
- -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Alter Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-coalesce>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-condrewrite>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-condrewriteother>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-coalesce"] -===== `coalesce` - - * Value type is <> - * There is no default value for this setting. - -Sets the value of field_name to the first nonnull expression among its arguments. - -Example: -[source,ruby] - filter { - alter { - coalesce => [ - "field_name", "value1", "value2", "value3", ... - ] - } - } - -[id="{version}-plugins-{type}s-{plugin}-condrewrite"] -===== `condrewrite` - - * Value type is <> - * There is no default value for this setting. - -Change the content of the field to the specified value -if the actual content is equal to the expected one. - -Example: -[source,ruby] - filter { - alter { - condrewrite => [ - "field_name", "expected_value", "new_value", - "field_name2", "expected_value2", "new_value2", - .... - ] - } - } - -[id="{version}-plugins-{type}s-{plugin}-condrewriteother"] -===== `condrewriteother` - - * Value type is <> - * There is no default value for this setting. - -Change the content of the field to the specified value -if the content of another field is equal to the expected one. - -Example: -[source,ruby] - filter { - alter { - condrewriteother => [ - "field_name", "expected_value", "field_name_to_change", "value", - "field_name2", "expected_value2", "field_name_to_change2", "value2", - .... - ] - } - } - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/anonymize-index.asciidoc b/docs/versioned-plugins/filters/anonymize-index.asciidoc deleted file mode 100644 index 222460c1f..000000000 --- a/docs/versioned-plugins/filters/anonymize-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: anonymize -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::anonymize-v3.0.6.asciidoc[] -include::anonymize-v3.0.5.asciidoc[] -include::anonymize-v3.0.4.asciidoc[] - diff --git a/docs/versioned-plugins/filters/anonymize-v3.0.4.asciidoc b/docs/versioned-plugins/filters/anonymize-v3.0.4.asciidoc deleted file mode 100644 index 38a4d91bf..000000000 --- a/docs/versioned-plugins/filters/anonymize-v3.0.4.asciidoc +++ /dev/null @@ -1,77 +0,0 @@ -:plugin: anonymize -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-anonymize/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Anonymize filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -deprecated[3.0.3,We recommend that you use the <> instead.] - -Anonymize fields by replacing values with a consistent hash. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Anonymize Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-algorithm>> |<>, one of `["SHA1", "SHA256", "SHA384", "SHA512", "MD5", "MURMUR3", "IPV4_NETWORK"]`|Yes -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-algorithm"] -===== `algorithm` - - * This is a required setting. - * Value can be any of: `SHA1`, `SHA256`, `SHA384`, `SHA512`, `MD5`, `MURMUR3`, `IPV4_NETWORK` - * Default value is `"SHA1"` - -The digest/hash type to use. - -[id="{version}-plugins-{type}s-{plugin}-fields"] -===== `fields` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The fields to be anonymized. - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The hashing key. -When using MURMUR3, the key is ignored but must still be set. -When using IPV4_NETWORK, the key is the subnet prefix length. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/anonymize-v3.0.5.asciidoc b/docs/versioned-plugins/filters/anonymize-v3.0.5.asciidoc deleted file mode 100644 index 71b548a86..000000000 --- a/docs/versioned-plugins/filters/anonymize-v3.0.5.asciidoc +++ /dev/null @@ -1,77 +0,0 @@ -:plugin: anonymize -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-anonymize/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Anonymize filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -deprecated[3.0.3,We recommend that you use the <> instead.] - -Anonymize fields by replacing values with a consistent hash.
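- -For instance, here is a minimal sketch (field names and key are hypothetical) that hashes two fields with SHA256: - -[source,ruby] - filter { - anonymize { - algorithm => "SHA256" - fields => ["client_ip", "user_name"] - key => "some-secret-key" - } - } - -Each listed field is replaced by a consistent hash of its value.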
- -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Anonymize Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-algorithm>> |<>, one of `["SHA1", "SHA256", "SHA384", "SHA512", "MD5", "MURMUR3", "IPV4_NETWORK"]`|Yes -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-algorithm"] -===== `algorithm` - - * This is a required setting. - * Value can be any of: `SHA1`, `SHA256`, `SHA384`, `SHA512`, `MD5`, `MURMUR3`, `IPV4_NETWORK` - * Default value is `"SHA1"` - -digest/hash type - -[id="{version}-plugins-{type}s-{plugin}-fields"] -===== `fields` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The fields to be anonymized - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Hashing key -When using MURMUR3 the key is ignored but must still be set. -When using IPV4_NETWORK key is the subnet prefix lentgh - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/anonymize-v3.0.6.asciidoc b/docs/versioned-plugins/filters/anonymize-v3.0.6.asciidoc deleted file mode 100644 index 1d5a9d329..000000000 --- a/docs/versioned-plugins/filters/anonymize-v3.0.6.asciidoc +++ /dev/null @@ -1,77 +0,0 @@ -:plugin: anonymize -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.6 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-anonymize/blob/v3.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Anonymize filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -deprecated[3.0.3,We recommend that you use the <> instead.] - -Anonymize fields by replacing values with a consistent hash. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Anonymize Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-algorithm>> |<>, one of `["SHA1", "SHA256", "SHA384", "SHA512", "MD5", "MURMUR3", "IPV4_NETWORK"]`|Yes -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-algorithm"] -===== `algorithm` - - * This is a required setting. - * Value can be any of: `SHA1`, `SHA256`, `SHA384`, `SHA512`, `MD5`, `MURMUR3`, `IPV4_NETWORK` - * Default value is `"SHA1"` - -digest/hash type - -[id="{version}-plugins-{type}s-{plugin}-fields"] -===== `fields` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The fields to be anonymized - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Hashing key -When using MURMUR3 the key is ignored but must still be set. -When using IPV4_NETWORK key is the subnet prefix lentgh - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/bytesize-index.asciidoc b/docs/versioned-plugins/filters/bytesize-index.asciidoc deleted file mode 100644 index efc9f5967..000000000 --- a/docs/versioned-plugins/filters/bytesize-index.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -:plugin: bytesize -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -|======================================================================= - - diff --git a/docs/versioned-plugins/filters/checksum-index.asciidoc b/docs/versioned-plugins/filters/checksum-index.asciidoc deleted file mode 100644 index bfa48d370..000000000 --- a/docs/versioned-plugins/filters/checksum-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: checksum -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::checksum-v3.0.4.asciidoc[] -include::checksum-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/filters/checksum-v3.0.3.asciidoc b/docs/versioned-plugins/filters/checksum-v3.0.3.asciidoc deleted file mode 100644 index 777ce08de..000000000 --- a/docs/versioned-plugins/filters/checksum-v3.0.3.asciidoc +++ /dev/null @@ -1,69 +0,0 @@ -:plugin: checksum -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-checksum/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Checksum filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This filter lets you create a checksum based on various parts -of the Logstash event. -This can be useful for deduplication of messages or simply to provide -a custom unique identifier. - -This is VERY experimental and is largely a proof-of-concept. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Checksum Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-algorithm>> |<>, one of `["md5", "sha", "sha1", "sha256", "sha384"]`|No -| <<{version}-plugins-{type}s-{plugin}-keys>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-algorithm"] -===== `algorithm` - - * Value can be any of: `md5`, `sha`, `sha1`, `sha256`, `sha384` - * Default value is `"sha256"` - - - -[id="{version}-plugins-{type}s-{plugin}-keys"] -===== `keys` - - * Value type is <> - * Default value is `["message", "@timestamp", "type"]` - -A list of keys to use in creating the string to checksum. -Keys will be sorted before building the string; -keys and values will then be concatenated with pipe delimiters -and checksummed. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/checksum-v3.0.4.asciidoc b/docs/versioned-plugins/filters/checksum-v3.0.4.asciidoc deleted file mode 100644 index 987435a6c..000000000 --- a/docs/versioned-plugins/filters/checksum-v3.0.4.asciidoc +++ /dev/null @@ -1,69 +0,0 @@ -:plugin: checksum -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-checksum/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Checksum filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This filter lets you create a checksum based on various parts -of the Logstash event. -This can be useful for deduplication of messages or simply to provide -a custom unique identifier. - -This is VERY experimental and is largely a proof-of-concept. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Checksum Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-algorithm>> |<>, one of `["md5", "sha", "sha1", "sha256", "sha384"]`|No -| <<{version}-plugins-{type}s-{plugin}-keys>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-algorithm"] -===== `algorithm` - - * Value can be any of: `md5`, `sha`, `sha1`, `sha256`, `sha384` - * Default value is `"sha256"` - - - -[id="{version}-plugins-{type}s-{plugin}-keys"] -===== `keys` - - * Value type is <> - * Default value is `["message", "@timestamp", "type"]` - -A list of keys to use in creating the string to checksum -Keys will be sorted before building the string -keys and values will then be concatenated with pipe delimeters -and checksummed - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/cidr-index.asciidoc b/docs/versioned-plugins/filters/cidr-index.asciidoc deleted file mode 100644 index 5f29279d5..000000000 --- a/docs/versioned-plugins/filters/cidr-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: cidr -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::cidr-v3.1.2.asciidoc[] -include::cidr-v3.1.1.asciidoc[] -include::cidr-v3.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/filters/cidr-v3.0.1.asciidoc b/docs/versioned-plugins/filters/cidr-v3.0.1.asciidoc deleted file mode 100644 index 4c3924614..000000000 --- a/docs/versioned-plugins/filters/cidr-v3.0.1.asciidoc +++ /dev/null @@ -1,80 +0,0 @@ -:plugin: cidr -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-cidr/blob/v3.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Cidr filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The CIDR filter is for checking IP addresses in events against a list of -network blocks that might contain it. Multiple addresses can be checked -against multiple networks, any match succeeds. Upon success additional tags -and/or fields can be added to the event. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Cidr Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-address>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-network>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-address"] -===== `address` - - * Value type is <> - * Default value is `[]` - -The IP address(es) to check with. Example: -[source,ruby] - filter { - cidr { - add_tag => [ "testnet" ] - address => [ "%{src_ip}", "%{dst_ip}" ] - network => [ "192.0.2.0/24" ] - } - } - -[id="{version}-plugins-{type}s-{plugin}-network"] -===== `network` - - * Value type is <> - * Default value is `[]` - -The IP network(s) to check against. Example: -[source,ruby] - filter { - cidr { - add_tag => [ "linklocal" ] - address => [ "%{clientip}" ] - network => [ "169.254.0.0/16", "fe80::/64" ] - } - } - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/cidr-v3.1.1.asciidoc b/docs/versioned-plugins/filters/cidr-v3.1.1.asciidoc deleted file mode 100644 index 03ebc90a4..000000000 --- a/docs/versioned-plugins/filters/cidr-v3.1.1.asciidoc +++ /dev/null @@ -1,114 +0,0 @@ -:plugin: cidr -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.1 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-cidr/blob/v3.1.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Cidr filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The CIDR filter is for checking IP addresses in events against a list of -network blocks that might contain it. Multiple addresses can be checked -against multiple networks, any match succeeds. Upon success additional tags -and/or fields can be added to the event. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Cidr Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-address>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-network>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-network_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-refresh_interval>>| <>|No -| <<{version}-plugins-{type}s-{plugin}-separator>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-address"] -===== `address` - - * Value type is <> - * Default value is `[]` - -The IP address(es) to check with. 
Example: -[source,ruby] - filter { - cidr { - add_tag => [ "testnet" ] - address => [ "%{src_ip}", "%{dst_ip}" ] - network => [ "192.0.2.0/24" ] - } - } - -[id="{version}-plugins-{type}s-{plugin}-network"] -===== `network` - - * Value type is <> - * Default value is `[]` - -The IP network(s) to check against. Example: -[source,ruby] - filter { - cidr { - add_tag => [ "linklocal" ] - address => [ "%{clientip}" ] - network => [ "169.254.0.0/16", "fe80::/64" ] - } - } - - -[id="{version}-plugins-{type}s-{plugin}-network_path"] -===== `network_path` - - * Value type is <> - * There is no default value for this setting. - -The full path of the external file containing the networks the filter should check with. -Networks are separated by a separator character defined in `separator`. -[source,ruby] - 192.168.1.0/24 - 192.167.0.0/16 -NOTE: It is an error to specify both `network` and `network_path`. - -[id="{version}-plugins-{type}s-{plugin}-refresh_interval"] -===== `refresh_interval` - - * Value type is <> - * Default value is `600` - -When using an external file, this setting will indicate how frequently -(in seconds) Logstash will check the file for updates. - - -[id="{version}-plugins-{type}s-{plugin}-separator"] -===== `separator` - - * Value type is <> - * Default value is `\n` - -Separator character used for parsing networks from the external file -specified by `network_path`. Defaults to newline `\n` character. - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/cidr-v3.1.2.asciidoc b/docs/versioned-plugins/filters/cidr-v3.1.2.asciidoc deleted file mode 100644 index 5b0ad6d78..000000000 --- a/docs/versioned-plugins/filters/cidr-v3.1.2.asciidoc +++ /dev/null @@ -1,114 +0,0 @@ -:plugin: cidr -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.2 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-cidr/blob/v3.1.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Cidr filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The CIDR filter is for checking IP addresses in events against a list of -network blocks that might contain them. Multiple addresses can be checked -against multiple networks; any match succeeds. Upon success, additional tags -and/or fields can be added to the event. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Cidr Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-address>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-network>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-network_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-refresh_interval>>| <>|No -| <<{version}-plugins-{type}s-{plugin}-separator>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-address"] -===== `address` - - * Value type is <> - * Default value is `[]` - -The IP address(es) to check with. Example: -[source,ruby] - filter { - cidr { - add_tag => [ "testnet" ] - address => [ "%{src_ip}", "%{dst_ip}" ] - network => [ "192.0.2.0/24" ] - } - } - -[id="{version}-plugins-{type}s-{plugin}-network"] -===== `network` - - * Value type is <> - * Default value is `[]` - -The IP network(s) to check against. Example: -[source,ruby] - filter { - cidr { - add_tag => [ "linklocal" ] - address => [ "%{clientip}" ] - network => [ "169.254.0.0/16", "fe80::/64" ] - } - } - - -[id="{version}-plugins-{type}s-{plugin}-network_path"] -===== `network_path` - - * Value type is <> - * There is no default value for this setting. - -The full path of the external file containing the networks the filter should check with. -Networks are separated by a separator character defined in `separator`. -[source,ruby] - 192.168.1.0/24 - 192.167.0.0/16 -NOTE: It is an error to specify both `network` and `network_path`. - -[id="{version}-plugins-{type}s-{plugin}-refresh_interval"] -===== `refresh_interval` - - * Value type is <> - * Default value is `600` - -When using an external file, this setting will indicate how frequently -(in seconds) Logstash will check the file for updates. - - -[id="{version}-plugins-{type}s-{plugin}-separator"] -===== `separator` - - * Value type is <> - * Default value is `\n` - -Separator character used for parsing networks from the external file -specified by `network_path`. Defaults to newline `\n` character. - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/cipher-index.asciidoc b/docs/versioned-plugins/filters/cipher-index.asciidoc deleted file mode 100644 index 872886b79..000000000 --- a/docs/versioned-plugins/filters/cipher-index.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -:plugin: cipher -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-10-02 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::cipher-v3.0.1.asciidoc[] -include::cipher-v3.0.0.asciidoc[] -include::cipher-v2.0.7.asciidoc[] -include::cipher-v2.0.6.asciidoc[] - diff --git a/docs/versioned-plugins/filters/cipher-v2.0.6.asciidoc b/docs/versioned-plugins/filters/cipher-v2.0.6.asciidoc deleted file mode 100644 index 93f0b4854..000000000 --- a/docs/versioned-plugins/filters/cipher-v2.0.6.asciidoc +++ /dev/null @@ -1,243 +0,0 @@ -:plugin: cipher -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v2.0.6 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-cipher/blob/v2.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Cipher filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This filter parses a source and applies a cipher or decipher before -storing the result in the target. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Cipher Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-algorithm>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-base64>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cipher_padding>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-iv_random_length>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key_pad>> |<<,>>|No -| <<{version}-plugins-{type}s-{plugin}-key_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_cipher_reuse>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mode>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-algorithm"] -===== `algorithm` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The cipher algorithm. - -A list of supported algorithms can be obtained by running: -[source,ruby] - puts OpenSSL::Cipher.ciphers - -[id="{version}-plugins-{type}s-{plugin}-base64"] -===== `base64` - - * Value type is <> - * Default value is `true` - -Whether to perform a `base64` decode or encode. - -If we are decrypting, the `base64` decode will be done first. -If we are encrypting, the `base64` encode will be done afterwards. - - -[id="{version}-plugins-{type}s-{plugin}-cipher_padding"] -===== `cipher_padding` - - * Value type is <> - * There is no default value for this setting. - -Cipher padding to use. Enables or disables padding. - -By default, encryption operations are padded using standard block padding, -and the padding is checked and removed when decrypting. If the pad -parameter is zero then no padding is performed; the total amount of data -encrypted or decrypted must then be a multiple of the block size, or an -error will occur. - -See EVP_CIPHER_CTX_set_padding for further information. - -We are using the JRuby OpenSSL implementation, which defaults to PKCS5Padding. -If you want to change it, set this parameter. If you want to disable -it, set this parameter to 0: -[source,ruby] - filter { cipher { cipher_padding => 0 }} - -[id="{version}-plugins-{type}s-{plugin}-iv"] -===== `iv` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * There is no default value for this setting. - -The initialization vector to use (statically hard-coded).
For -a random IV, see the `iv_random_length` property. - -NOTE: If iv_random_length is set, it takes precedence over any value set for "iv" - -The cipher modes CBC, CFB, OFB and CTR all need an "initialization -vector", or IV for short. ECB mode is the only mode that does not require -an IV, but there is almost no legitimate use case for this mode -because it does not sufficiently hide plaintext patterns. - -For AES algorithms, set this to a 16-byte string. -[source,ruby] - filter { cipher { iv => "1234567890123456" }} - -Deprecated: Please use `iv_random_length` instead. - -[id="{version}-plugins-{type}s-{plugin}-iv_random_length"] -===== `iv_random_length` - - * Value type is <> - * There is no default value for this setting. - -Force a random IV to be used per encryption invocation and specify -the length of the random IV that will be generated via: - - OpenSSL::Random.random_bytes(int_length) - -If iv_random_length is set, it takes precedence over any value set for "iv" - -Enabling this will force the plugin to generate a unique -random IV for each encryption call. This random IV will be prepended to the -encrypted result bytes and then base64 encoded. On decryption "iv_random_length" must -also be set to utilize this feature. Random IVs are better than statically -hard-coded IVs. - -For AES algorithms, you can set this to 16: -[source,ruby] - filter { cipher { iv_random_length => 16 }} - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * Value type is <> - * There is no default value for this setting. - -The key to use. - -NOTE: If you encounter an error message at runtime containing the following: - -"java.security.InvalidKeyException: Illegal key size: possibly you need to install -Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy Files for your JRE" - -Please read the following: https://github.com/jruby/jruby/wiki/UnlimitedStrengthCrypto - - -[id="{version}-plugins-{type}s-{plugin}-key_pad"] -===== `key_pad` - - * Value type is <> - * Default value is `"\u0000"` - -The character used to pad the key. - -[id="{version}-plugins-{type}s-{plugin}-key_size"] -===== `key_size` - - * Value type is <> - * Default value is `16` - -The key size to pad to. - -It depends on the cipher algorithm. If your key doesn't need -padding, don't set this parameter. - -For example, AES-128 requires a 16-character key, and AES-256 a 32-character key: -[source,ruby] - filter { cipher { key_size => 16 }} - - -[id="{version}-plugins-{type}s-{plugin}-max_cipher_reuse"] -===== `max_cipher_reuse` - - * Value type is <> - * Default value is `1` - -If this is set, the internal Cipher instance will be -re-used up to @max_cipher_reuse times before being -reset() and re-created from scratch. This is an option -for efficiency where lots of data is being encrypted -and decrypted using this filter. This lets the filter -avoid creating new Cipher instances over and over -for each encrypt/decrypt operation. - -This is optional; the default is no re-use of the Cipher -instance (max_cipher_reuse = 1): -[source,ruby] - filter { cipher { max_cipher_reuse => 1000 }} - -[id="{version}-plugins-{type}s-{plugin}-mode"] -===== `mode` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting.
- -Whether to encrypt or decrypt the data. - -Valid values are `encrypt` or `decrypt`. - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * Value type is <> - * Default value is `"message"` - -The field on which to perform the filter. - -Example, to use the @message field (default): -[source,ruby] - filter { cipher { source => "message" } } - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"message"` - -The name of the field in which to put the result. - -Example, to place the result into `crypt`: -[source,ruby] - filter { cipher { target => "crypt" } } - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/cipher-v2.0.7.asciidoc b/docs/versioned-plugins/filters/cipher-v2.0.7.asciidoc deleted file mode 100644 index 0c4f34c40..000000000 --- a/docs/versioned-plugins/filters/cipher-v2.0.7.asciidoc +++ /dev/null @@ -1,243 +0,0 @@ -:plugin: cipher -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.0.7 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-cipher/blob/v2.0.7/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Cipher filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This filter parses a source and applies a cipher or decipher before -storing the result in the target. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Cipher Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-algorithm>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-base64>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cipher_padding>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-iv_random_length>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key_pad>> |<<,>>|No -| <<{version}-plugins-{type}s-{plugin}-key_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_cipher_reuse>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mode>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-algorithm"] -===== `algorithm` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The cipher algorithm. - -A list of supported algorithms can be obtained by running: -[source,ruby] - puts OpenSSL::Cipher.ciphers - -[id="{version}-plugins-{type}s-{plugin}-base64"] -===== `base64` - - * Value type is <> - * Default value is `true` - -Whether to perform a `base64` decode or encode. - -If we are decrypting, the `base64` decode will be done first.
-If we are encrypting, `base64` will be done after. - - -[id="{version}-plugins-{type}s-{plugin}-cipher_padding"] -===== `cipher_padding` - - * Value type is <> - * There is no default value for this setting. - -Cipher padding to use. Enables or disables padding. - -By default encryption operations are padded using standard block padding -and the padding is checked and removed when decrypting. If the pad -parameter is zero then no padding is performed, the total amount of data -encrypted or decrypted must then be a multiple of the block size or an -error will occur. - -See EVP_CIPHER_CTX_set_padding for further information. - -We are using Openssl jRuby which uses default padding to PKCS5Padding -If you want to change it, set this parameter. If you want to disable -it, Set this parameter to 0 -[source,ruby] - filter { cipher { cipher_padding => 0 }} - -[id="{version}-plugins-{type}s-{plugin}-iv"] -===== `iv` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * There is no default value for this setting. - -The initialization vector to use (statically hard-coded). For -a random IV see the iv_random_length property - -NOTE: If iv_random_length is set, it takes precedence over any value set for "iv" - -The cipher modes CBC, CFB, OFB and CTR all need an "initialization -vector", or short, IV. ECB mode is the only mode that does not require -an IV, but there is almost no legitimate use case for this mode -because of the fact that it does not sufficiently hide plaintext patterns. - -For AES algorithms set this to a 16 byte string. -[source,ruby] - filter { cipher { iv => "1234567890123456" }} - -Deprecated: Please use `iv_random_length` instead - -[id="{version}-plugins-{type}s-{plugin}-iv_random_length"] -===== `iv_random_length` - - * Value type is <> - * There is no default value for this setting. - -Force an random IV to be used per encryption invocation and specify -the length of the random IV that will be generated via: - - OpenSSL::Random.random_bytes(int_length) - -If iv_random_length is set, it takes precedence over any value set for "iv" - -Enabling this will force the plugin to generate a unique -random IV for each encryption call. This random IV will be prepended to the -encrypted result bytes and then base64 encoded. On decryption "iv_random_length" must -also be set to utilize this feature. Random IV's are better than statically -hardcoded IVs - -For AES algorithms you can set this to a 16 -[source,ruby] - filter { cipher { iv_random_length => 16 }} - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * Value type is <> - * There is no default value for this setting. - -The key to use - -NOTE: If you encounter an error message at runtime containing the following: - -"java.security.InvalidKeyException: Illegal key size: possibly you need to install -Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy Files for your JRE" - -Please read the following: https://github.com/jruby/jruby/wiki/UnlimitedStrengthCrypto - - -[id="{version}-plugins-{type}s-{plugin}-key_pad"] -===== `key_pad` - - * Value type is <> - * Default value is `"\u0000"` - -The character used to pad the key - -[id="{version}-plugins-{type}s-{plugin}-key_size"] -===== `key_size` - - * Value type is <> - * Default value is `16` - -The key size to pad - -It depends of the cipher algorithm. 
If your key doesn't need -padding, don't set this parameter - -Example, for AES-128, we must have 16 char long key. AES-256 = 32 chars -[source,ruby] - filter { cipher { key_size => 16 } - - -[id="{version}-plugins-{type}s-{plugin}-max_cipher_reuse"] -===== `max_cipher_reuse` - - * Value type is <> - * Default value is `1` - -If this is set the internal Cipher instance will be -re-used up to @max_cipher_reuse times before being -reset() and re-created from scratch. This is an option -for efficiency where lots of data is being encrypted -and decrypted using this filter. This lets the filter -avoid creating new Cipher instances over and over -for each encrypt/decrypt operation. - -This is optional, the default is no re-use of the Cipher -instance and max_cipher_reuse = 1 by default -[source,ruby] - filter { cipher { max_cipher_reuse => 1000 }} - -[id="{version}-plugins-{type}s-{plugin}-mode"] -===== `mode` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Encrypting or decrypting some data - -Valid values are encrypt or decrypt - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * Value type is <> - * Default value is `"message"` - -The field to perform filter - -Example, to use the @message field (default) : -[source,ruby] - filter { cipher { source => "message" } } - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"message"` - -The name of the container to put the result - -Example, to place the result into crypt : -[source,ruby] - filter { cipher { target => "crypt" } } - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/cipher-v3.0.0.asciidoc b/docs/versioned-plugins/filters/cipher-v3.0.0.asciidoc deleted file mode 100644 index b207b1333..000000000 --- a/docs/versioned-plugins/filters/cipher-v3.0.0.asciidoc +++ /dev/null @@ -1,220 +0,0 @@ -:plugin: cipher -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.0 -:release_date: 2017-10-02 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-cipher/blob/v3.0.0/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Cipher filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This filter parses a source and apply a cipher or decipher before -storing it in the target. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Cipher Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-algorithm>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-base64>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cipher_padding>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-iv_random_length>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key_pad>> |<<,>>|No -| <<{version}-plugins-{type}s-{plugin}-key_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_cipher_reuse>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mode>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-algorithm"] -===== `algorithm` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The cipher algorithm - -A list of supported algorithms can be obtained by -[source,ruby] - puts OpenSSL::Cipher.ciphers - -[id="{version}-plugins-{type}s-{plugin}-base64"] -===== `base64` - - * Value type is <> - * Default value is `true` - -Do we have to perform a `base64` decode or encode? - -If we are decrypting, `base64` decode will be done before. -If we are encrypting, `base64` will be done after. - - -[id="{version}-plugins-{type}s-{plugin}-cipher_padding"] -===== `cipher_padding` - - * Value type is <> - * There is no default value for this setting. - -Cipher padding to use. Enables or disables padding. - -By default encryption operations are padded using standard block padding -and the padding is checked and removed when decrypting. If the pad -parameter is zero then no padding is performed, the total amount of data -encrypted or decrypted must then be a multiple of the block size or an -error will occur. - -See EVP_CIPHER_CTX_set_padding for further information. - -We are using Openssl jRuby which uses default padding to PKCS5Padding -If you want to change it, set this parameter. If you want to disable -it, Set this parameter to 0 -[source,ruby] - filter { cipher { cipher_padding => 0 }} - -[id="{version}-plugins-{type}s-{plugin}-iv_random_length"] -===== `iv_random_length` - - * Value type is <> - * There is no default value for this setting. - -Force an random IV to be used per encryption invocation and specify -the length of the random IV that will be generated via: - - OpenSSL::Random.random_bytes(int_length) - -If iv_random_length is set, it takes precedence over any value set for "iv" - -Enabling this will force the plugin to generate a unique -random IV for each encryption call. This random IV will be prepended to the -encrypted result bytes and then base64 encoded. On decryption "iv_random_length" must -also be set to utilize this feature. Random IV's are better than statically -hardcoded IVs - -For AES algorithms you can set this to a 16 -[source,ruby] - filter { cipher { iv_random_length => 16 }} - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * Value type is <> - * There is no default value for this setting. 
- -The key to use - -NOTE: If you encounter an error message at runtime containing the following: - -"java.security.InvalidKeyException: Illegal key size: possibly you need to install -Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy Files for your JRE" - -Please read the following: https://github.com/jruby/jruby/wiki/UnlimitedStrengthCrypto - - -[id="{version}-plugins-{type}s-{plugin}-key_pad"] -===== `key_pad` - - * Value type is <> - * Default value is `"\u0000"` - -The character used to pad the key - -[id="{version}-plugins-{type}s-{plugin}-key_size"] -===== `key_size` - - * Value type is <> - * Default value is `16` - -The key size to pad - -It depends of the cipher algorithm. If your key doesn't need -padding, don't set this parameter - -Example, for AES-128, we must have 16 char long key. AES-256 = 32 chars -[source,ruby] - filter { cipher { key_size => 16 } - - -[id="{version}-plugins-{type}s-{plugin}-max_cipher_reuse"] -===== `max_cipher_reuse` - - * Value type is <> - * Default value is `1` - -If this is set the internal Cipher instance will be -re-used up to @max_cipher_reuse times before being -reset() and re-created from scratch. This is an option -for efficiency where lots of data is being encrypted -and decrypted using this filter. This lets the filter -avoid creating new Cipher instances over and over -for each encrypt/decrypt operation. - -This is optional, the default is no re-use of the Cipher -instance and max_cipher_reuse = 1 by default -[source,ruby] - filter { cipher { max_cipher_reuse => 1000 }} - -[id="{version}-plugins-{type}s-{plugin}-mode"] -===== `mode` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Encrypting or decrypting some data - -Valid values are encrypt or decrypt - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * Value type is <> - * Default value is `"message"` - -The field to perform filter - -Example, to use the @message field (default) : -[source,ruby] - filter { cipher { source => "message" } } - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"message"` - -The name of the container to put the result - -Example, to place the result into crypt : -[source,ruby] - filter { cipher { target => "crypt" } } - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/cipher-v3.0.1.asciidoc b/docs/versioned-plugins/filters/cipher-v3.0.1.asciidoc deleted file mode 100644 index 009f95aaa..000000000 --- a/docs/versioned-plugins/filters/cipher-v3.0.1.asciidoc +++ /dev/null @@ -1,220 +0,0 @@ -:plugin: cipher -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.1 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-cipher/blob/v3.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Cipher filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This filter parses a source and apply a cipher or decipher before -storing it in the target. 
- - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Cipher Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-algorithm>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-base64>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cipher_padding>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-iv_random_length>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key_pad>> |<<,>>|No -| <<{version}-plugins-{type}s-{plugin}-key_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_cipher_reuse>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mode>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-algorithm"] -===== `algorithm` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The cipher algorithm - -A list of supported algorithms can be obtained by -[source,ruby] - puts OpenSSL::Cipher.ciphers - -[id="{version}-plugins-{type}s-{plugin}-base64"] -===== `base64` - - * Value type is <> - * Default value is `true` - -Do we have to perform a `base64` decode or encode? - -If we are decrypting, `base64` decode will be done before. -If we are encrypting, `base64` will be done after. - - -[id="{version}-plugins-{type}s-{plugin}-cipher_padding"] -===== `cipher_padding` - - * Value type is <> - * There is no default value for this setting. - -Cipher padding to use. Enables or disables padding. - -By default encryption operations are padded using standard block padding -and the padding is checked and removed when decrypting. If the pad -parameter is zero then no padding is performed, the total amount of data -encrypted or decrypted must then be a multiple of the block size or an -error will occur. - -See EVP_CIPHER_CTX_set_padding for further information. - -We are using Openssl jRuby which uses default padding to PKCS5Padding -If you want to change it, set this parameter. If you want to disable -it, Set this parameter to 0 -[source,ruby] - filter { cipher { cipher_padding => 0 }} - -[id="{version}-plugins-{type}s-{plugin}-iv_random_length"] -===== `iv_random_length` - - * Value type is <> - * There is no default value for this setting. - -Force an random IV to be used per encryption invocation and specify -the length of the random IV that will be generated via: - - OpenSSL::Random.random_bytes(int_length) - -If iv_random_length is set, it takes precedence over any value set for "iv" - -Enabling this will force the plugin to generate a unique -random IV for each encryption call. This random IV will be prepended to the -encrypted result bytes and then base64 encoded. On decryption "iv_random_length" must -also be set to utilize this feature. 
Random IV's are better than statically -hardcoded IVs - -For AES algorithms you can set this to a 16 -[source,ruby] - filter { cipher { iv_random_length => 16 }} - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * Value type is <> - * There is no default value for this setting. - -The key to use - -NOTE: If you encounter an error message at runtime containing the following: - -"java.security.InvalidKeyException: Illegal key size: possibly you need to install -Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy Files for your JRE" - -Please read the following: https://github.com/jruby/jruby/wiki/UnlimitedStrengthCrypto - - -[id="{version}-plugins-{type}s-{plugin}-key_pad"] -===== `key_pad` - - * Value type is <> - * Default value is `"\u0000"` - -The character used to pad the key - -[id="{version}-plugins-{type}s-{plugin}-key_size"] -===== `key_size` - - * Value type is <> - * Default value is `16` - -The key size to pad - -It depends of the cipher algorithm. If your key doesn't need -padding, don't set this parameter - -Example, for AES-128, we must have 16 char long key. AES-256 = 32 chars -[source,ruby] - filter { cipher { key_size => 16 } - - -[id="{version}-plugins-{type}s-{plugin}-max_cipher_reuse"] -===== `max_cipher_reuse` - - * Value type is <> - * Default value is `1` - -If this is set the internal Cipher instance will be -re-used up to @max_cipher_reuse times before being -reset() and re-created from scratch. This is an option -for efficiency where lots of data is being encrypted -and decrypted using this filter. This lets the filter -avoid creating new Cipher instances over and over -for each encrypt/decrypt operation. - -This is optional, the default is no re-use of the Cipher -instance and max_cipher_reuse = 1 by default -[source,ruby] - filter { cipher { max_cipher_reuse => 1000 }} - -[id="{version}-plugins-{type}s-{plugin}-mode"] -===== `mode` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. 
-
-Whether to encrypt or decrypt the data.
-
-Valid values are `encrypt` or `decrypt`.
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
-  * Value type is <>
-  * Default value is `"message"`
-
-The field to apply the filter to.
-
-For example, to use the `message` field (the default):
-[source,ruby]
-    filter { cipher { source => "message" } }
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
-  * Value type is <>
-  * Default value is `"message"`
-
-The name of the field to store the result in.
-
-For example, to place the result in `crypt`:
-[source,ruby]
-    filter { cipher { target => "crypt" } }
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/clone-index.asciidoc b/docs/versioned-plugins/filters/clone-index.asciidoc
deleted file mode 100644
index e411c7f54..000000000
--- a/docs/versioned-plugins/filters/clone-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: clone
-:type: filter
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-15
-| <> | 2017-06-23
-|=======================================================================
-
-include::clone-v3.0.5.asciidoc[]
-include::clone-v3.0.4.asciidoc[]
-include::clone-v3.0.3.asciidoc[]
-
diff --git a/docs/versioned-plugins/filters/clone-v3.0.3.asciidoc b/docs/versioned-plugins/filters/clone-v3.0.3.asciidoc
deleted file mode 100644
index 1feaab8e7..000000000
--- a/docs/versioned-plugins/filters/clone-v3.0.3.asciidoc
+++ /dev/null
@@ -1,57 +0,0 @@
-:plugin: clone
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.3
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-clone/blob/v3.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="plugins-{type}-{plugin}"]
-
-=== Clone filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The clone filter is for duplicating events.
-A clone will be created for each type in the clone list.
-The original event is left unchanged.
-Created events are inserted into the pipeline
-as normal events and will be processed by the remaining pipeline configuration
-starting from the filter that generated them (i.e. this plugin).
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Clone Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-clones>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-clones"]
-===== `clones`
-
-  * Value type is <>
-  * Default value is `[]`
-
-A new clone will be created with the given type for each type in this list.
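-
-As a usage sketch (the type names `cloned` and `audit` here are placeholder
-values, not defaults), the following configuration emits two clones of every
-event in addition to the original:
-[source,ruby]
-    filter {
-      clone {
-        clones => ["cloned", "audit"]
-      }
-    }
-
-Each clone is created with the corresponding type from the list, so later
-conditionals can treat the copies differently from the original event.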
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/clone-v3.0.4.asciidoc b/docs/versioned-plugins/filters/clone-v3.0.4.asciidoc deleted file mode 100644 index 96e3bc45c..000000000 --- a/docs/versioned-plugins/filters/clone-v3.0.4.asciidoc +++ /dev/null @@ -1,57 +0,0 @@ -:plugin: clone -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-clone/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Clone filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The clone filter is for duplicating events. -A clone will be created for each type in the clone list. -The original event is left unchanged. -Created events are inserted into the pipeline -as normal events and will be processed by the remaining pipeline configuration -starting from the filter that generated them (i.e. this plugin). - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Clone Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-clones>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-clones"] -===== `clones` - - * Value type is <> - * Default value is `[]` - -A new clone will be created with the given type for each type in this list. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/clone-v3.0.5.asciidoc b/docs/versioned-plugins/filters/clone-v3.0.5.asciidoc deleted file mode 100644 index 00a235bde..000000000 --- a/docs/versioned-plugins/filters/clone-v3.0.5.asciidoc +++ /dev/null @@ -1,57 +0,0 @@ -:plugin: clone -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-clone/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Clone filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The clone filter is for duplicating events. -A clone will be created for each type in the clone list. -The original event is left unchanged. 
-Created events are inserted into the pipeline -as normal events and will be processed by the remaining pipeline configuration -starting from the filter that generated them (i.e. this plugin). - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Clone Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-clones>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-clones"] -===== `clones` - - * Value type is <> - * Default value is `[]` - -A new clone will be created with the given type for each type in this list. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/cloudfoundry-index.asciidoc b/docs/versioned-plugins/filters/cloudfoundry-index.asciidoc deleted file mode 100644 index 70c2b846c..000000000 --- a/docs/versioned-plugins/filters/cloudfoundry-index.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -:plugin: cloudfoundry -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -|======================================================================= - - diff --git a/docs/versioned-plugins/filters/collate-index.asciidoc b/docs/versioned-plugins/filters/collate-index.asciidoc deleted file mode 100644 index 0998a1c97..000000000 --- a/docs/versioned-plugins/filters/collate-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: collate -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::collate-v2.0.6.asciidoc[] -include::collate-v2.0.5.asciidoc[] - diff --git a/docs/versioned-plugins/filters/collate-v2.0.5.asciidoc b/docs/versioned-plugins/filters/collate-v2.0.5.asciidoc deleted file mode 100644 index 6aa1bc021..000000000 --- a/docs/versioned-plugins/filters/collate-v2.0.5.asciidoc +++ /dev/null @@ -1,84 +0,0 @@ -:plugin: collate -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.0.5 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-collate/blob/v2.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Collate filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Collate events by time or count. - -The original goal of this filter was to merge the logs from different sources -by the time of log, for example, in real-time log collection, logs can be -collated by amount of 3000 logs or can be collated in 30 seconds. 
- -The config looks like this: -[source,ruby] - filter { - collate { - count => 3000 - interval => "30s" - order => "ascending" - } - } - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Collate Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-order>> |<>, one of `["ascending", "descending"]`|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-count"] -===== `count` - - * Value type is <> - * Default value is `1000` - -How many logs should be collated. - -[id="{version}-plugins-{type}s-{plugin}-interval"] -===== `interval` - - * Value type is <> - * Default value is `"1m"` - -The `interval` is the time window which how long the logs should be collated. (default `1m`) - -[id="{version}-plugins-{type}s-{plugin}-order"] -===== `order` - - * Value can be any of: `ascending`, `descending` - * Default value is `"ascending"` - -The `order` collated events should appear in. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/collate-v2.0.6.asciidoc b/docs/versioned-plugins/filters/collate-v2.0.6.asciidoc deleted file mode 100644 index bf5e4303d..000000000 --- a/docs/versioned-plugins/filters/collate-v2.0.6.asciidoc +++ /dev/null @@ -1,84 +0,0 @@ -:plugin: collate -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.0.6 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-collate/blob/v2.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Collate filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Collate events by time or count. - -The original goal of this filter was to merge the logs from different sources -by the time of log, for example, in real-time log collection, logs can be -collated by amount of 3000 logs or can be collated in 30 seconds. - -The config looks like this: -[source,ruby] - filter { - collate { - count => 3000 - interval => "30s" - order => "ascending" - } - } - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Collate Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-order>> |<>, one of `["ascending", "descending"]`|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-count"] -===== `count` - - * Value type is <> - * Default value is `1000` - -How many logs should be collated. - -[id="{version}-plugins-{type}s-{plugin}-interval"] -===== `interval` - - * Value type is <> - * Default value is `"1m"` - -The `interval` is the time window which how long the logs should be collated. (default `1m`) - -[id="{version}-plugins-{type}s-{plugin}-order"] -===== `order` - - * Value can be any of: `ascending`, `descending` - * Default value is `"ascending"` - -The `order` collated events should appear in. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/csv-index.asciidoc b/docs/versioned-plugins/filters/csv-index.asciidoc deleted file mode 100644 index 6e0c3d04f..000000000 --- a/docs/versioned-plugins/filters/csv-index.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -:plugin: csv -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-11-03 -| <> | 2017-08-15 -| <> | 2017-06-23 -| <> | 2017-05-24 -|======================================================================= - -include::csv-v3.0.7.asciidoc[] -include::csv-v3.0.6.asciidoc[] -include::csv-v3.0.5.asciidoc[] -include::csv-v3.0.4.asciidoc[] -include::csv-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/filters/csv-v3.0.3.asciidoc b/docs/versioned-plugins/filters/csv-v3.0.3.asciidoc deleted file mode 100644 index 2b6578204..000000000 --- a/docs/versioned-plugins/filters/csv-v3.0.3.asciidoc +++ /dev/null @@ -1,152 +0,0 @@ -:plugin: csv -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-05-24 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-csv/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Csv - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The CSV filter takes an event field containing CSV data, parses it, -and stores it as individual fields (can optionally specify the names). -This filter can also parse data with any separator, not just commas. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Csv Filter Configuration Options - -This plugin supports the following configuration options plus the <> described later. 
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-autodetect_column_names>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-autogenerate_column_names>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-columns>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-convert>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-quote_char>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-separator>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-skip_empty_columns>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-|=======================================================================
-
-Also see <> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-autodetect_column_names"]
-===== `autodetect_column_names`
-
-  * Value type is <>
-  * Default value is `false`
-
-Define whether column names should be auto-detected from the header row or not.
-Defaults to false.
-
-[id="{version}-plugins-{type}s-{plugin}-autogenerate_column_names"]
-===== `autogenerate_column_names`
-
-  * Value type is <>
-  * Default value is `true`
-
-Define whether column names should be autogenerated or not.
-Defaults to true. If set to false, columns without a specified header will not be parsed.
-
-[id="{version}-plugins-{type}s-{plugin}-columns"]
-===== `columns`
-
-  * Value type is <>
-  * Default value is `[]`
-
-Define a list of column names (in the order they appear in the CSV,
-as if it were a header line). If `columns` is not configured, or there
-are not enough columns specified, the default column names are
-"column1", "column2", etc. In the case that there are more columns
-in the data than specified in this column list, extra columns will be auto-numbered
-(e.g. "user_defined_1", "user_defined_2", "column3", "column4", etc.).
-
-[id="{version}-plugins-{type}s-{plugin}-convert"]
-===== `convert`
-
-  * Value type is <>
-  * Default value is `{}`
-
-Define a set of datatype conversions to be applied to columns.
-Possible conversions are `integer`, `float`, `date`, `date_time`, and `boolean`.
-
-Example:
-[source,ruby]
-    filter {
-      csv {
-        convert => {
-          "column1" => "integer"
-          "column2" => "boolean"
-        }
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-quote_char"]
-===== `quote_char`
-
-  * Value type is <>
-  * Default value is `"\""`
-
-Define the character used to quote CSV fields. If this is not specified,
-the default is a double quote `"`.
-Optional.
-
-[id="{version}-plugins-{type}s-{plugin}-separator"]
-===== `separator`
-
-  * Value type is <>
-  * Default value is `","`
-
-Define the column separator value. If this is not specified, the default
-is a comma `,`. If you want to use a tab as the separator, you need
-to set the value to the actual tab character, not `\t`.
-Optional.
-
-[id="{version}-plugins-{type}s-{plugin}-skip_empty_columns"]
-===== `skip_empty_columns`
-
-  * Value type is <>
-  * Default value is `false`
-
-Define whether empty columns should be skipped.
-Defaults to false. If set to true, columns containing no value will not get set.
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
-  * Value type is <>
-  * Default value is `"message"`
-
-The CSV data in the value of the `source` field will be expanded into a
-data structure.
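-
-As an end-to-end sketch (the field and column names are hypothetical), the
-following configuration parses a semicolon-separated `message` into named
-top-level fields:
-[source,ruby]
-    filter {
-      csv {
-        source    => "message"
-        separator => ";"
-        columns   => ["user", "action", "duration"]
-      }
-    }
-
-An event whose `message` is `alice;login;42` would gain `user`, `action`,
-and `duration` fields with those values.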
- -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -Define target field for placing the data. -Defaults to writing to the root of the event. - - - -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/csv-v3.0.4.asciidoc b/docs/versioned-plugins/filters/csv-v3.0.4.asciidoc deleted file mode 100644 index 7ce6f5168..000000000 --- a/docs/versioned-plugins/filters/csv-v3.0.4.asciidoc +++ /dev/null @@ -1,153 +0,0 @@ -:plugin: csv -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-csv/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Csv filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The CSV filter takes an event field containing CSV data, parses it, -and stores it as individual fields (can optionally specify the names). -This filter can also parse data with any separator, not just commas. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Csv Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-autodetect_column_names>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-autogenerate_column_names>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-columns>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-convert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-quote_char>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-separator>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-skip_empty_columns>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-autodetect_column_names"] -===== `autodetect_column_names` - - * Value type is <> - * Default value is `false` - -Define whether column names should be auto-detected from the header column or not. -Defaults to false. - -[id="{version}-plugins-{type}s-{plugin}-autogenerate_column_names"] -===== `autogenerate_column_names` - - * Value type is <> - * Default value is `true` - -Define whether column names should autogenerated or not. -Defaults to true. If set to false, columns not having a header specified will not be parsed. - -[id="{version}-plugins-{type}s-{plugin}-columns"] -===== `columns` - - * Value type is <> - * Default value is `[]` - -Define a list of column names (in the order they appear in the CSV, -as if it were a header line). If `columns` is not configured, or there -are not enough columns specified, the default column names are -"column1", "column2", etc. 
In the case that there are more columns -in the data than specified in this column list, extra columns will be auto-numbered: -(e.g. "user_defined_1", "user_defined_2", "column3", "column4", etc.) - -[id="{version}-plugins-{type}s-{plugin}-convert"] -===== `convert` - - * Value type is <> - * Default value is `{}` - -Define a set of datatype conversions to be applied to columns. -Possible conversions are integer, float, date, date_time, boolean - -# Example: -[source,ruby] - filter { - csv { - convert => { - "column1" => "integer" - "column2" => "boolean" - } - } - } - -[id="{version}-plugins-{type}s-{plugin}-quote_char"] -===== `quote_char` - - * Value type is <> - * Default value is `"\""` - -Define the character used to quote CSV fields. If this is not specified -the default is a double quote `"`. -Optional. - -[id="{version}-plugins-{type}s-{plugin}-separator"] -===== `separator` - - * Value type is <> - * Default value is `","` - -Define the column separator value. If this is not specified, the default -is a comma `,`. If you want to define a tabulation as a separator, you need -to set the value to the actual tab character and not `\t`. -Optional. - -[id="{version}-plugins-{type}s-{plugin}-skip_empty_columns"] -===== `skip_empty_columns` - - * Value type is <> - * Default value is `false` - -Define whether empty columns should be skipped. -Defaults to false. If set to true, columns containing no value will not get set. - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * Value type is <> - * Default value is `"message"` - -The CSV data in the value of the `source` field will be expanded into a -data structure. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -Define target field for placing the data. -Defaults to writing to the root of the event. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/csv-v3.0.5.asciidoc b/docs/versioned-plugins/filters/csv-v3.0.5.asciidoc deleted file mode 100644 index 01767cdaa..000000000 --- a/docs/versioned-plugins/filters/csv-v3.0.5.asciidoc +++ /dev/null @@ -1,153 +0,0 @@ -:plugin: csv -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-csv/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Csv filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The CSV filter takes an event field containing CSV data, parses it, -and stores it as individual fields (can optionally specify the names). -This filter can also parse data with any separator, not just commas. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Csv Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-autodetect_column_names>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-autogenerate_column_names>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-columns>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-convert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-quote_char>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-separator>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-skip_empty_columns>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-autodetect_column_names"] -===== `autodetect_column_names` - - * Value type is <> - * Default value is `false` - -Define whether column names should be auto-detected from the header column or not. -Defaults to false. - -[id="{version}-plugins-{type}s-{plugin}-autogenerate_column_names"] -===== `autogenerate_column_names` - - * Value type is <> - * Default value is `true` - -Define whether column names should autogenerated or not. -Defaults to true. If set to false, columns not having a header specified will not be parsed. - -[id="{version}-plugins-{type}s-{plugin}-columns"] -===== `columns` - - * Value type is <> - * Default value is `[]` - -Define a list of column names (in the order they appear in the CSV, -as if it were a header line). If `columns` is not configured, or there -are not enough columns specified, the default column names are -"column1", "column2", etc. In the case that there are more columns -in the data than specified in this column list, extra columns will be auto-numbered: -(e.g. "user_defined_1", "user_defined_2", "column3", "column4", etc.) - -[id="{version}-plugins-{type}s-{plugin}-convert"] -===== `convert` - - * Value type is <> - * Default value is `{}` - -Define a set of datatype conversions to be applied to columns. -Possible conversions are integer, float, date, date_time, boolean - -# Example: -[source,ruby] - filter { - csv { - convert => { - "column1" => "integer" - "column2" => "boolean" - } - } - } - -[id="{version}-plugins-{type}s-{plugin}-quote_char"] -===== `quote_char` - - * Value type is <> - * Default value is `"\""` - -Define the character used to quote CSV fields. If this is not specified -the default is a double quote `"`. -Optional. - -[id="{version}-plugins-{type}s-{plugin}-separator"] -===== `separator` - - * Value type is <> - * Default value is `","` - -Define the column separator value. If this is not specified, the default -is a comma `,`. If you want to define a tabulation as a separator, you need -to set the value to the actual tab character and not `\t`. -Optional. - -[id="{version}-plugins-{type}s-{plugin}-skip_empty_columns"] -===== `skip_empty_columns` - - * Value type is <> - * Default value is `false` - -Define whether empty columns should be skipped. -Defaults to false. If set to true, columns containing no value will not get set. - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * Value type is <> - * Default value is `"message"` - -The CSV data in the value of the `source` field will be expanded into a -data structure. 
- -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -Define target field for placing the data. -Defaults to writing to the root of the event. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/csv-v3.0.6.asciidoc b/docs/versioned-plugins/filters/csv-v3.0.6.asciidoc deleted file mode 100644 index 441d2a0fd..000000000 --- a/docs/versioned-plugins/filters/csv-v3.0.6.asciidoc +++ /dev/null @@ -1,153 +0,0 @@ -:plugin: csv -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.6 -:release_date: 2017-11-03 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-csv/blob/v3.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Csv filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The CSV filter takes an event field containing CSV data, parses it, -and stores it as individual fields (can optionally specify the names). -This filter can also parse data with any separator, not just commas. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Csv Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-autodetect_column_names>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-autogenerate_column_names>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-columns>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-convert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-quote_char>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-separator>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-skip_empty_columns>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-autodetect_column_names"] -===== `autodetect_column_names` - - * Value type is <> - * Default value is `false` - -Define whether column names should be auto-detected from the header column or not. -Defaults to false. - -[id="{version}-plugins-{type}s-{plugin}-autogenerate_column_names"] -===== `autogenerate_column_names` - - * Value type is <> - * Default value is `true` - -Define whether column names should autogenerated or not. -Defaults to true. If set to false, columns not having a header specified will not be parsed. - -[id="{version}-plugins-{type}s-{plugin}-columns"] -===== `columns` - - * Value type is <> - * Default value is `[]` - -Define a list of column names (in the order they appear in the CSV, -as if it were a header line). If `columns` is not configured, or there -are not enough columns specified, the default column names are -"column1", "column2", etc. 
In the case that there are more columns -in the data than specified in this column list, extra columns will be auto-numbered: -(e.g. "user_defined_1", "user_defined_2", "column3", "column4", etc.) - -[id="{version}-plugins-{type}s-{plugin}-convert"] -===== `convert` - - * Value type is <> - * Default value is `{}` - -Define a set of datatype conversions to be applied to columns. -Possible conversions are integer, float, date, date_time, boolean - -# Example: -[source,ruby] - filter { - csv { - convert => { - "column1" => "integer" - "column2" => "boolean" - } - } - } - -[id="{version}-plugins-{type}s-{plugin}-quote_char"] -===== `quote_char` - - * Value type is <> - * Default value is `"\""` - -Define the character used to quote CSV fields. If this is not specified -the default is a double quote `"`. -Optional. - -[id="{version}-plugins-{type}s-{plugin}-separator"] -===== `separator` - - * Value type is <> - * Default value is `","` - -Define the column separator value. If this is not specified, the default -is a comma `,`. If you want to define a tabulation as a separator, you need -to set the value to the actual tab character and not `\t`. -Optional. - -[id="{version}-plugins-{type}s-{plugin}-skip_empty_columns"] -===== `skip_empty_columns` - - * Value type is <> - * Default value is `false` - -Define whether empty columns should be skipped. -Defaults to false. If set to true, columns containing no value will not get set. - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * Value type is <> - * Default value is `"message"` - -The CSV data in the value of the `source` field will be expanded into a -data structure. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -Define target field for placing the data. -Defaults to writing to the root of the event. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/csv-v3.0.7.asciidoc b/docs/versioned-plugins/filters/csv-v3.0.7.asciidoc deleted file mode 100644 index a6e1c05f2..000000000 --- a/docs/versioned-plugins/filters/csv-v3.0.7.asciidoc +++ /dev/null @@ -1,153 +0,0 @@ -:plugin: csv -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.7 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-csv/blob/v3.0.7/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Csv filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The CSV filter takes an event field containing CSV data, parses it, -and stores it as individual fields (can optionally specify the names). -This filter can also parse data with any separator, not just commas. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Csv Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-autodetect_column_names>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-autogenerate_column_names>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-columns>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-convert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-quote_char>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-separator>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-skip_empty_columns>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-autodetect_column_names"] -===== `autodetect_column_names` - - * Value type is <> - * Default value is `false` - -Define whether column names should be auto-detected from the header column or not. -Defaults to false. - -[id="{version}-plugins-{type}s-{plugin}-autogenerate_column_names"] -===== `autogenerate_column_names` - - * Value type is <> - * Default value is `true` - -Define whether column names should autogenerated or not. -Defaults to true. If set to false, columns not having a header specified will not be parsed. - -[id="{version}-plugins-{type}s-{plugin}-columns"] -===== `columns` - - * Value type is <> - * Default value is `[]` - -Define a list of column names (in the order they appear in the CSV, -as if it were a header line). If `columns` is not configured, or there -are not enough columns specified, the default column names are -"column1", "column2", etc. In the case that there are more columns -in the data than specified in this column list, extra columns will be auto-numbered: -(e.g. "user_defined_1", "user_defined_2", "column3", "column4", etc.) - -[id="{version}-plugins-{type}s-{plugin}-convert"] -===== `convert` - - * Value type is <> - * Default value is `{}` - -Define a set of datatype conversions to be applied to columns. -Possible conversions are integer, float, date, date_time, boolean - -# Example: -[source,ruby] - filter { - csv { - convert => { - "column1" => "integer" - "column2" => "boolean" - } - } - } - -[id="{version}-plugins-{type}s-{plugin}-quote_char"] -===== `quote_char` - - * Value type is <> - * Default value is `"\""` - -Define the character used to quote CSV fields. If this is not specified -the default is a double quote `"`. -Optional. - -[id="{version}-plugins-{type}s-{plugin}-separator"] -===== `separator` - - * Value type is <> - * Default value is `","` - -Define the column separator value. If this is not specified, the default -is a comma `,`. If you want to define a tabulation as a separator, you need -to set the value to the actual tab character and not `\t`. -Optional. - -[id="{version}-plugins-{type}s-{plugin}-skip_empty_columns"] -===== `skip_empty_columns` - - * Value type is <> - * Default value is `false` - -Define whether empty columns should be skipped. -Defaults to false. If set to true, columns containing no value will not get set. - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * Value type is <> - * Default value is `"message"` - -The CSV data in the value of the `source` field will be expanded into a -data structure. 
- -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -Define target field for placing the data. -Defaults to writing to the root of the event. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/date-index.asciidoc b/docs/versioned-plugins/filters/date-index.asciidoc deleted file mode 100644 index de442d9a2..000000000 --- a/docs/versioned-plugins/filters/date-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: date -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::date-v3.1.9.asciidoc[] -include::date-v3.1.8.asciidoc[] -include::date-v3.1.7.asciidoc[] - diff --git a/docs/versioned-plugins/filters/date-v3.1.7.asciidoc b/docs/versioned-plugins/filters/date-v3.1.7.asciidoc deleted file mode 100644 index c8a6c75fe..000000000 --- a/docs/versioned-plugins/filters/date-v3.1.7.asciidoc +++ /dev/null @@ -1,215 +0,0 @@ -:plugin: date -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.7 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-date/blob/v3.1.7/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Date filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The date filter is used for parsing dates from fields, and then using that -date or timestamp as the logstash timestamp for the event. - -For example, syslog events usually have timestamps like this: -[source,ruby] - "Apr 17 09:32:01" - -You would use the date format `MMM dd HH:mm:ss` to parse this. - -The date filter is especially important for sorting events and for -backfilling old data. If you don't get the date correct in your -event, then searching for them later will likely sort out of order. - -In the absence of this filter, logstash will choose a timestamp based on the -first time it sees the event (at input time), if the timestamp is not already -set in the event. For example, with file input, the timestamp is set to the -time of each read. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Date Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-locale>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-match>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timezone>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-locale"] -===== `locale` - - * Value type is <> - * There is no default value for this setting. - -Specify a locale to be used for date parsing using either IETF-BCP47 or POSIX language tag. -Simple examples are `en`,`en-US` for BCP47 or `en_US` for POSIX. - -The locale is mostly necessary to be set for parsing month names (pattern with `MMM`) and -weekday names (pattern with `EEE`). - -If not specified, the platform default will be used but for non-english platform default -an english parser will also be used as a fallback mechanism. - -[id="{version}-plugins-{type}s-{plugin}-match"] -===== `match` - - * Value type is <> - * Default value is `[]` - -An array with field name first, and format patterns following, `[ field, -formats... ]` - -If your time field has multiple possible formats, you can do this: -[source,ruby] - match => [ "logdate", "MMM dd yyyy HH:mm:ss", - "MMM d yyyy HH:mm:ss", "ISO8601" ] - -The above will match a syslog (rfc3164) or `iso8601` timestamp. - -There are a few special exceptions. The following format literals exist -to help you save time and ensure correctness of date parsing. - -* `ISO8601` - should parse any valid ISO8601 timestamp, such as - `2011-04-19T03:44:01.103Z` -* `UNIX` - will parse *float or int* value expressing unix time in seconds since epoch like 1326149001.132 as well as 1326149001 -* `UNIX_MS` - will parse **int** value expressing unix time in milliseconds since epoch like 1366125117000 -* `TAI64N` - will parse tai64n time values - -For example, if you have a field `logdate`, with a value that looks like -`Aug 13 2010 00:03:44`, you would use this configuration: -[source,ruby] - filter { - date { - match => [ "logdate", "MMM dd yyyy HH:mm:ss" ] - } - } - -If your field is nested in your structure, you can use the nested -syntax `[foo][bar]` to match its value. For more information, please refer to -<> - -*More details on the syntax* - -The syntax used for parsing date and time text uses letters to indicate the -kind of time value (month, minute, etc), and a repetition of letters to -indicate the form of that value (2-digit month, full month name, etc). - -Here's what you can use to parse dates and times: - -[horizontal] -y:: year - yyyy::: full year number. Example: `2015`. - yy::: two-digit year. Example: `15` for the year 2015. - -M:: month of the year - M::: minimal-digit month. Example: `1` for January and `12` for December. - MM::: two-digit month. zero-padded if needed. Example: `01` for January and `12` for December - MMM::: abbreviated month text. Example: `Jan` for January. Note: The language used depends on your locale. See the `locale` setting for how to change the language. - MMMM::: full month text, Example: `January`. Note: The language used depends on your locale. - -d:: day of the month - d::: minimal-digit day. Example: `1` for the 1st of the month. 
- dd::: two-digit day, zero-padded if needed. Example: `01` for the 1st of the month.
-
-H:: hour of the day (24-hour clock)
- H::: minimal-digit hour. Example: `0` for midnight.
- HH::: two-digit hour, zero-padded if needed. Example: `00` for midnight.
-
-m:: minutes of the hour (60 minutes per hour)
- m::: minimal-digit minutes. Example: `0`.
- mm::: two-digit minutes, zero-padded if needed. Example: `00`.
-
-s:: seconds of the minute (60 seconds per minute)
- s::: minimal-digit seconds. Example: `0`.
- ss::: two-digit seconds, zero-padded if needed. Example: `00`.
-
-S:: fraction of a second
- *Maximum precision is milliseconds (`SSS`). Beyond that, zeroes are appended.*
- S::: tenths of a second. Example: `0` for a subsecond value `012`
- SS::: hundredths of a second. Example: `01` for a subsecond value `01`
- SSS::: thousandths of a second. Example: `012` for a subsecond value `012`
-
-Z:: time zone offset or identity
- Z::: Timezone offset structured as HHmm (hour and minutes offset from Zulu/UTC). Example: `-0700`.
- ZZ::: Timezone offset structured as HH:mm (colon in between hour and minute offsets). Example: `-07:00`.
- ZZZ::: Timezone identity. Example: `America/Los_Angeles`. Note: Valid IDs are listed on the http://joda-time.sourceforge.net/timezones.html[Joda.org available time zones page].
-
-z:: time zone names. *Time zone names ('z') cannot be parsed.*
-
-w:: week of the year
- w::: minimal-digit week. Example: `1`.
- ww::: two-digit week, zero-padded if needed. Example: `01`.
-
-D:: day of the year
-
-e:: day of the week (number)
-
-E:: day of the week (text)
- E, EE, EEE::: Abbreviated day of the week. Example: `Mon`, `Tue`, `Wed`, `Thu`, `Fri`, `Sat`, `Sun`. Note: The actual language of this will depend on your locale.
- EEEE::: The full text day of the week. Example: `Monday`, `Tuesday`, ... Note: The actual language of this will depend on your locale.
-
-For non-formatting syntax, you'll need to put single-quote characters around the value. For example, if you were parsing ISO8601 time "2015-01-01T01:12:23", that little "T" isn't a valid time format, and since you want to say "literally, a T", your format would be this: "yyyy-MM-dd'T'HH:mm:ss"
-
-Other less common date units, such as era (G), century \(C), am/pm (a), and more, can be learned about on the
-http://www.joda.org/joda-time/key_format.html[joda-time documentation].
-
-[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
-===== `tag_on_failure`
-
- * Value type is <>
- * Default value is `["_dateparsefailure"]`
-
-Append values to the `tags` field when there has been no
-successful match.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * Default value is `"@timestamp"`
-
-Store the matching timestamp into the given target field. If not provided,
-default to updating the `@timestamp` field of the event.
-
-[id="{version}-plugins-{type}s-{plugin}-timezone"]
-===== `timezone`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Specify a time zone canonical ID to be used for date parsing.
-The valid IDs are listed on the http://joda-time.sourceforge.net/timezones.html[Joda.org available time zones page].
-This is useful in case the time zone cannot be extracted from the value,
-and is not the platform default.
-If this is not specified, the platform default will be used.
-A canonical ID is good as it takes care of daylight saving time for you.
-For example, `America/Los_Angeles` or `Europe/Paris` are valid IDs.
-This field can be dynamic and include parts of the event using the `%{field}` syntax - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/date-v3.1.8.asciidoc b/docs/versioned-plugins/filters/date-v3.1.8.asciidoc deleted file mode 100644 index 78186ffd3..000000000 --- a/docs/versioned-plugins/filters/date-v3.1.8.asciidoc +++ /dev/null @@ -1,215 +0,0 @@ -:plugin: date -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.8 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-date/blob/v3.1.8/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Date filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The date filter is used for parsing dates from fields, and then using that -date or timestamp as the logstash timestamp for the event. - -For example, syslog events usually have timestamps like this: -[source,ruby] - "Apr 17 09:32:01" - -You would use the date format `MMM dd HH:mm:ss` to parse this. - -The date filter is especially important for sorting events and for -backfilling old data. If you don't get the date correct in your -event, then searching for them later will likely sort out of order. - -In the absence of this filter, logstash will choose a timestamp based on the -first time it sees the event (at input time), if the timestamp is not already -set in the event. For example, with file input, the timestamp is set to the -time of each read. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Date Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-locale>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-match>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timezone>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-locale"] -===== `locale` - - * Value type is <> - * There is no default value for this setting. - -Specify a locale to be used for date parsing using either IETF-BCP47 or POSIX language tag. -Simple examples are `en`,`en-US` for BCP47 or `en_US` for POSIX. - -The locale is mostly necessary to be set for parsing month names (pattern with `MMM`) and -weekday names (pattern with `EEE`). - -If not specified, the platform default will be used but for non-english platform default -an english parser will also be used as a fallback mechanism. - -[id="{version}-plugins-{type}s-{plugin}-match"] -===== `match` - - * Value type is <> - * Default value is `[]` - -An array with field name first, and format patterns following, `[ field, -formats... 
]` - -If your time field has multiple possible formats, you can do this: -[source,ruby] - match => [ "logdate", "MMM dd yyyy HH:mm:ss", - "MMM d yyyy HH:mm:ss", "ISO8601" ] - -The above will match a syslog (rfc3164) or `iso8601` timestamp. - -There are a few special exceptions. The following format literals exist -to help you save time and ensure correctness of date parsing. - -* `ISO8601` - should parse any valid ISO8601 timestamp, such as - `2011-04-19T03:44:01.103Z` -* `UNIX` - will parse *float or int* value expressing unix time in seconds since epoch like 1326149001.132 as well as 1326149001 -* `UNIX_MS` - will parse **int** value expressing unix time in milliseconds since epoch like 1366125117000 -* `TAI64N` - will parse tai64n time values - -For example, if you have a field `logdate`, with a value that looks like -`Aug 13 2010 00:03:44`, you would use this configuration: -[source,ruby] - filter { - date { - match => [ "logdate", "MMM dd yyyy HH:mm:ss" ] - } - } - -If your field is nested in your structure, you can use the nested -syntax `[foo][bar]` to match its value. For more information, please refer to -<> - -*More details on the syntax* - -The syntax used for parsing date and time text uses letters to indicate the -kind of time value (month, minute, etc), and a repetition of letters to -indicate the form of that value (2-digit month, full month name, etc). - -Here's what you can use to parse dates and times: - -[horizontal] -y:: year - yyyy::: full year number. Example: `2015`. - yy::: two-digit year. Example: `15` for the year 2015. - -M:: month of the year - M::: minimal-digit month. Example: `1` for January and `12` for December. - MM::: two-digit month. zero-padded if needed. Example: `01` for January and `12` for December - MMM::: abbreviated month text. Example: `Jan` for January. Note: The language used depends on your locale. See the `locale` setting for how to change the language. - MMMM::: full month text, Example: `January`. Note: The language used depends on your locale. - -d:: day of the month - d::: minimal-digit day. Example: `1` for the 1st of the month. - dd::: two-digit day, zero-padded if needed. Example: `01` for the 1st of the month. - -H:: hour of the day (24-hour clock) - H::: minimal-digit hour. Example: `0` for midnight. - HH::: two-digit hour, zero-padded if needed. Example: `00` for midnight. - -m:: minutes of the hour (60 minutes per hour) - m::: minimal-digit minutes. Example: `0`. - mm::: two-digit minutes, zero-padded if needed. Example: `00`. - -s:: seconds of the minute (60 seconds per minute) - s::: minimal-digit seconds. Example: `0`. - ss::: two-digit seconds, zero-padded if needed. Example: `00`. - -S:: fraction of a second - *Maximum precision is milliseconds (`SSS`). Beyond that, zeroes are appended.* - S::: tenths of a second. Example: `0` for a subsecond value `012` - SS::: hundredths of a second. Example: `01` for a subsecond value `01` - SSS::: thousandths of a second. Example: `012` for a subsecond value `012` - -Z:: time zone offset or identity - Z::: Timezone offset structured as HHmm (hour and minutes offset from Zulu/UTC). Example: `-0700`. - ZZ::: Timezone offset structured as HH:mm (colon in between hour and minute offsets). Example: `-07:00`. - ZZZ::: Timezone identity. Example: `America/Los_Angeles`. Note: Valid IDs are listed on the http://joda-time.sourceforge.net/timezones.html[Joda.org available time zones page]. - -z:: time zone names. 
*Time zone names ('z') cannot be parsed.*
-
-w:: week of the year
- w::: minimal-digit week. Example: `1`.
- ww::: two-digit week, zero-padded if needed. Example: `01`.
-
-D:: day of the year
-
-e:: day of the week (number)
-
-E:: day of the week (text)
- E, EE, EEE::: Abbreviated day of the week. Example: `Mon`, `Tue`, `Wed`, `Thu`, `Fri`, `Sat`, `Sun`. Note: The actual language of this will depend on your locale.
- EEEE::: The full text day of the week. Example: `Monday`, `Tuesday`, ... Note: The actual language of this will depend on your locale.
-
-For non-formatting syntax, you'll need to put single-quote characters around the value. For example, if you were parsing ISO8601 time "2015-01-01T01:12:23", that little "T" isn't a valid time format, and since you want to say "literally, a T", your format would be this: "yyyy-MM-dd'T'HH:mm:ss"
-
-Other less common date units, such as era (G), century \(C), am/pm (a), and more, can be learned about on the
-http://www.joda.org/joda-time/key_format.html[joda-time documentation].
-
-[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
-===== `tag_on_failure`
-
- * Value type is <>
- * Default value is `["_dateparsefailure"]`
-
-Append values to the `tags` field when there has been no
-successful match.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * Default value is `"@timestamp"`
-
-Store the matching timestamp into the given target field. If not provided,
-default to updating the `@timestamp` field of the event.
-
-[id="{version}-plugins-{type}s-{plugin}-timezone"]
-===== `timezone`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Specify a time zone canonical ID to be used for date parsing.
-The valid IDs are listed on the http://joda-time.sourceforge.net/timezones.html[Joda.org available time zones page].
-This is useful in case the time zone cannot be extracted from the value,
-and is not the platform default.
-If this is not specified, the platform default will be used.
-A canonical ID is good as it takes care of daylight saving time for you.
-For example, `America/Los_Angeles` or `Europe/Paris` are valid IDs.
-This field can be dynamic and include parts of the event using the `%{field}` syntax.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/date-v3.1.9.asciidoc b/docs/versioned-plugins/filters/date-v3.1.9.asciidoc
deleted file mode 100644
index 18e82c276..000000000
--- a/docs/versioned-plugins/filters/date-v3.1.9.asciidoc
+++ /dev/null
@@ -1,215 +0,0 @@
-:plugin: date
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.1.9
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-date/blob/v3.1.9/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Date filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The date filter is used for parsing dates from fields, and then using that
-date or timestamp as the logstash timestamp for the event.
- -For example, syslog events usually have timestamps like this: -[source,ruby] - "Apr 17 09:32:01" - -You would use the date format `MMM dd HH:mm:ss` to parse this. - -The date filter is especially important for sorting events and for -backfilling old data. If you don't get the date correct in your -event, then searching for them later will likely sort out of order. - -In the absence of this filter, logstash will choose a timestamp based on the -first time it sees the event (at input time), if the timestamp is not already -set in the event. For example, with file input, the timestamp is set to the -time of each read. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Date Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-locale>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-match>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timezone>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-locale"] -===== `locale` - - * Value type is <> - * There is no default value for this setting. - -Specify a locale to be used for date parsing using either IETF-BCP47 or POSIX language tag. -Simple examples are `en`,`en-US` for BCP47 or `en_US` for POSIX. - -The locale is mostly necessary to be set for parsing month names (pattern with `MMM`) and -weekday names (pattern with `EEE`). - -If not specified, the platform default will be used but for non-english platform default -an english parser will also be used as a fallback mechanism. - -[id="{version}-plugins-{type}s-{plugin}-match"] -===== `match` - - * Value type is <> - * Default value is `[]` - -An array with field name first, and format patterns following, `[ field, -formats... ]` - -If your time field has multiple possible formats, you can do this: -[source,ruby] - match => [ "logdate", "MMM dd yyyy HH:mm:ss", - "MMM d yyyy HH:mm:ss", "ISO8601" ] - -The above will match a syslog (rfc3164) or `iso8601` timestamp. - -There are a few special exceptions. The following format literals exist -to help you save time and ensure correctness of date parsing. - -* `ISO8601` - should parse any valid ISO8601 timestamp, such as - `2011-04-19T03:44:01.103Z` -* `UNIX` - will parse *float or int* value expressing unix time in seconds since epoch like 1326149001.132 as well as 1326149001 -* `UNIX_MS` - will parse **int** value expressing unix time in milliseconds since epoch like 1366125117000 -* `TAI64N` - will parse tai64n time values - -For example, if you have a field `logdate`, with a value that looks like -`Aug 13 2010 00:03:44`, you would use this configuration: -[source,ruby] - filter { - date { - match => [ "logdate", "MMM dd yyyy HH:mm:ss" ] - } - } - -If your field is nested in your structure, you can use the nested -syntax `[foo][bar]` to match its value. 
For more information, please refer to <>
-
-*More details on the syntax*
-
-The syntax used for parsing date and time text uses letters to indicate the
-kind of time value (month, minute, etc.), and a repetition of letters to
-indicate the form of that value (2-digit month, full month name, etc.).
-
-Here's what you can use to parse dates and times:
-
-[horizontal]
-y:: year
- yyyy::: full year number. Example: `2015`.
- yy::: two-digit year. Example: `15` for the year 2015.
-
-M:: month of the year
- M::: minimal-digit month. Example: `1` for January and `12` for December.
- MM::: two-digit month, zero-padded if needed. Example: `01` for January and `12` for December.
- MMM::: abbreviated month text. Example: `Jan` for January. Note: The language used depends on your locale. See the `locale` setting for how to change the language.
- MMMM::: full month text. Example: `January`. Note: The language used depends on your locale.
-
-d:: day of the month
- d::: minimal-digit day. Example: `1` for the 1st of the month.
- dd::: two-digit day, zero-padded if needed. Example: `01` for the 1st of the month.
-
-H:: hour of the day (24-hour clock)
- H::: minimal-digit hour. Example: `0` for midnight.
- HH::: two-digit hour, zero-padded if needed. Example: `00` for midnight.
-
-m:: minutes of the hour (60 minutes per hour)
- m::: minimal-digit minutes. Example: `0`.
- mm::: two-digit minutes, zero-padded if needed. Example: `00`.
-
-s:: seconds of the minute (60 seconds per minute)
- s::: minimal-digit seconds. Example: `0`.
- ss::: two-digit seconds, zero-padded if needed. Example: `00`.
-
-S:: fraction of a second
- *Maximum precision is milliseconds (`SSS`). Beyond that, zeroes are appended.*
- S::: tenths of a second. Example: `0` for a subsecond value `012`
- SS::: hundredths of a second. Example: `01` for a subsecond value `01`
- SSS::: thousandths of a second. Example: `012` for a subsecond value `012`
-
-Z:: time zone offset or identity
- Z::: Timezone offset structured as HHmm (hour and minutes offset from Zulu/UTC). Example: `-0700`.
- ZZ::: Timezone offset structured as HH:mm (colon in between hour and minute offsets). Example: `-07:00`.
- ZZZ::: Timezone identity. Example: `America/Los_Angeles`. Note: Valid IDs are listed on the http://joda-time.sourceforge.net/timezones.html[Joda.org available time zones page].
-
-z:: time zone names. *Time zone names ('z') cannot be parsed.*
-
-w:: week of the year
- w::: minimal-digit week. Example: `1`.
- ww::: two-digit week, zero-padded if needed. Example: `01`.
-
-D:: day of the year
-
-e:: day of the week (number)
-
-E:: day of the week (text)
- E, EE, EEE::: Abbreviated day of the week. Example: `Mon`, `Tue`, `Wed`, `Thu`, `Fri`, `Sat`, `Sun`. Note: The actual language of this will depend on your locale.
- EEEE::: The full text day of the week. Example: `Monday`, `Tuesday`, ... Note: The actual language of this will depend on your locale.
-
-For non-formatting syntax, you'll need to put single-quote characters around the value. For example, if you were parsing ISO8601 time "2015-01-01T01:12:23", that little "T" isn't a valid time format, and since you want to say "literally, a T", your format would be this: "yyyy-MM-dd'T'HH:mm:ss"
-
-Other less common date units, such as era (G), century \(C), am/pm (a), and more, can be learned about on the
-http://www.joda.org/joda-time/key_format.html[joda-time documentation].
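-
-Putting the pieces above together, a minimal sketch (the `logdate`
-field name is illustrative) that tries a textual layout first and then
-falls back to epoch milliseconds:
-[source,ruby]
-    filter {
-      date {
-        match => [ "logdate", "MMM dd yyyy HH:mm:ss", "UNIX_MS" ]
-      }
-    }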
- -[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] -===== `tag_on_failure` - - * Value type is <> - * Default value is `["_dateparsefailure"]` - -Append values to the `tags` field when there has been no -successful match - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"@timestamp"` - -Store the matching timestamp into the given target field. If not provided, -default to updating the `@timestamp` field of the event. - -[id="{version}-plugins-{type}s-{plugin}-timezone"] -===== `timezone` - - * Value type is <> - * There is no default value for this setting. - -Specify a time zone canonical ID to be used for date parsing. -The valid IDs are listed on the http://joda-time.sourceforge.net/timezones.html[Joda.org available time zones page]. -This is useful in case the time zone cannot be extracted from the value, -and is not the platform default. -If this is not specified the platform default will be used. -Canonical ID is good as it takes care of daylight saving time for you -For example, `America/Los_Angeles` or `Europe/Paris` are valid IDs. -This field can be dynamic and include parts of the event using the `%{field}` syntax - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/de_dot-index.asciidoc b/docs/versioned-plugins/filters/de_dot-index.asciidoc deleted file mode 100644 index 11b764fcf..000000000 --- a/docs/versioned-plugins/filters/de_dot-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: de_dot -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::de_dot-v1.0.3.asciidoc[] -include::de_dot-v1.0.2.asciidoc[] -include::de_dot-v1.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/filters/de_dot-v1.0.1.asciidoc b/docs/versioned-plugins/filters/de_dot-v1.0.1.asciidoc deleted file mode 100644 index 967d56ccb..000000000 --- a/docs/versioned-plugins/filters/de_dot-v1.0.1.asciidoc +++ /dev/null @@ -1,82 +0,0 @@ -:plugin: de_dot -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v1.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-de_dot/blob/v1.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== De_dot filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This filter _appears_ to rename fields by replacing `.` characters with a different -separator. In reality, it's a somewhat expensive filter that has to copy the -source field contents to a new destination field (whose name no longer contains -dots), and then remove the corresponding source field. - -It should only be used if no other options are available. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== De_dot Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-nested>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-separator>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-fields"] -===== `fields` - - * Value type is <> - * There is no default value for this setting. - -The `fields` array should contain a list of known fields to act on. -If undefined, all top-level fields will be checked. Sub-fields must be -manually specified in the array. For example: `["field.suffix","[foo][bar.suffix]"]` -will result in "field_suffix" and nested or sub field ["foo"]["bar_suffix"] - -WARNING: This is an expensive operation. - - -[id="{version}-plugins-{type}s-{plugin}-nested"] -===== `nested` - - * Value type is <> - * Default value is `false` - -If `nested` is _true_, then create sub-fields instead of replacing dots with -a different separator. - -[id="{version}-plugins-{type}s-{plugin}-separator"] -===== `separator` - - * Value type is <> - * Default value is `"_"` - -Replace dots with this value. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/de_dot-v1.0.2.asciidoc b/docs/versioned-plugins/filters/de_dot-v1.0.2.asciidoc deleted file mode 100644 index 5588ffa4f..000000000 --- a/docs/versioned-plugins/filters/de_dot-v1.0.2.asciidoc +++ /dev/null @@ -1,82 +0,0 @@ -:plugin: de_dot -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v1.0.2 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-de_dot/blob/v1.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== De_dot filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This filter _appears_ to rename fields by replacing `.` characters with a different -separator. In reality, it's a somewhat expensive filter that has to copy the -source field contents to a new destination field (whose name no longer contains -dots), and then remove the corresponding source field. - -It should only be used if no other options are available. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== De_dot Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-nested>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-separator>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. 
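-
-As a quick orientation before the individual options below, a minimal
-sketch that relies entirely on the defaults, replacing dots in all
-top-level field names with the default `_` separator:
-[source,ruby]
-    filter {
-      de_dot {
-      }
-    }
-With this configuration a field named `user.name` would be re-created
-as `user_name` and the original field removed.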
- -  - -[id="{version}-plugins-{type}s-{plugin}-fields"] -===== `fields` - - * Value type is <> - * There is no default value for this setting. - -The `fields` array should contain a list of known fields to act on. -If undefined, all top-level fields will be checked. Sub-fields must be -manually specified in the array. For example: `["field.suffix","[foo][bar.suffix]"]` -will result in "field_suffix" and nested or sub field ["foo"]["bar_suffix"] - -WARNING: This is an expensive operation. - - -[id="{version}-plugins-{type}s-{plugin}-nested"] -===== `nested` - - * Value type is <> - * Default value is `false` - -If `nested` is _true_, then create sub-fields instead of replacing dots with -a different separator. - -[id="{version}-plugins-{type}s-{plugin}-separator"] -===== `separator` - - * Value type is <> - * Default value is `"_"` - -Replace dots with this value. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/de_dot-v1.0.3.asciidoc b/docs/versioned-plugins/filters/de_dot-v1.0.3.asciidoc deleted file mode 100644 index 542fcf82a..000000000 --- a/docs/versioned-plugins/filters/de_dot-v1.0.3.asciidoc +++ /dev/null @@ -1,82 +0,0 @@ -:plugin: de_dot -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v1.0.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-de_dot/blob/v1.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== De_dot filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This filter _appears_ to rename fields by replacing `.` characters with a different -separator. In reality, it's a somewhat expensive filter that has to copy the -source field contents to a new destination field (whose name no longer contains -dots), and then remove the corresponding source field. - -It should only be used if no other options are available. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== De_dot Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-nested>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-separator>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-fields"] -===== `fields` - - * Value type is <> - * There is no default value for this setting. - -The `fields` array should contain a list of known fields to act on. -If undefined, all top-level fields will be checked. Sub-fields must be -manually specified in the array. For example: `["field.suffix","[foo][bar.suffix]"]` -will result in "field_suffix" and nested or sub field ["foo"]["bar_suffix"] - -WARNING: This is an expensive operation. 
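-
-For example, a sketch that restricts the (expensive) rename to the two
-fields from the example above:
-[source,ruby]
-    filter {
-      de_dot {
-        fields => ["field.suffix", "[foo][bar.suffix]"]
-      }
-    }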
-
-
-[id="{version}-plugins-{type}s-{plugin}-nested"]
-===== `nested`
-
- * Value type is <>
- * Default value is `false`
-
-If `nested` is _true_, then create sub-fields instead of replacing dots with
-a different separator.
-
-[id="{version}-plugins-{type}s-{plugin}-separator"]
-===== `separator`
-
- * Value type is <>
- * Default value is `"_"`
-
-Replace dots with this value.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/debug-index.asciidoc b/docs/versioned-plugins/filters/debug-index.asciidoc
deleted file mode 100644
index 1d5a1b98e..000000000
--- a/docs/versioned-plugins/filters/debug-index.asciidoc
+++ /dev/null
@@ -1,10 +0,0 @@
-:plugin: debug
-:type: filter
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-|=======================================================================
-
-
diff --git a/docs/versioned-plugins/filters/dissect-index.asciidoc b/docs/versioned-plugins/filters/dissect-index.asciidoc
deleted file mode 100644
index d4215f146..000000000
--- a/docs/versioned-plugins/filters/dissect-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: dissect
-:type: filter
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-11-02
-| <> | 2017-06-23
-|=======================================================================
-
-include::dissect-v1.1.2.asciidoc[]
-include::dissect-v1.1.1.asciidoc[]
-include::dissect-v1.0.9.asciidoc[]
-
diff --git a/docs/versioned-plugins/filters/dissect-v1.0.9.asciidoc b/docs/versioned-plugins/filters/dissect-v1.0.9.asciidoc
deleted file mode 100644
index 41803fe1a..000000000
--- a/docs/versioned-plugins/filters/dissect-v1.0.9.asciidoc
+++ /dev/null
@@ -1,213 +0,0 @@
-:plugin: dissect
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v1.0.9
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-dissect/blob/v1.0.9/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="plugins-{type}-{plugin}"]
-
-=== Dissect filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The Dissect filter is a kind of split operation. Unlike a regular split operation where one delimiter is applied to the whole string, this operation applies a set of delimiters to a string value. +
-Dissect does not use regular expressions and is very fast. +
-However, if the structure of your text varies from line to line then Grok is more suitable. +
-There is a hybrid case where Dissect can be used to de-structure the section of the line that is reliably repeated and then Grok can be used on the remaining field values with more regex predictability and less overall work to do. +
-
-A set of fields and delimiters is called a *dissection*.
-
-The dissection is described using a set of `%{}` sections:
-....
-%{a} - %{b} - %{c}
-....
-
-A *field* is the text from `%` to `}` inclusive.
-
-A *delimiter* is the text between `}` and `%` characters.
-
-[NOTE]
-Delimiters can't contain these `}{%` characters.
-
-The config might look like this:
-....
-    filter {
-      dissect {
-        mapping => {
-          "message" => "%{ts} %{+ts} %{+ts} %{src} %{} %{prog}[%{pid}]: %{msg}"
-        }
-      }
-    }
-....
-When dissecting a string from left to right, text is captured up to the first delimiter - this captured text is stored in the first field. This is repeated for each field/delimiter pair thereafter until the last delimiter is reached, then *the remaining text is stored in the last field*. +
-
-*The Key:* +
-The key is the text between the `%{` and `}`, exclusive of the ?, +, & prefixes and the ordinal suffix. +
-`%{?aaa}` - key is `aaa` +
-`%{+bbb/3}` - key is `bbb` +
-`%{&ccc}` - key is `ccc` +
-
-*Normal field notation:* +
-The found value is added to the Event using the key. +
-`%{some_field}` - a normal field has no prefix or suffix
-
-*Skip field notation:* +
-The found value is stored internally but not added to the Event. +
-The key, if supplied, is prefixed with a `?`.
-
-`%{}` is an empty skip field.
-
-`%{?foo}` is a named skip field.
-
-*Append field notation:* +
-The value is appended to another value or stored if it's the first field seen. +
-The key is prefixed with a `+`. +
-The final value is stored in the Event using the key. +
-
-[NOTE]
-====
-The delimiter found before the field is appended with the value. +
-If no delimiter is found before the field, a single space character is used.
-====
-
-`%{+some_field}` is an append field. +
-`%{+some_field/2}` is an append field with an order modifier.
-
-An order modifier, `/digits`, allows one to reorder the append sequence. +
-e.g. for a text of `1 2 3 go`, this `%{+a/2} %{+a/1} %{+a/4} %{+a/3}` will build a key/value of `a => 2 1 go 3` +
-Append fields without an order modifier will append in declared order. +
-e.g. for a text of `1 2 3 go`, this `%{a} %{b} %{+a}` will build two key/values of `a => 1 3 go, b => 2` +
-
-*Indirect field notation:* +
-The found value is added to the Event using the found value of another field as the key. +
-The key is prefixed with a `&`. +
-`%{&some_field}` - an indirect field where the key is indirectly sourced from the value of `some_field`. +
-e.g. for a text of `error: some_error, some_description`, this `error: %{?err}, %{&err}` will build a key/value of `some_error => some_description`.
-
-[NOTE]
-For append and indirect fields, the key can refer to a field that already exists in the event before dissection.
-
-[NOTE]
-Use a Skip field if you do not want the indirection key/value stored.
-
-e.g. for a text of `google: 77.98`, this `%{?a}: %{&a}` will build a key/value of `google => 77.98`.
-
-[NOTE]
-===============================
-Append and indirect cannot be combined and will fail validation. +
-`%{+&something}` - will add a value to the `&something` key, probably not the intended outcome. +
-`%{&+something}` will add a value to the `+something` key, again probably unintended. +
-===============================
-
-*Delimiter repetition:* +
-If a field in the source text has variable width and is padded with delimiters, the padding will be ignored. +
-e.g. for texts of:
-....
-00000043 ViewReceiver I
-000000b3 Peer         I
-....
-with a dissection of `%{a} %{b} %{c}`; the padding is ignored, `event.get([c]) -> "I"`
-
-[NOTE]
-====
-You probably want to use this filter inside an `if` block. +
-This ensures that the event contains a field value with a suitable structure for the dissection.
-====
-
-For example...
-....
-filter { - if [type] == "syslog" or "syslog" in [tags] { - dissect { - mapping => { - "message" => "%{ts} %{+ts} %{+ts} %{src} %{} %{prog}[%{pid}]: %{msg}" - } - } - } -} -.... - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Dissect Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-convert_datatype>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mapping>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-convert_datatype"] -===== `convert_datatype` - - * Value type is <> - * Default value is `{}` - -With this setting `int` and `float` datatype conversions can be specified. + -These will be done after all `mapping` dissections have taken place. + -Feel free to use this setting on its own without a `mapping` section. + - -For example -[source, ruby] -filter { - dissect { - convert_datatype => { - cpu => "float" - code => "int" - } - } -} - -[id="{version}-plugins-{type}s-{plugin}-mapping"] -===== `mapping` - - * Value type is <> - * Default value is `{}` - -A hash of dissections of `field => value` + -A later dissection can be done on values from a previous dissection or they can be independent. - -For example -[source, ruby] -filter { - dissect { - mapping => { - "message" => "%{field1} %{field2} %{description}" - "description" => "%{field3} %{field4} %{field5}" - } - } -} - -This is useful if you want to keep the field `description` but also -dissect it some more. - -[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] -===== `tag_on_failure` - - * Value type is <> - * Default value is `["_dissectfailure"]` - -Append values to the `tags` field when dissection fails - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/dissect-v1.1.1.asciidoc b/docs/versioned-plugins/filters/dissect-v1.1.1.asciidoc deleted file mode 100644 index 4078fad74..000000000 --- a/docs/versioned-plugins/filters/dissect-v1.1.1.asciidoc +++ /dev/null @@ -1,283 +0,0 @@ -:plugin: dissect -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v1.1.1 -:release_date: 2017-11-02 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-dissect/blob/v1.1.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Dissect filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The Dissect filter is a kind of split operation. Unlike a regular split operation where one delimiter is applied to -the whole string, this operation applies a set of delimiters to a string value. + -Dissect does not use regular expressions and is very fast. 
+
-However, if the structure of your text varies from line to line then Grok is more suitable. +
-There is a hybrid case where Dissect can be used to de-structure the section of the line that is reliably repeated and
-then Grok can be used on the remaining field values with more regex predictability and less overall work to do. +
-
-A set of fields and delimiters is called a *dissection*.
-
-The dissection is described using a set of `%{}` sections:
-....
-%{a} - %{b} - %{c}
-....
-
-A *field* is the text from `%` to `}` inclusive.
-
-A *delimiter* is the text between a `}` and the next `%{` characters.
-
-[NOTE]
-Any set of characters that does not fit the `%{`, `'not }'`, `}` pattern is a delimiter.
-
-The config might look like this:
-....
-    filter {
-      dissect {
-        mapping => {
-          "message" => "%{ts} %{+ts} %{+ts} %{src} %{} %{prog}[%{pid}]: %{msg}"
-        }
-      }
-    }
-....
-When dissecting a string from left to right, text is captured up to the first delimiter - this captured text is stored in the first field.
-This is repeated for each field/delimiter pair thereafter until the last delimiter is reached, then *the remaining text is stored in the last field*. +
-
-*The Key:* +
-The key is the text between the `%{` and `}`, exclusive of the ?, +, & prefixes and the ordinal suffix. +
-`%{?aaa}` - key is `aaa` +
-`%{+bbb/3}` - key is `bbb` +
-`%{&ccc}` - key is `ccc` +
-
-===== Normal field notation
-The found value is added to the Event using the key. +
-`%{some_field}` - a normal field has no prefix or suffix
-
-*Skip field notation:* +
-The found value is stored internally but not added to the Event. +
-The key, if supplied, is prefixed with a `?`.
-
-`%{}` is an empty skip field.
-
-`%{?foo}` is a named skip field.
-
-===== Append field notation
-The value is appended to another value or stored if it's the first field seen. +
-The key is prefixed with a `+`. +
-The final value is stored in the Event using the key. +
-
-[NOTE]
-====
-The delimiter found before the field is appended with the value. +
-If no delimiter is found before the field, a single space character is used.
-====
-
-`%{+some_field}` is an append field. +
-`%{+some_field/2}` is an append field with an order modifier.
-
-An order modifier, `/digits`, allows one to reorder the append sequence. +
-e.g. for a text of `1 2 3 go`, this `%{+a/2} %{+a/1} %{+a/4} %{+a/3}` will build a key/value of `a => 2 1 go 3` +
-Append fields without an order modifier will append in declared order. +
-e.g. for a text of `1 2 3 go`, this `%{a} %{b} %{+a}` will build two key/values of `a => 1 3 go, b => 2` +
-
-===== Indirect field notation
-The found value is added to the Event using the found value of another field as the key. +
-The key is prefixed with a `&`. +
-`%{&some_field}` - an indirect field where the key is indirectly sourced from the value of `some_field`. +
-e.g. for a text of `error: some_error, some_description`, this `error: %{?err}, %{&err}` will build a key/value of `some_error => some_description`.
-
-[NOTE]
-For append and indirect fields, the key can refer to a field that already exists in the event before dissection.
-
-[NOTE]
-Use a Skip field if you do not want the indirection key/value stored.
-
-e.g. for a text of `google: 77.98`, this `%{?a}: %{&a}` will build a key/value of `google => 77.98`.
-
-[NOTE]
-===============================
-Append and indirect cannot be combined and will fail validation. +
-`%{+&something}` - will add a value to the `&something` key, probably not the intended outcome.
+
-`%{&+something}` will add a value to the `+something` key, again probably unintended. +
-===============================
-
-==== Multiple Consecutive Delimiter Handling
-
-[IMPORTANT]
-===============================
-Starting from version 1.1.1 of this plugin, multiple found delimiter handling has changed.
-Now multiple consecutive delimiters will be seen as missing fields by default and not padding.
-If you are already using Dissect and your source text has fields padded with extra delimiters,
-you will need to change your config. Please read the section below.
-===============================
-
-===== Empty data between delimiters
-Given this text as the sample used to create a dissection:
-....
-John Smith,Big Oaks,Wood Lane,Hambledown,Canterbury,CB34RY
-....
-The created dissection, with 6 fields, is:
-....
-%{name},%{addr1},%{addr2},%{addr3},%{city},%{zip}
-....
-When a line like this is processed:
-....
-Jane Doe,4321 Fifth Avenue,,,New York,87432
-....
-Dissect will create an event with empty fields for `addr2` and `addr3` like so:
-....
-{
-  "name": "Jane Doe",
-  "addr1": "4321 Fifth Avenue",
-  "addr2": "",
-  "addr3": "",
-  "city": "New York",
-  "zip": "87432"
-}
-....
-
-===== Delimiters used as padding to visually align fields
-*Padding to the right hand side*
-
-Given these texts as the samples used to create a dissection:
-....
-00000043 ViewReceive machine-321
-f3000a3b Calc        machine-123
-....
-The dissection, with 3 fields, is:
-....
-%{id} %{function->} %{server}
-....
-Note, above, the second field has a `->` suffix which tells Dissect to ignore padding to its right. +
-Dissect will create these events:
-....
-{
-  "id": "00000043",
-  "function": "ViewReceive",
-  "server": "machine-321"
-}
-{
-  "id": "f3000a3b",
-  "function": "Calc",
-  "server": "machine-123"
-}
-....
-[IMPORTANT]
-Always add the `->` suffix to the field on the left of the padding.
-
-*Padding to the left hand side (to the human eye)*
-
-Given these texts as the samples used to create a dissection:
-....
-00000043 ViewReceive machine-321
-f3000a3b        Calc machine-123
-....
-The dissection, with 3 fields, is now:
-....
-%{id->} %{function} %{server}
-....
-Here the `->` suffix moves to the `id` field because Dissect sees the padding as being to the right of the `id` field. +
-
-==== Conditional processing
-
-You probably want to use this filter inside an `if` block. +
-This ensures that the event contains a field value with a suitable structure for the dissection.
-
-For example...
-....
-filter {
-  if [type] == "syslog" or "syslog" in [tags] {
-    dissect {
-      mapping => {
-        "message" => "%{ts} %{+ts} %{+ts} %{src} %{} %{prog}[%{pid}]: %{msg}"
-      }
-    }
-  }
-}
-....
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Dissect Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-convert_datatype>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-mapping>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
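-
-Before the individual options, here is a short end-to-end sketch (the
-sample layout and field names are invented for illustration) that
-combines a right-padded dissection with a datatype conversion:
-[source,ruby]
-    filter {
-      dissect {
-        mapping => {
-          "message" => "%{id} %{function->} %{cpu}"
-        }
-        convert_datatype => {
-          "cpu" => "float"
-        }
-      }
-    }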
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-convert_datatype"]
-===== `convert_datatype`
-
- * Value type is <>
- * Default value is `{}`
-
-With this setting `int` and `float` datatype conversions can be specified. +
-These will be done after all `mapping` dissections have taken place. +
-Feel free to use this setting on its own without a `mapping` section. +
-
-For example
-[source, ruby]
-filter {
-  dissect {
-    convert_datatype => {
-      cpu => "float"
-      code => "int"
    }
-  }
-}
-
-[id="{version}-plugins-{type}s-{plugin}-mapping"]
-===== `mapping`
-
- * Value type is <>
- * Default value is `{}`
-
-A hash of dissections of `field => value` +
-A later dissection can be done on values from a previous dissection or they can be independent.
-
-For example
-[source, ruby]
-filter {
-  dissect {
-    mapping => {
-      "message" => "%{field1} %{field2} %{description}"
-      "description" => "%{field3} %{field4} %{field5}"
-    }
-  }
-}
-
-This is useful if you want to keep the field `description` but also
-dissect it some more.
-
-[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
-===== `tag_on_failure`
-
- * Value type is <>
- * Default value is `["_dissectfailure"]`
-
-Append values to the `tags` field when dissection fails.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/dissect-v1.1.2.asciidoc b/docs/versioned-plugins/filters/dissect-v1.1.2.asciidoc
deleted file mode 100644
index 8fadf53c5..000000000
--- a/docs/versioned-plugins/filters/dissect-v1.1.2.asciidoc
+++ /dev/null
@@ -1,283 +0,0 @@
-:plugin: dissect
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v1.1.2
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-dissect/blob/v1.1.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Dissect filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The Dissect filter is a kind of split operation. Unlike a regular split operation where one delimiter is applied to
-the whole string, this operation applies a set of delimiters to a string value. +
-Dissect does not use regular expressions and is very fast. +
-However, if the structure of your text varies from line to line then Grok is more suitable. +
-There is a hybrid case where Dissect can be used to de-structure the section of the line that is reliably repeated and
-then Grok can be used on the remaining field values with more regex predictability and less overall work to do. +
-
-A set of fields and delimiters is called a *dissection*.
-
-The dissection is described using a set of `%{}` sections:
-....
-%{a} - %{b} - %{c}
-....
-
-A *field* is the text from `%` to `}` inclusive.
-
-A *delimiter* is the text between a `}` and the next `%{` characters.
-
-[NOTE]
-Any set of characters that does not fit the `%{`, `'not }'`, `}` pattern is a delimiter.
-
-The config might look like this:
-....
-    filter {
-      dissect {
-        mapping => {
-          "message" => "%{ts} %{+ts} %{+ts} %{src} %{} %{prog}[%{pid}]: %{msg}"
-        }
-      }
-    }
-....
-When dissecting a string from left to right, text is captured up to the first delimiter - this captured text is stored in the first field.
-This is repeated for each field/delimiter pair thereafter until the last delimiter is reached, then *the remaining text is stored in the last field*. +
-
-*The Key:* +
-The key is the text between the `%{` and `}`, exclusive of the ?, +, & prefixes and the ordinal suffix. +
-`%{?aaa}` - key is `aaa` +
-`%{+bbb/3}` - key is `bbb` +
-`%{&ccc}` - key is `ccc` +
-
-===== Normal field notation
-The found value is added to the Event using the key. +
-`%{some_field}` - a normal field has no prefix or suffix
-
-*Skip field notation:* +
-The found value is stored internally but not added to the Event. +
-The key, if supplied, is prefixed with a `?`.
-
-`%{}` is an empty skip field.
-
-`%{?foo}` is a named skip field.
-
-===== Append field notation
-The value is appended to another value, or stored if it's the first field seen. +
-The key is prefixed with a `+`. +
-The final value is stored in the Event using the key. +
-
-[NOTE]
-====
-The delimiter found before the field is inserted between the appended values. +
-If no delimiter is found before the field, a single space character is used.
-====
-
-`%{+some_field}` is an append field. +
-`%{+some_field/2}` is an append field with an order modifier.
-
-An order modifier, `/digits`, allows one to reorder the append sequence. +
-e.g. for a text of `1 2 3 go`, this `%{+a/2} %{+a/1} %{+a/4} %{+a/3}` will build a key/value of `a => 2 1 go 3` +
-Append fields without an order modifier will append in declared order. +
-e.g. for a text of `1 2 3 go`, this `%{a} %{b} %{+a}` will build two key/values of `a => 1 3 go, b => 2` +
-
-===== Indirect field notation
-The found value is added to the Event using the found value of another field as the key. +
-The key is prefixed with a `&`. +
-`%{&some_field}` - an indirect field where the key is indirectly sourced from the value of `some_field`. +
-e.g. for a text of `error: some_error, some_description`, this `error: %{?err}, %{&err}` will build a key/value of `some_error => some_description`.
-
-[NOTE]
-For append and indirect fields, the key can refer to a field that already exists in the event before dissection.
-
-[NOTE]
-Use a skip field if you do not want the indirection key/value stored.
-
-e.g. for a text of `google: 77.98`, this `%{?a}: %{&a}` will build a key/value of `google => 77.98`.
-
-[NOTE]
-===============================
-Append and indirect cannot be combined and will fail validation. +
-`%{+&something}` - will add a value to the `&something` key, probably not the intended outcome. +
-`%{&+something}` will add a value to the `+something` key, again probably unintended. +
-===============================
-
-==== Multiple Consecutive Delimiter Handling
-
-[IMPORTANT]
-===============================
-Starting from version 1.1.1 of this plugin, multiple found delimiter handling has changed.
-Now multiple consecutive delimiters will be seen as missing fields by default and not padding.
-If you are already using Dissect and your source text has fields padded with extra delimiters,
-you will need to change your config. Please read the section below.
-===============================
-
-===== Empty data between delimiters
-Given this text as the sample used to create a dissection:
-....
-John Smith,Big Oaks,Wood Lane,Hambledown,Canterbury,CB34RY
-....
-The created dissection, with 6 fields, is:
-....
-%{name},%{addr1},%{addr2},%{addr3},%{city},%{zip}
-....
-When a line like this is processed:
-....
-Jane Doe,4321 Fifth Avenue,,,New York,87432
-....
-Dissect will create an event with empty fields for `addr2` and `addr3`, like so:
-....
-{
-  "name": "Jane Doe",
-  "addr1": "4321 Fifth Avenue",
-  "addr2": "",
-  "addr3": "",
-  "city": "New York",
-  "zip": "87432"
-}
-....
-
-===== Delimiters used as padding to visually align fields
-*Padding to the right hand side*
-
-Given these texts as the samples used to create a dissection:
-....
-00000043 ViewReceive machine-321
-f3000a3b Calc        machine-123
-....
-The dissection, with 3 fields, is:
-....
-%{id} %{function->} %{server}
-....
-Note, above, the second field has a `->` suffix, which tells Dissect to ignore padding to its right. +
-Dissect will create these events:
-....
-{
-  "id": "00000043",
-  "function": "ViewReceive",
-  "server": "machine-321"
-}
-{
-  "id": "f3000a3b",
-  "function": "Calc",
-  "server": "machine-123"
-}
-....
-[IMPORTANT]
-Always add the `->` suffix to the field on the left of the padding.
-
-*Padding to the left hand side (to the human eye)*
-
-Given these texts as the samples used to create a dissection:
-....
-00000043 ViewReceive machine-321
-f3000a3b        Calc machine-123
-....
-The dissection, with 3 fields, is now:
-....
-%{id->} %{function} %{server}
-....
-Here the `->` suffix moves to the `id` field because Dissect sees the padding as being to the right of the `id` field. +
-
-==== Conditional processing
-
-You probably want to use this filter inside an `if` block. +
-This ensures that the event contains a field value with a suitable structure for the dissection.
-
-For example:
-....
-filter {
-  if [type] == "syslog" or "syslog" in [tags] {
-    dissect {
-      mapping => {
-        "message" => "%{ts} %{+ts} %{+ts} %{src} %{} %{prog}[%{pid}]: %{msg}"
-      }
-    }
-  }
-}
-....
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Dissect Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-convert_datatype>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-mapping>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<<array,array>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-convert_datatype"]
-===== `convert_datatype`
-
- * Value type is <<hash,hash>>
- * Default value is `{}`
-
-With this setting `int` and `float` datatype conversions can be specified. +
-These will be done after all `mapping` dissections have taken place. +
-Feel free to use this setting on its own without a `mapping` section. +
-
-For example:
-[source, ruby]
-filter {
-  dissect {
-    convert_datatype => {
-      cpu => "float"
-      code => "int"
-    }
-  }
-}
-
-[id="{version}-plugins-{type}s-{plugin}-mapping"]
-===== `mapping`
-
- * Value type is <<hash,hash>>
- * Default value is `{}`
-
-A hash of dissections of `field => value`. +
-A later dissection can be done on values from a previous dissection, or they can be independent.
-
-For example:
-[source, ruby]
-filter {
-  dissect {
    mapping => {
-      "message" => "%{field1} %{field2} %{description}"
-      "description" => "%{field3} %{field4} %{field5}"
-    }
-  }
-}
-
-This is useful if you want to keep the field `description` but also
-dissect it some more.
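-
-As a sketch of what this two-pass dissection yields, assume a hypothetical message `alpha beta x y z` (an illustrative value, not from the examples above). The first mapping captures `description => "x y z"`, and the second pass then splits that value, leaving the event with:
-....
-{
-  "field1": "alpha",
-  "field2": "beta",
-  "description": "x y z",
-  "field3": "x",
-  "field4": "y",
-  "field5": "z"
-}
-....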
- -[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] -===== `tag_on_failure` - - * Value type is <> - * Default value is `["_dissectfailure"]` - -Append values to the `tags` field when dissection fails - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/dns-index.asciidoc b/docs/versioned-plugins/filters/dns-index.asciidoc deleted file mode 100644 index a30b89960..000000000 --- a/docs/versioned-plugins/filters/dns-index.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -:plugin: dns -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-08-09 -| <> | 2017-06-23 -|======================================================================= - -include::dns-v3.0.7.asciidoc[] -include::dns-v3.0.6.asciidoc[] -include::dns-v3.0.5.asciidoc[] -include::dns-v3.0.4.asciidoc[] - diff --git a/docs/versioned-plugins/filters/dns-v3.0.4.asciidoc b/docs/versioned-plugins/filters/dns-v3.0.4.asciidoc deleted file mode 100644 index 49e8094b7..000000000 --- a/docs/versioned-plugins/filters/dns-v3.0.4.asciidoc +++ /dev/null @@ -1,161 +0,0 @@ -:plugin: dns -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-dns/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Dns filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The DNS filter performs a lookup (either an A record/CNAME record lookup -or a reverse lookup at the PTR record) on records specified under the -`reverse` arrays or respectively under the `resolve` arrays. - -The config should look like this: -[source,ruby] - filter { - dns { - reverse => [ "source_host", "field_with_address" ] - resolve => [ "field_with_fqdn" ] - action => "replace" - } - } - -This filter, like all filters, only processes 1 event at a time, so the use -of this plugin can significantly slow down your pipeline's throughput if you -have a high latency network. By way of example, if each DNS lookup takes 2 -milliseconds, the maximum throughput you can achieve with a single filter -worker is 500 events per second (1000 milliseconds / 2 milliseconds). - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Dns Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-action>> |<<string,string>>, one of `["append", "replace"]`|No
-| <<{version}-plugins-{type}s-{plugin}-failed_cache_size>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-failed_cache_ttl>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-hit_cache_size>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-hit_cache_ttl>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-hostsfile>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-max_retries>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-nameserver>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-resolve>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-reverse>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-timeout>> |<<number,number>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-action"]
-===== `action`
-
- * Value can be any of: `append`, `replace`
- * Default value is `"append"`
-
-Determine what action to do: append or replace the values in the fields
-specified under `reverse` and `resolve`.
-
-[id="{version}-plugins-{type}s-{plugin}-failed_cache_size"]
-===== `failed_cache_size`
-
- * Value type is <<number,number>>
- * Default value is `0`
-
-Cache size for failed requests
-
-[id="{version}-plugins-{type}s-{plugin}-failed_cache_ttl"]
-===== `failed_cache_ttl`
-
- * Value type is <<number,number>>
- * Default value is `5`
-
-How long to cache failed requests (in seconds)
-
-[id="{version}-plugins-{type}s-{plugin}-hit_cache_size"]
-===== `hit_cache_size`
-
- * Value type is <<number,number>>
- * Default value is `0`
-
-Set the size of the cache for successful requests
-
-[id="{version}-plugins-{type}s-{plugin}-hit_cache_ttl"]
-===== `hit_cache_ttl`
-
- * Value type is <<number,number>>
- * Default value is `60`
-
-How long to cache successful requests (in seconds)
-
-[id="{version}-plugins-{type}s-{plugin}-hostsfile"]
-===== `hostsfile`
-
- * Value type is <<array,array>>
- * There is no default value for this setting.
-
-Use custom hosts file(s). For example: `["/var/db/my_custom_hosts"]`
-
-[id="{version}-plugins-{type}s-{plugin}-max_retries"]
-===== `max_retries`
-
- * Value type is <<number,number>>
- * Default value is `2`
-
-Number of times to retry a failed resolve/reverse
-
-[id="{version}-plugins-{type}s-{plugin}-nameserver"]
-===== `nameserver`
-
- * Value type is <<array,array>>
- * There is no default value for this setting.
-
-Use custom nameserver(s). For example: `["8.8.8.8", "8.8.4.4"]`
-
-[id="{version}-plugins-{type}s-{plugin}-resolve"]
-===== `resolve`
-
- * Value type is <<array,array>>
- * There is no default value for this setting.
-
-Forward resolve one or more fields.
-
-[id="{version}-plugins-{type}s-{plugin}-reverse"]
-===== `reverse`
-
- * Value type is <<array,array>>
- * There is no default value for this setting.
-
-Reverse resolve one or more fields.
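-
-As a quick sketch of how several of these options can be combined (the field names and resolver address here are illustrative):
-
-[source,ruby]
-    filter {
-      dns {
-        # forward-resolve one field, reverse-resolve another
-        resolve => [ "field_with_fqdn" ]
-        reverse => [ "field_with_address" ]
-        action => "replace"
-        # cache successful answers for a minute to reduce repeated lookups
-        hit_cache_size => 4096
-        hit_cache_ttl => 60
-        # query a specific resolver instead of the system default
-        nameserver => [ "8.8.8.8" ]
-        max_retries => 2
-        timeout => 0.5
-      }
-    }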
- -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * Default value is `0.5` - -`resolv` calls will be wrapped in a timeout instance - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/dns-v3.0.5.asciidoc b/docs/versioned-plugins/filters/dns-v3.0.5.asciidoc deleted file mode 100644 index 4902ba804..000000000 --- a/docs/versioned-plugins/filters/dns-v3.0.5.asciidoc +++ /dev/null @@ -1,161 +0,0 @@ -:plugin: dns -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-08-09 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-dns/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Dns filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The DNS filter performs a lookup (either an A record/CNAME record lookup -or a reverse lookup at the PTR record) on records specified under the -`reverse` arrays or respectively under the `resolve` arrays. - -The config should look like this: -[source,ruby] - filter { - dns { - reverse => [ "source_host", "field_with_address" ] - resolve => [ "field_with_fqdn" ] - action => "replace" - } - } - -This filter, like all filters, only processes 1 event at a time, so the use -of this plugin can significantly slow down your pipeline's throughput if you -have a high latency network. By way of example, if each DNS lookup takes 2 -milliseconds, the maximum throughput you can achieve with a single filter -worker is 500 events per second (1000 milliseconds / 2 milliseconds). - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Dns Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-action>> |<>, one of `["append", "replace"]`|No -| <<{version}-plugins-{type}s-{plugin}-failed_cache_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-failed_cache_ttl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hit_cache_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hit_cache_ttl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hostsfile>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_retries>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-nameserver>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-resolve>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reverse>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-action"] -===== `action` - - * Value can be any of: `append`, `replace` - * Default value is `"append"` - -Determine what action to do: append or replace the values in the fields -specified under `reverse` and `resolve`. 
- -[id="{version}-plugins-{type}s-{plugin}-failed_cache_size"] -===== `failed_cache_size` - - * Value type is <> - * Default value is `0` - -cache size for failed requests - -[id="{version}-plugins-{type}s-{plugin}-failed_cache_ttl"] -===== `failed_cache_ttl` - - * Value type is <> - * Default value is `5` - -how long to cache failed requests (in seconds) - -[id="{version}-plugins-{type}s-{plugin}-hit_cache_size"] -===== `hit_cache_size` - - * Value type is <> - * Default value is `0` - -set the size of cache for successful requests - -[id="{version}-plugins-{type}s-{plugin}-hit_cache_ttl"] -===== `hit_cache_ttl` - - * Value type is <> - * Default value is `60` - -how long to cache successful requests (in seconds) - -[id="{version}-plugins-{type}s-{plugin}-hostsfile"] -===== `hostsfile` - - * Value type is <> - * There is no default value for this setting. - -Use custom hosts file(s). For example: `["/var/db/my_custom_hosts"]` - -[id="{version}-plugins-{type}s-{plugin}-max_retries"] -===== `max_retries` - - * Value type is <> - * Default value is `2` - -number of times to retry a failed resolve/reverse - -[id="{version}-plugins-{type}s-{plugin}-nameserver"] -===== `nameserver` - - * Value type is <> - * There is no default value for this setting. - -Use custom nameserver(s). For example: `["8.8.8.8", "8.8.4.4"]` - -[id="{version}-plugins-{type}s-{plugin}-resolve"] -===== `resolve` - - * Value type is <> - * There is no default value for this setting. - -Forward resolve one or more fields. - -[id="{version}-plugins-{type}s-{plugin}-reverse"] -===== `reverse` - - * Value type is <> - * There is no default value for this setting. - -Reverse resolve one or more fields. - -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * Default value is `0.5` - -`resolv` calls will be wrapped in a timeout instance - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/dns-v3.0.6.asciidoc b/docs/versioned-plugins/filters/dns-v3.0.6.asciidoc deleted file mode 100644 index 07f93a6b5..000000000 --- a/docs/versioned-plugins/filters/dns-v3.0.6.asciidoc +++ /dev/null @@ -1,161 +0,0 @@ -:plugin: dns -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.6 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-dns/blob/v3.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Dns filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The DNS filter performs a lookup (either an A record/CNAME record lookup -or a reverse lookup at the PTR record) on records specified under the -`reverse` arrays or respectively under the `resolve` arrays. - -The config should look like this: -[source,ruby] - filter { - dns { - reverse => [ "source_host", "field_with_address" ] - resolve => [ "field_with_fqdn" ] - action => "replace" - } - } - -This filter, like all filters, only processes 1 event at a time, so the use -of this plugin can significantly slow down your pipeline's throughput if you -have a high latency network. 
-milliseconds, the maximum throughput you can achieve with a single filter
-worker is 500 events per second (1000 milliseconds / 2 milliseconds).
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Dns Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-action>> |<<string,string>>, one of `["append", "replace"]`|No
-| <<{version}-plugins-{type}s-{plugin}-failed_cache_size>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-failed_cache_ttl>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-hit_cache_size>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-hit_cache_ttl>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-hostsfile>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-max_retries>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-nameserver>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-resolve>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-reverse>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-timeout>> |<<number,number>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-action"]
-===== `action`
-
- * Value can be any of: `append`, `replace`
- * Default value is `"append"`
-
-Determine what action to do: append or replace the values in the fields
-specified under `reverse` and `resolve`.
-
-[id="{version}-plugins-{type}s-{plugin}-failed_cache_size"]
-===== `failed_cache_size`
-
- * Value type is <<number,number>>
- * Default value is `0`
-
-Cache size for failed requests
-
-[id="{version}-plugins-{type}s-{plugin}-failed_cache_ttl"]
-===== `failed_cache_ttl`
-
- * Value type is <<number,number>>
- * Default value is `5`
-
-How long to cache failed requests (in seconds)
-
-[id="{version}-plugins-{type}s-{plugin}-hit_cache_size"]
-===== `hit_cache_size`
-
- * Value type is <<number,number>>
- * Default value is `0`
-
-Set the size of the cache for successful requests
-
-[id="{version}-plugins-{type}s-{plugin}-hit_cache_ttl"]
-===== `hit_cache_ttl`
-
- * Value type is <<number,number>>
- * Default value is `60`
-
-How long to cache successful requests (in seconds)
-
-[id="{version}-plugins-{type}s-{plugin}-hostsfile"]
-===== `hostsfile`
-
- * Value type is <<array,array>>
- * There is no default value for this setting.
-
-Use custom hosts file(s). For example: `["/var/db/my_custom_hosts"]`
-
-[id="{version}-plugins-{type}s-{plugin}-max_retries"]
-===== `max_retries`
-
- * Value type is <<number,number>>
- * Default value is `2`
-
-Number of times to retry a failed resolve/reverse
-
-[id="{version}-plugins-{type}s-{plugin}-nameserver"]
-===== `nameserver`
-
- * Value type is <<array,array>>
- * There is no default value for this setting.
-
-Use custom nameserver(s). For example: `["8.8.8.8", "8.8.4.4"]`
-
-[id="{version}-plugins-{type}s-{plugin}-resolve"]
-===== `resolve`
-
- * Value type is <<array,array>>
- * There is no default value for this setting.
-
-Forward resolve one or more fields.
-
-[id="{version}-plugins-{type}s-{plugin}-reverse"]
-===== `reverse`
-
- * Value type is <<array,array>>
- * There is no default value for this setting.
-
-Reverse resolve one or more fields.
- -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * Default value is `0.5` - -`resolv` calls will be wrapped in a timeout instance - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/dns-v3.0.7.asciidoc b/docs/versioned-plugins/filters/dns-v3.0.7.asciidoc deleted file mode 100644 index 49f3db696..000000000 --- a/docs/versioned-plugins/filters/dns-v3.0.7.asciidoc +++ /dev/null @@ -1,161 +0,0 @@ -:plugin: dns -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.7 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-dns/blob/v3.0.7/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Dns filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The DNS filter performs a lookup (either an A record/CNAME record lookup -or a reverse lookup at the PTR record) on records specified under the -`reverse` arrays or respectively under the `resolve` arrays. - -The config should look like this: -[source,ruby] - filter { - dns { - reverse => [ "source_host", "field_with_address" ] - resolve => [ "field_with_fqdn" ] - action => "replace" - } - } - -This filter, like all filters, only processes 1 event at a time, so the use -of this plugin can significantly slow down your pipeline's throughput if you -have a high latency network. By way of example, if each DNS lookup takes 2 -milliseconds, the maximum throughput you can achieve with a single filter -worker is 500 events per second (1000 milliseconds / 2 milliseconds). - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Dns Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-action>> |<>, one of `["append", "replace"]`|No -| <<{version}-plugins-{type}s-{plugin}-failed_cache_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-failed_cache_ttl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hit_cache_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hit_cache_ttl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hostsfile>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_retries>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-nameserver>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-resolve>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reverse>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-action"] -===== `action` - - * Value can be any of: `append`, `replace` - * Default value is `"append"` - -Determine what action to do: append or replace the values in the fields -specified under `reverse` and `resolve`. 
- -[id="{version}-plugins-{type}s-{plugin}-failed_cache_size"] -===== `failed_cache_size` - - * Value type is <> - * Default value is `0` - -cache size for failed requests - -[id="{version}-plugins-{type}s-{plugin}-failed_cache_ttl"] -===== `failed_cache_ttl` - - * Value type is <> - * Default value is `5` - -how long to cache failed requests (in seconds) - -[id="{version}-plugins-{type}s-{plugin}-hit_cache_size"] -===== `hit_cache_size` - - * Value type is <> - * Default value is `0` - -set the size of cache for successful requests - -[id="{version}-plugins-{type}s-{plugin}-hit_cache_ttl"] -===== `hit_cache_ttl` - - * Value type is <> - * Default value is `60` - -how long to cache successful requests (in seconds) - -[id="{version}-plugins-{type}s-{plugin}-hostsfile"] -===== `hostsfile` - - * Value type is <> - * There is no default value for this setting. - -Use custom hosts file(s). For example: `["/var/db/my_custom_hosts"]` - -[id="{version}-plugins-{type}s-{plugin}-max_retries"] -===== `max_retries` - - * Value type is <> - * Default value is `2` - -number of times to retry a failed resolve/reverse - -[id="{version}-plugins-{type}s-{plugin}-nameserver"] -===== `nameserver` - - * Value type is <> - * There is no default value for this setting. - -Use custom nameserver(s). For example: `["8.8.8.8", "8.8.4.4"]` - -[id="{version}-plugins-{type}s-{plugin}-resolve"] -===== `resolve` - - * Value type is <> - * There is no default value for this setting. - -Forward resolve one or more fields. - -[id="{version}-plugins-{type}s-{plugin}-reverse"] -===== `reverse` - - * Value type is <> - * There is no default value for this setting. - -Reverse resolve one or more fields. - -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * Default value is `0.5` - -`resolv` calls will be wrapped in a timeout instance - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/drop-index.asciidoc b/docs/versioned-plugins/filters/drop-index.asciidoc deleted file mode 100644 index eb062420f..000000000 --- a/docs/versioned-plugins/filters/drop-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: drop -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::drop-v3.0.5.asciidoc[] -include::drop-v3.0.4.asciidoc[] -include::drop-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/filters/drop-v3.0.3.asciidoc b/docs/versioned-plugins/filters/drop-v3.0.3.asciidoc deleted file mode 100644 index 0f98ffc4c..000000000 --- a/docs/versioned-plugins/filters/drop-v3.0.3.asciidoc +++ /dev/null @@ -1,77 +0,0 @@ -:plugin: drop -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-drop/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Drop filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Drop filter.
-
-Drops everything that gets to this filter.
-
-This is best used in combination with conditionals, for example:
-[source,ruby]
-    filter {
-      if [loglevel] == "debug" {
-        drop { }
-      }
-    }
-
-The above will only pass events to the drop filter if the loglevel field is
-`debug`. This will cause all matching events to be dropped.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Drop Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-percentage>> |<<number,number>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-percentage"]
-===== `percentage`
-
- * Value type is <<number,number>>
- * Default value is `100`
-
-Drop a pre-configured percentage of the events.
-
-This is useful if you only need to drop a sample of events rather than all of them.
-
-For example, to drop around 40% of the events that have the field `loglevel` with value "debug":
-
-    filter {
-      if [loglevel] == "debug" {
-        drop {
-          percentage => 40
-        }
-      }
-    }
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/drop-v3.0.4.asciidoc b/docs/versioned-plugins/filters/drop-v3.0.4.asciidoc
deleted file mode 100644
index 50747961d..000000000
--- a/docs/versioned-plugins/filters/drop-v3.0.4.asciidoc
+++ /dev/null
@@ -1,77 +0,0 @@
-:plugin: drop
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.4
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-drop/blob/v3.0.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Drop filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Drop filter.
-
-Drops everything that gets to this filter.
-
-This is best used in combination with conditionals, for example:
-[source,ruby]
-    filter {
-      if [loglevel] == "debug" {
-        drop { }
-      }
-    }
-
-The above will only pass events to the drop filter if the loglevel field is
-`debug`. This will cause all matching events to be dropped.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Drop Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-percentage>> |<<number,number>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-percentage"]
-===== `percentage`
-
- * Value type is <<number,number>>
- * Default value is `100`
-
-Drop a pre-configured percentage of the events.
-
-This is useful if you only need to drop a sample of events rather than all of them.
-
-For example, to drop around 40% of the events that have the field `loglevel` with value "debug":
-
-    filter {
-      if [loglevel] == "debug" {
-        drop {
-          percentage => 40
-        }
-      }
-    }
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/drop-v3.0.5.asciidoc b/docs/versioned-plugins/filters/drop-v3.0.5.asciidoc
deleted file mode 100644
index d07312a0e..000000000
--- a/docs/versioned-plugins/filters/drop-v3.0.5.asciidoc
+++ /dev/null
@@ -1,77 +0,0 @@
-:plugin: drop
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.5
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-drop/blob/v3.0.5/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Drop filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Drop filter.
-
-Drops everything that gets to this filter.
-
-This is best used in combination with conditionals, for example:
-[source,ruby]
-    filter {
-      if [loglevel] == "debug" {
-        drop { }
-      }
-    }
-
-The above will only pass events to the drop filter if the loglevel field is
-`debug`. This will cause all matching events to be dropped.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Drop Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-percentage>> |<<number,number>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-percentage"]
-===== `percentage`
-
- * Value type is <<number,number>>
- * Default value is `100`
-
-Drop a pre-configured percentage of the events.
-
-This is useful if you only need to drop a sample of events rather than all of them.
-
-For example, to drop around 40% of the events that have the field `loglevel` with value "debug":
-
-    filter {
-      if [loglevel] == "debug" {
-        drop {
-          percentage => 40
-        }
-      }
-    }
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/elapsed-index.asciidoc b/docs/versioned-plugins/filters/elapsed-index.asciidoc
deleted file mode 100644
index 7c99bd2cc..000000000
--- a/docs/versioned-plugins/filters/elapsed-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: elapsed
-:type: filter
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <<v4.0.4-plugins-filters-elapsed,v4.0.4>> | 2017-11-13
-| <<v4.0.3-plugins-filters-elapsed,v4.0.3>> | 2017-08-15
-| <<v4.0.2-plugins-filters-elapsed,v4.0.2>> | 2017-06-23
-|=======================================================================
-
-include::elapsed-v4.0.4.asciidoc[]
-include::elapsed-v4.0.3.asciidoc[]
-include::elapsed-v4.0.2.asciidoc[]
-
diff --git a/docs/versioned-plugins/filters/elapsed-v4.0.2.asciidoc b/docs/versioned-plugins/filters/elapsed-v4.0.2.asciidoc
deleted file mode 100644
index 4af9a417b..000000000
--- a/docs/versioned-plugins/filters/elapsed-v4.0.2.asciidoc
+++ /dev/null
@@ -1,168 +0,0 @@
-:plugin: elapsed
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.0.2
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-elapsed/blob/v4.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Elapsed filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The elapsed filter tracks a pair of start/end events and uses their
-timestamps to calculate the elapsed time between them.
-
-The filter has been developed to track the execution time of processes and
-other long tasks.
-
-The configuration looks like this:
-[source,ruby]
-    filter {
-      elapsed {
-        start_tag => "start event tag"
-        end_tag => "end event tag"
-        unique_id_field => "id field name"
-        timeout => seconds
-        new_event_on_match => true/false
-      }
-    }
-
-The events managed by this filter must have some particular properties.
-The event describing the start of the task (the "start event") must contain
-a tag equal to `start_tag`. On the other side, the event describing the end
-of the task (the "end event") must contain a tag equal to `end_tag`. Both
-kinds of event need to carry an ID field that uniquely identifies that
-particular task. The name of this field is stored in `unique_id_field`.
-
-You can use a Grok filter to prepare the events for the elapsed filter.
-An example of configuration can be:
-[source,ruby]
-    filter {
-      grok {
-        match => { "message" => "%{TIMESTAMP_ISO8601} START id: (?<task_id>.*)" }
-        add_tag => [ "taskStarted" ]
-      }
-
-      grok {
-        match => { "message" => "%{TIMESTAMP_ISO8601} END id: (?<task_id>.*)" }
-        add_tag => [ "taskTerminated" ]
-      }
-
-      elapsed {
-        start_tag => "taskStarted"
-        end_tag => "taskTerminated"
-        unique_id_field => "task_id"
-      }
-    }
-
-The elapsed filter collects all the "start events". If two or more "start
-events" have the same ID, only the first one is recorded; the others are
-discarded.
-
-When an "end event" matching a previously collected "start event" is
-received, there is a match. The configuration property `new_event_on_match`
-tells where to insert the elapsed information: it can be added to the
-"end event", or a new "match event" can be created. Both events store the
-following information:
-
-* the tags `elapsed` and `elapsed_match`
-* the field `elapsed_time` with the difference, in seconds, between
-  the two events' timestamps
-* an ID field with the task ID
-* the field `elapsed_timestamp_start` with the timestamp of the start event
-
-If the "end event" does not arrive before "timeout" seconds, the
-"start event" is discarded and an "expired event" is generated. This event
-contains:
-
-* the tags `elapsed` and `elapsed_expired_error`
-* a field called `elapsed_time` with the age, in seconds, of the
-  "start event"
-* an ID field with the task ID
-* the field `elapsed_timestamp_start` with the timestamp of the "start event"
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Elapsed Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-end_tag>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-new_event_on_match>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-start_tag>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-unique_id_field>> |<<string,string>>|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-end_tag"]
-===== `end_tag`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The name of the tag identifying the "end event"
-
-[id="{version}-plugins-{type}s-{plugin}-new_event_on_match"]
-===== `new_event_on_match`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-This property manages what to do when an "end event" matches a "start event".
-If it's set to `false` (default value), the elapsed information is added
-to the "end event"; if it's set to `true`, a new "match event" is created.
-
-[id="{version}-plugins-{type}s-{plugin}-start_tag"]
-===== `start_tag`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The name of the tag identifying the "start event"
-
-[id="{version}-plugins-{type}s-{plugin}-timeout"]
-===== `timeout`
-
- * Value type is <<number,number>>
- * Default value is `1800`
-
-The number of seconds to wait for an "end event" before the
-corresponding "start event" is discarded and an "expired event"
-is generated. The default value is 30 minutes (1800 seconds).
-
-[id="{version}-plugins-{type}s-{plugin}-unique_id_field"]
-===== `unique_id_field`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The name of the field containing the task ID.
-This value must uniquely identify the task in the system, otherwise
-it's impossible to match the pair of events.
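-
-As a sketch of the outcome (field values here are illustrative): with `new_event_on_match => false`, an "end event" tagged `taskTerminated` that matches a stored "start event" would end up carrying something like:
-....
-{
-  "tags": [ "taskTerminated", "elapsed", "elapsed_match" ],
-  "task_id": "1234",
-  "elapsed_time": 12.4,
-  "elapsed_timestamp_start": "2017-11-13T09:32:01.000Z"
-}
-....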
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/elapsed-v4.0.3.asciidoc b/docs/versioned-plugins/filters/elapsed-v4.0.3.asciidoc
deleted file mode 100644
index f60b65943..000000000
--- a/docs/versioned-plugins/filters/elapsed-v4.0.3.asciidoc
+++ /dev/null
@@ -1,168 +0,0 @@
-:plugin: elapsed
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.0.3
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-elapsed/blob/v4.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Elapsed filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The elapsed filter tracks a pair of start/end events and uses their
-timestamps to calculate the elapsed time between them.
-
-The filter has been developed to track the execution time of processes and
-other long tasks.
-
-The configuration looks like this:
-[source,ruby]
-    filter {
-      elapsed {
-        start_tag => "start event tag"
-        end_tag => "end event tag"
-        unique_id_field => "id field name"
-        timeout => seconds
-        new_event_on_match => true/false
-      }
-    }
-
-The events managed by this filter must have some particular properties.
-The event describing the start of the task (the "start event") must contain
-a tag equal to `start_tag`. On the other side, the event describing the end
-of the task (the "end event") must contain a tag equal to `end_tag`. Both
-kinds of event need to carry an ID field that uniquely identifies that
-particular task. The name of this field is stored in `unique_id_field`.
-
-You can use a Grok filter to prepare the events for the elapsed filter.
-An example of configuration can be:
-[source,ruby]
-    filter {
-      grok {
-        match => { "message" => "%{TIMESTAMP_ISO8601} START id: (?<task_id>.*)" }
-        add_tag => [ "taskStarted" ]
-      }
-
-      grok {
-        match => { "message" => "%{TIMESTAMP_ISO8601} END id: (?<task_id>.*)" }
-        add_tag => [ "taskTerminated" ]
-      }
-
-      elapsed {
-        start_tag => "taskStarted"
-        end_tag => "taskTerminated"
-        unique_id_field => "task_id"
-      }
-    }
-
-The elapsed filter collects all the "start events". If two or more "start
-events" have the same ID, only the first one is recorded; the others are
-discarded.
-
-When an "end event" matching a previously collected "start event" is
-received, there is a match. The configuration property `new_event_on_match`
-tells where to insert the elapsed information: it can be added to the
-"end event", or a new "match event" can be created. Both events store the
-following information:
-
-* the tags `elapsed` and `elapsed_match`
-* the field `elapsed_time` with the difference, in seconds, between
-  the two events' timestamps
-* an ID field with the task ID
-* the field `elapsed_timestamp_start` with the timestamp of the start event
-
-If the "end event" does not arrive before "timeout" seconds, the
-"start event" is discarded and an "expired event" is generated. This event
-contains:
-
-* the tags `elapsed` and `elapsed_expired_error`
-* a field called `elapsed_time` with the age, in seconds, of the
-  "start event"
-* an ID field with the task ID
-* the field `elapsed_timestamp_start` with the timestamp of the "start event"
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Elapsed Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-end_tag>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-new_event_on_match>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-start_tag>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-unique_id_field>> |<<string,string>>|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-end_tag"]
-===== `end_tag`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The name of the tag identifying the "end event"
-
-[id="{version}-plugins-{type}s-{plugin}-new_event_on_match"]
-===== `new_event_on_match`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-This property manages what to do when an "end event" matches a "start event".
-If it's set to `false` (default value), the elapsed information is added
-to the "end event"; if it's set to `true`, a new "match event" is created.
-
-[id="{version}-plugins-{type}s-{plugin}-start_tag"]
-===== `start_tag`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The name of the tag identifying the "start event"
-
-[id="{version}-plugins-{type}s-{plugin}-timeout"]
-===== `timeout`
-
- * Value type is <<number,number>>
- * Default value is `1800`
-
-The number of seconds to wait for an "end event" before the
-corresponding "start event" is discarded and an "expired event"
-is generated. The default value is 30 minutes (1800 seconds).
-
-[id="{version}-plugins-{type}s-{plugin}-unique_id_field"]
-===== `unique_id_field`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The name of the field containing the task ID.
-This value must uniquely identify the task in the system, otherwise
-it's impossible to match the pair of events.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/elapsed-v4.0.4.asciidoc b/docs/versioned-plugins/filters/elapsed-v4.0.4.asciidoc
deleted file mode 100644
index aa3f1765d..000000000
--- a/docs/versioned-plugins/filters/elapsed-v4.0.4.asciidoc
+++ /dev/null
@@ -1,168 +0,0 @@
-:plugin: elapsed
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.0.4
-:release_date: 2017-11-13
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-elapsed/blob/v4.0.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Elapsed filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The elapsed filter tracks a pair of start/end events and uses their
-timestamps to calculate the elapsed time between them.
-
-The filter has been developed to track the execution time of processes and
-other long tasks.
-
-The configuration looks like this:
-[source,ruby]
-    filter {
-      elapsed {
-        start_tag => "start event tag"
-        end_tag => "end event tag"
-        unique_id_field => "id field name"
-        timeout => seconds
-        new_event_on_match => true/false
-      }
-    }
-
-The events managed by this filter must have some particular properties.
-The event describing the start of the task (the "start event") must contain
-a tag equal to `start_tag`. On the other side, the event describing the end
-of the task (the "end event") must contain a tag equal to `end_tag`. Both
-kinds of event need to carry an ID field that uniquely identifies that
-particular task. The name of this field is stored in `unique_id_field`.
-
-You can use a Grok filter to prepare the events for the elapsed filter.
-An example of configuration can be:
-[source,ruby]
-    filter {
-      grok {
-        match => { "message" => "%{TIMESTAMP_ISO8601} START id: (?<task_id>.*)" }
-        add_tag => [ "taskStarted" ]
-      }
-
-      grok {
-        match => { "message" => "%{TIMESTAMP_ISO8601} END id: (?<task_id>.*)" }
-        add_tag => [ "taskTerminated" ]
-      }
-
-      elapsed {
-        start_tag => "taskStarted"
-        end_tag => "taskTerminated"
-        unique_id_field => "task_id"
-      }
-    }
-
-The elapsed filter collects all the "start events". If two or more "start
-events" have the same ID, only the first one is recorded; the others are
-discarded.
-
-When an "end event" matching a previously collected "start event" is
-received, there is a match. The configuration property `new_event_on_match`
-tells where to insert the elapsed information: it can be added to the
-"end event", or a new "match event" can be created. Both events store the
-following information:
-
-* the tags `elapsed` and `elapsed_match`
-* the field `elapsed_time` with the difference, in seconds, between
-  the two events' timestamps
-* an ID field with the task ID
-* the field `elapsed_timestamp_start` with the timestamp of the start event
-
-If the "end event" does not arrive before "timeout" seconds, the
-"start event" is discarded and an "expired event" is generated. This event
-contains:
-
-* the tags `elapsed` and `elapsed_expired_error`
-* a field called `elapsed_time` with the age, in seconds, of the
-  "start event"
-* an ID field with the task ID
-* the field `elapsed_timestamp_start` with the timestamp of the "start event"
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Elapsed Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-end_tag>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-new_event_on_match>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-start_tag>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-unique_id_field>> |<<string,string>>|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-end_tag"]
-===== `end_tag`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The name of the tag identifying the "end event"
-
-[id="{version}-plugins-{type}s-{plugin}-new_event_on_match"]
-===== `new_event_on_match`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-This property manages what to do when an "end event" matches a "start event".
-If it's set to `false` (default value), the elapsed information is added
-to the "end event"; if it's set to `true`, a new "match event" is created.
-
-[id="{version}-plugins-{type}s-{plugin}-start_tag"]
-===== `start_tag`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The name of the tag identifying the "start event"
-
-[id="{version}-plugins-{type}s-{plugin}-timeout"]
-===== `timeout`
-
- * Value type is <<number,number>>
- * Default value is `1800`
-
-The number of seconds to wait for an "end event" before the
-corresponding "start event" is discarded and an "expired event"
-is generated. The default value is 30 minutes (1800 seconds).
-
-[id="{version}-plugins-{type}s-{plugin}-unique_id_field"]
-===== `unique_id_field`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The name of the field containing the task ID.
-This value must uniquely identify the task in the system, otherwise
-it's impossible to match the pair of events.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/elasticsearch-index.asciidoc b/docs/versioned-plugins/filters/elasticsearch-index.asciidoc
deleted file mode 100644
index 659140872..000000000
--- a/docs/versioned-plugins/filters/elasticsearch-index.asciidoc
+++ /dev/null
@@ -1,22 +0,0 @@
-:plugin: elasticsearch
-:type: filter
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <<v3.2.1-plugins-filters-elasticsearch,v3.2.1>> | 2017-11-07
-| <<v3.2.0-plugins-filters-elasticsearch,v3.2.0>> | 2017-09-29
-| <<v3.1.6-plugins-filters-elasticsearch,v3.1.6>> | 2017-08-15
-| <<v3.1.5-plugins-filters-elasticsearch,v3.1.5>> | 2017-07-28
-| <<v3.1.4-plugins-filters-elasticsearch,v3.1.4>> | 2017-06-23
-| <<v3.1.3-plugins-filters-elasticsearch,v3.1.3>> | 2017-05-03
-|=======================================================================
-
-include::elasticsearch-v3.2.1.asciidoc[]
-include::elasticsearch-v3.2.0.asciidoc[]
-include::elasticsearch-v3.1.6.asciidoc[]
-include::elasticsearch-v3.1.5.asciidoc[]
-include::elasticsearch-v3.1.4.asciidoc[]
-include::elasticsearch-v3.1.3.asciidoc[]
-
diff --git a/docs/versioned-plugins/filters/elasticsearch-v3.1.3.asciidoc b/docs/versioned-plugins/filters/elasticsearch-v3.1.3.asciidoc
deleted file mode 100644
index 918be3965..000000000
--- a/docs/versioned-plugins/filters/elasticsearch-v3.1.3.asciidoc
+++ /dev/null
@@ -1,236 +0,0 @@
-:plugin: elasticsearch
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.1.3
-:release_date: 2017-05-03
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-elasticsearch/blob/v3.1.3/CHANGELOG.md
-:include_path: ../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Elasticsearch filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-.Compatibility Note
-[NOTE]
-================================================================================
-Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
-called `http.content_type.required`. If this option is set to `true`, and you
-are using Logstash 2.4 through 5.2, you need to update the Elasticsearch filter
-plugin to version 3.1.1 or higher.
-
-================================================================================
-
-Search Elasticsearch for a previous log event and copy some fields from it
-into the current event. Below are two complete examples of how this filter might
-be used.
-
-The first example uses the legacy 'query' parameter, where the user is limited to an Elasticsearch query_string.
-Whenever logstash receives an "end" event, it uses this elasticsearch
-filter to find the matching "start" event based on some operation identifier.
-Then it copies the `@timestamp` field from the "start" event into a new field on
-the "end" event. Finally, using a combination of the "date" filter and the
-"ruby" filter, we calculate the time duration in hours between the two events.
-[source,ruby] --------------------------------------------------- - if [type] == "end" { - elasticsearch { - hosts => ["es-server"] - query => "type:start AND operation:%{[opid]}" - fields => { "@timestamp" => "started" } - } - - date { - match => ["[started]", "ISO8601"] - target => "[started]" - } - - ruby { - code => "event['duration_hrs'] = (event['@timestamp'] - event['started']) / 3600 rescue nil" - } - } - - The example below reproduces the above example but utilises the query_template. This query_template represents a full - Elasticsearch query DSL and supports the standard Logstash field substitution syntax. The example below issues - the same query as the first example but uses the template shown. - - if [type] == "end" { - elasticsearch { - hosts => ["es-server"] - query_template => "template.json" - } - - date { - match => ["[started]", "ISO8601"] - target => "[started]" - } - - ruby { - code => "event['duration_hrs'] = (event['@timestamp'] - event['started']) / 3600 rescue nil" - } - } - - - - template.json: - - { - "query": { - "query_string": { - "query": "type:start AND operation:%{[opid]}" - } - }, - "_source": ["@timestamp", "started"] - } - -As illustrated above, through the use of 'opid', fields from the Logstash events can be referenced within the template. -The template will be populated per event prior to being used to query Elasticsearch. - --------------------------------------------------- - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Elasticsearch Filter Configuration Options - -This plugin supports the following configuration options plus the <> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-enable_sort>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-query>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-query_template>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-result_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sort>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -|======================================================================= - -Also see <> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-ca_file"] -===== `ca_file` - - * Value type is <> - * There is no default value for this setting. - -SSL Certificate Authority file - -[id="{version}-plugins-{type}s-{plugin}-enable_sort"] -===== `enable_sort` - - * Value type is <> - * Default value is `true` - -Whether results should be sorted or not - -[id="{version}-plugins-{type}s-{plugin}-fields"] -===== `fields` - - * Value type is <> - * Default value is `{}` - -Array of fields to copy from old event (found via elasticsearch) into new event - -[id="{version}-plugins-{type}s-{plugin}-hosts"] -===== `hosts` - - * Value type is <> - * Default value is `["localhost:9200"]` - -List of elasticsearch hosts to use for querying. 
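-
-As a sketch (the host names are placeholders), several nodes can be listed
-and any of them may serve the lookup:
-
-[source,ruby]
---------------------------------------------------
-filter {
-  elasticsearch {
-    # any node in the list can answer the lookup query
-    hosts => ["es-node-1:9200", "es-node-2:9200"]
-  }
-}
---------------------------------------------------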
- -[id="{version}-plugins-{type}s-{plugin}-index"] -===== `index` - - * Value type is <> - * Default value is `""` - -Comma-delimited list of index names to search; use `_all` or empty string to perform the operation on all indices - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Basic Auth - password - -[id="{version}-plugins-{type}s-{plugin}-query"] -===== `query` - - * Value type is <> - * There is no default value for this setting. - -Elasticsearch query string. Read the Elasticsearch query string documentation. -for more info at: https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-query-string-query.html#query-string-syntax - -[id="{version}-plugins-{type}s-{plugin}-query_template"] -===== `query_template` - - * Value type is <> - * There is no default value for this setting. - -File path to elasticsearch query in DSL format. Read the Elasticsearch query documentation -for more info at: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html - -[id="{version}-plugins-{type}s-{plugin}-result_size"] -===== `result_size` - - * Value type is <> - * Default value is `1` - -How many results to return - -[id="{version}-plugins-{type}s-{plugin}-sort"] -===== `sort` - - * Value type is <> - * Default value is `"@timestamp:desc"` - -Comma-delimited list of `:` pairs that define the sort order - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` - - * Value type is <> - * Default value is `false` - -SSL - -[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] -===== `tag_on_failure` - - * Value type is <> - * Default value is `["_elasticsearch_lookup_failure"]` - -Tags the event on failure to look up geo information. This can be used in later analysis. - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * There is no default value for this setting. - -Basic Auth - username - - - -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/elasticsearch-v3.1.4.asciidoc b/docs/versioned-plugins/filters/elasticsearch-v3.1.4.asciidoc deleted file mode 100644 index a352c7eaa..000000000 --- a/docs/versioned-plugins/filters/elasticsearch-v3.1.4.asciidoc +++ /dev/null @@ -1,237 +0,0 @@ -:plugin: elasticsearch -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.4 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-elasticsearch/blob/v3.1.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Elasticsearch filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -.Compatibility Note -[NOTE] -================================================================================ -Starting with Elasticsearch 5.3, there's an {ref}modules-http.html[HTTP setting] -called `http.content_type.required`. If this option is set to `true`, and you -are using Logstash 2.4 through 5.2, you need to update the Elasticsearch filter -plugin to version 3.1.1 or higher. 
- -================================================================================ - -Search Elasticsearch for a previous log event and copy some fields from it -into the current event. Below are two complete examples of how this filter might -be used. - -The first example uses the legacy 'query' parameter where the user is limited to an Elasticsearch query_string. -Whenever logstash receives an "end" event, it uses this elasticsearch -filter to find the matching "start" event based on some operation identifier. -Then it copies the `@timestamp` field from the "start" event into a new field on -the "end" event. Finally, using a combination of the "date" filter and the -"ruby" filter, we calculate the time duration in hours between the two events. -[source,ruby] --------------------------------------------------- - if [type] == "end" { - elasticsearch { - hosts => ["es-server"] - query => "type:start AND operation:%{[opid]}" - fields => { "@timestamp" => "started" } - } - - date { - match => ["[started]", "ISO8601"] - target => "[started]" - } - - ruby { - code => "event['duration_hrs'] = (event['@timestamp'] - event['started']) / 3600 rescue nil" - } - } - - The example below reproduces the above example but utilises the query_template. This query_template represents a full - Elasticsearch query DSL and supports the standard Logstash field substitution syntax. The example below issues - the same query as the first example but uses the template shown. - - if [type] == "end" { - elasticsearch { - hosts => ["es-server"] - query_template => "template.json" - } - - date { - match => ["[started]", "ISO8601"] - target => "[started]" - } - - ruby { - code => "event['duration_hrs'] = (event['@timestamp'] - event['started']) / 3600 rescue nil" - } - } - - - - template.json: - - { - "query": { - "query_string": { - "query": "type:start AND operation:%{[opid]}" - } - }, - "_source": ["@timestamp", "started"] - } - -As illustrated above, through the use of 'opid', fields from the Logstash events can be referenced within the template. -The template will be populated per event prior to being used to query Elasticsearch. - --------------------------------------------------- - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Elasticsearch Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-enable_sort>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-query>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-query_template>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-result_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sort>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. 
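-
-For a cluster behind TLS and basic auth, the security options described below
-(`ssl`, `ca_file`, `user`, `password`) combine as in this sketch; the host,
-path, and credentials are hypothetical:
-
-[source,ruby]
---------------------------------------------------
-filter {
-  elasticsearch {
-    hosts    => ["es-server:9243"]
-    ssl      => true
-    ca_file  => "/etc/logstash/certs/ca.pem"  # CA that signed the cluster's certificate
-    user     => "lookup_user"
-    password => "changeme"
-  }
-}
---------------------------------------------------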
- -  - -[id="{version}-plugins-{type}s-{plugin}-ca_file"] -===== `ca_file` - - * Value type is <> - * There is no default value for this setting. - -SSL Certificate Authority file - -[id="{version}-plugins-{type}s-{plugin}-enable_sort"] -===== `enable_sort` - - * Value type is <> - * Default value is `true` - -Whether results should be sorted or not - -[id="{version}-plugins-{type}s-{plugin}-fields"] -===== `fields` - - * Value type is <> - * Default value is `{}` - -Array of fields to copy from old event (found via elasticsearch) into new event - -[id="{version}-plugins-{type}s-{plugin}-hosts"] -===== `hosts` - - * Value type is <> - * Default value is `["localhost:9200"]` - -List of elasticsearch hosts to use for querying. - -[id="{version}-plugins-{type}s-{plugin}-index"] -===== `index` - - * Value type is <> - * Default value is `""` - -Comma-delimited list of index names to search; use `_all` or empty string to perform the operation on all indices - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Basic Auth - password - -[id="{version}-plugins-{type}s-{plugin}-query"] -===== `query` - - * Value type is <> - * There is no default value for this setting. - -Elasticsearch query string. Read the Elasticsearch query string documentation. -for more info at: https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-query-string-query.html#query-string-syntax - -[id="{version}-plugins-{type}s-{plugin}-query_template"] -===== `query_template` - - * Value type is <> - * There is no default value for this setting. - -File path to elasticsearch query in DSL format. Read the Elasticsearch query documentation -for more info at: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html - -[id="{version}-plugins-{type}s-{plugin}-result_size"] -===== `result_size` - - * Value type is <> - * Default value is `1` - -How many results to return - -[id="{version}-plugins-{type}s-{plugin}-sort"] -===== `sort` - - * Value type is <> - * Default value is `"@timestamp:desc"` - -Comma-delimited list of `:` pairs that define the sort order - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` - - * Value type is <> - * Default value is `false` - -SSL - -[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] -===== `tag_on_failure` - - * Value type is <> - * Default value is `["_elasticsearch_lookup_failure"]` - -Tags the event on failure to look up geo information. This can be used in later analysis. - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * There is no default value for this setting. - -Basic Auth - username - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/elasticsearch-v3.1.5.asciidoc b/docs/versioned-plugins/filters/elasticsearch-v3.1.5.asciidoc deleted file mode 100644 index b6d8e1bf2..000000000 --- a/docs/versioned-plugins/filters/elasticsearch-v3.1.5.asciidoc +++ /dev/null @@ -1,237 +0,0 @@ -:plugin: elasticsearch -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v3.1.5 -:release_date: 2017-07-28 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-elasticsearch/blob/v3.1.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Elasticsearch filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -.Compatibility Note -[NOTE] -================================================================================ -Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting] -called `http.content_type.required`. If this option is set to `true`, and you -are using Logstash 2.4 through 5.2, you need to update the Elasticsearch filter -plugin to version 3.1.1 or higher. - -================================================================================ - -Search Elasticsearch for a previous log event and copy some fields from it -into the current event. Below are two complete examples of how this filter might -be used. - -The first example uses the legacy 'query' parameter where the user is limited to an Elasticsearch query_string. -Whenever logstash receives an "end" event, it uses this elasticsearch -filter to find the matching "start" event based on some operation identifier. -Then it copies the `@timestamp` field from the "start" event into a new field on -the "end" event. Finally, using a combination of the "date" filter and the -"ruby" filter, we calculate the time duration in hours between the two events. -[source,ruby] --------------------------------------------------- - if [type] == "end" { - elasticsearch { - hosts => ["es-server"] - query => "type:start AND operation:%{[opid]}" - fields => { "@timestamp" => "started" } - } - - date { - match => ["[started]", "ISO8601"] - target => "[started]" - } - - ruby { - code => "event['duration_hrs'] = (event['@timestamp'] - event['started']) / 3600 rescue nil" - } - } - - The example below reproduces the above example but utilises the query_template. This query_template represents a full - Elasticsearch query DSL and supports the standard Logstash field substitution syntax. The example below issues - the same query as the first example but uses the template shown. - - if [type] == "end" { - elasticsearch { - hosts => ["es-server"] - query_template => "template.json" - } - - date { - match => ["[started]", "ISO8601"] - target => "[started]" - } - - ruby { - code => "event['duration_hrs'] = (event['@timestamp'] - event['started']) / 3600 rescue nil" - } - } - - - - template.json: - - { - "query": { - "query_string": { - "query": "type:start AND operation:%{[opid]}" - } - }, - "_source": ["@timestamp", "started"] - } - -As illustrated above, through the use of 'opid', fields from the Logstash events can be referenced within the template. -The template will be populated per event prior to being used to query Elasticsearch. - --------------------------------------------------- - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Elasticsearch Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-enable_sort>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-query>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-query_template>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-result_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sort>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-ca_file"] -===== `ca_file` - - * Value type is <> - * There is no default value for this setting. - -SSL Certificate Authority file - -[id="{version}-plugins-{type}s-{plugin}-enable_sort"] -===== `enable_sort` - - * Value type is <> - * Default value is `true` - -Whether results should be sorted or not - -[id="{version}-plugins-{type}s-{plugin}-fields"] -===== `fields` - - * Value type is <> - * Default value is `{}` - -Array of fields to copy from old event (found via elasticsearch) into new event - -[id="{version}-plugins-{type}s-{plugin}-hosts"] -===== `hosts` - - * Value type is <> - * Default value is `["localhost:9200"]` - -List of elasticsearch hosts to use for querying. - -[id="{version}-plugins-{type}s-{plugin}-index"] -===== `index` - - * Value type is <> - * Default value is `""` - -Comma-delimited list of index names to search; use `_all` or empty string to perform the operation on all indices - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Basic Auth - password - -[id="{version}-plugins-{type}s-{plugin}-query"] -===== `query` - - * Value type is <> - * There is no default value for this setting. - -Elasticsearch query string. Read the Elasticsearch query string documentation. -for more info at: https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-query-string-query.html#query-string-syntax - -[id="{version}-plugins-{type}s-{plugin}-query_template"] -===== `query_template` - - * Value type is <> - * There is no default value for this setting. - -File path to elasticsearch query in DSL format. 
Read the Elasticsearch query documentation -for more info at: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html - -[id="{version}-plugins-{type}s-{plugin}-result_size"] -===== `result_size` - - * Value type is <> - * Default value is `1` - -How many results to return - -[id="{version}-plugins-{type}s-{plugin}-sort"] -===== `sort` - - * Value type is <> - * Default value is `"@timestamp:desc"` - -Comma-delimited list of `:` pairs that define the sort order - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` - - * Value type is <> - * Default value is `false` - -SSL - -[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] -===== `tag_on_failure` - - * Value type is <> - * Default value is `["_elasticsearch_lookup_failure"]` - -Tags the event on failure to look up geo information. This can be used in later analysis. - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * There is no default value for this setting. - -Basic Auth - username - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/elasticsearch-v3.1.6.asciidoc b/docs/versioned-plugins/filters/elasticsearch-v3.1.6.asciidoc deleted file mode 100644 index af3ffb9b8..000000000 --- a/docs/versioned-plugins/filters/elasticsearch-v3.1.6.asciidoc +++ /dev/null @@ -1,237 +0,0 @@ -:plugin: elasticsearch -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.6 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-elasticsearch/blob/v3.1.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Elasticsearch filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -.Compatibility Note -[NOTE] -================================================================================ -Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting] -called `http.content_type.required`. If this option is set to `true`, and you -are using Logstash 2.4 through 5.2, you need to update the Elasticsearch filter -plugin to version 3.1.1 or higher. - -================================================================================ - -Search Elasticsearch for a previous log event and copy some fields from it -into the current event. Below are two complete examples of how this filter might -be used. - -The first example uses the legacy 'query' parameter where the user is limited to an Elasticsearch query_string. -Whenever logstash receives an "end" event, it uses this elasticsearch -filter to find the matching "start" event based on some operation identifier. -Then it copies the `@timestamp` field from the "start" event into a new field on -the "end" event. Finally, using a combination of the "date" filter and the -"ruby" filter, we calculate the time duration in hours between the two events. 
-[source,ruby] --------------------------------------------------- - if [type] == "end" { - elasticsearch { - hosts => ["es-server"] - query => "type:start AND operation:%{[opid]}" - fields => { "@timestamp" => "started" } - } - - date { - match => ["[started]", "ISO8601"] - target => "[started]" - } - - ruby { - code => "event['duration_hrs'] = (event['@timestamp'] - event['started']) / 3600 rescue nil" - } - } - - The example below reproduces the above example but utilises the query_template. This query_template represents a full - Elasticsearch query DSL and supports the standard Logstash field substitution syntax. The example below issues - the same query as the first example but uses the template shown. - - if [type] == "end" { - elasticsearch { - hosts => ["es-server"] - query_template => "template.json" - } - - date { - match => ["[started]", "ISO8601"] - target => "[started]" - } - - ruby { - code => "event['duration_hrs'] = (event['@timestamp'] - event['started']) / 3600 rescue nil" - } - } - - - - template.json: - - { - "query": { - "query_string": { - "query": "type:start AND operation:%{[opid]}" - } - }, - "_source": ["@timestamp", "started"] - } - -As illustrated above, through the use of 'opid', fields from the Logstash events can be referenced within the template. -The template will be populated per event prior to being used to query Elasticsearch. - --------------------------------------------------- - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Elasticsearch Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-enable_sort>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-query>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-query_template>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-result_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sort>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-ca_file"] -===== `ca_file` - - * Value type is <> - * There is no default value for this setting. - -SSL Certificate Authority file - -[id="{version}-plugins-{type}s-{plugin}-enable_sort"] -===== `enable_sort` - - * Value type is <> - * Default value is `true` - -Whether results should be sorted or not - -[id="{version}-plugins-{type}s-{plugin}-fields"] -===== `fields` - - * Value type is <> - * Default value is `{}` - -Array of fields to copy from old event (found via elasticsearch) into new event - -[id="{version}-plugins-{type}s-{plugin}-hosts"] -===== `hosts` - - * Value type is <> - * Default value is `["localhost:9200"]` - -List of elasticsearch hosts to use for querying. 
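-
-One common shape for the lookup, sketched with an illustrative query string:
-pair `sort` and `result_size` (both described below) to fetch only the newest
-matching event:
-
-[source,ruby]
---------------------------------------------------
-filter {
-  elasticsearch {
-    hosts       => ["es-server"]
-    query       => "type:start"
-    sort        => "@timestamp:desc"  # newest first
-    result_size => 1                  # keep only the top hit
-  }
-}
---------------------------------------------------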
- -[id="{version}-plugins-{type}s-{plugin}-index"] -===== `index` - - * Value type is <> - * Default value is `""` - -Comma-delimited list of index names to search; use `_all` or empty string to perform the operation on all indices - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Basic Auth - password - -[id="{version}-plugins-{type}s-{plugin}-query"] -===== `query` - - * Value type is <> - * There is no default value for this setting. - -Elasticsearch query string. Read the Elasticsearch query string documentation. -for more info at: https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-query-string-query.html#query-string-syntax - -[id="{version}-plugins-{type}s-{plugin}-query_template"] -===== `query_template` - - * Value type is <> - * There is no default value for this setting. - -File path to elasticsearch query in DSL format. Read the Elasticsearch query documentation -for more info at: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html - -[id="{version}-plugins-{type}s-{plugin}-result_size"] -===== `result_size` - - * Value type is <> - * Default value is `1` - -How many results to return - -[id="{version}-plugins-{type}s-{plugin}-sort"] -===== `sort` - - * Value type is <> - * Default value is `"@timestamp:desc"` - -Comma-delimited list of `:` pairs that define the sort order - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` - - * Value type is <> - * Default value is `false` - -SSL - -[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] -===== `tag_on_failure` - - * Value type is <> - * Default value is `["_elasticsearch_lookup_failure"]` - -Tags the event on failure to look up geo information. This can be used in later analysis. - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * There is no default value for this setting. - -Basic Auth - username - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/elasticsearch-v3.2.0.asciidoc b/docs/versioned-plugins/filters/elasticsearch-v3.2.0.asciidoc deleted file mode 100644 index ed802d177..000000000 --- a/docs/versioned-plugins/filters/elasticsearch-v3.2.0.asciidoc +++ /dev/null @@ -1,238 +0,0 @@ -:plugin: elasticsearch -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.2.0 -:release_date: 2017-09-29 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-elasticsearch/blob/v3.2.0/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Elasticsearch filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -.Compatibility Note -[NOTE] -================================================================================ -Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting] -called `http.content_type.required`. If this option is set to `true`, and you -are using Logstash 2.4 through 5.2, you need to update the Elasticsearch filter -plugin to version 3.1.1 or higher. 
- -================================================================================ - -Search Elasticsearch for a previous log event and copy some fields from it -into the current event. Below are two complete examples of how this filter might -be used. - -The first example uses the legacy 'query' parameter where the user is limited to an Elasticsearch query_string. -Whenever logstash receives an "end" event, it uses this elasticsearch -filter to find the matching "start" event based on some operation identifier. -Then it copies the `@timestamp` field from the "start" event into a new field on -the "end" event. Finally, using a combination of the "date" filter and the -"ruby" filter, we calculate the time duration in hours between the two events. -[source,ruby] --------------------------------------------------- - if [type] == "end" { - elasticsearch { - hosts => ["es-server"] - query => "type:start AND operation:%{[opid]}" - fields => { "@timestamp" => "started" } - } - - date { - match => ["[started]", "ISO8601"] - target => "[started]" - } - - ruby { - code => "event['duration_hrs'] = (event['@timestamp'] - event['started']) / 3600 rescue nil" - } - } - - The example below reproduces the above example but utilises the query_template. This query_template represents a full - Elasticsearch query DSL and supports the standard Logstash field substitution syntax. The example below issues - the same query as the first example but uses the template shown. - - if [type] == "end" { - elasticsearch { - hosts => ["es-server"] - query_template => "template.json" - } - - date { - match => ["[started]", "ISO8601"] - target => "[started]" - } - - ruby { - code => "event['duration_hrs'] = (event['@timestamp'] - event['started']) / 3600 rescue nil" - } - } - - - - template.json: - - { - "query": { - "query_string": { - "query": "type:start AND operation:%{[opid]}" - } - }, - "_source": ["@timestamp", "started"] - } - -As illustrated above, through the use of 'opid', fields from the Logstash events can be referenced within the template. -The template will be populated per event prior to being used to query Elasticsearch. - --------------------------------------------------- - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Elasticsearch Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-enable_sort>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-query>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-query_template>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-result_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sort>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. 
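-
-In this version the `index` option (described below) supports field
-substitution, so the search can be confined to a per-event index; the index
-naming scheme here is hypothetical:
-
-[source,ruby]
---------------------------------------------------
-filter {
-  elasticsearch {
-    hosts => ["es-server"]
-    index => "logs-%{date_field}"  # resolved per event from the `date_field` value
-    query => "type:start AND operation:%{[opid]}"
-  }
-}
---------------------------------------------------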
- -  - -[id="{version}-plugins-{type}s-{plugin}-ca_file"] -===== `ca_file` - - * Value type is <> - * There is no default value for this setting. - -SSL Certificate Authority file - -[id="{version}-plugins-{type}s-{plugin}-enable_sort"] -===== `enable_sort` - - * Value type is <> - * Default value is `true` - -Whether results should be sorted or not - -[id="{version}-plugins-{type}s-{plugin}-fields"] -===== `fields` - - * Value type is <> - * Default value is `{}` - -Array of fields to copy from old event (found via elasticsearch) into new event - -[id="{version}-plugins-{type}s-{plugin}-hosts"] -===== `hosts` - - * Value type is <> - * Default value is `["localhost:9200"]` - -List of elasticsearch hosts to use for querying. - -[id="{version}-plugins-{type}s-{plugin}-index"] -===== `index` - - * Value type is <> - * Default value is `""` - -Comma-delimited list of index names to search; use `_all` or empty string to perform the operation on all indices. -Field substitution (e.g. `index-name-%{date_field}`) is available - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Basic Auth - password - -[id="{version}-plugins-{type}s-{plugin}-query"] -===== `query` - - * Value type is <> - * There is no default value for this setting. - -Elasticsearch query string. Read the Elasticsearch query string documentation. -for more info at: https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-query-string-query.html#query-string-syntax - -[id="{version}-plugins-{type}s-{plugin}-query_template"] -===== `query_template` - - * Value type is <> - * There is no default value for this setting. - -File path to elasticsearch query in DSL format. Read the Elasticsearch query documentation -for more info at: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html - -[id="{version}-plugins-{type}s-{plugin}-result_size"] -===== `result_size` - - * Value type is <> - * Default value is `1` - -How many results to return - -[id="{version}-plugins-{type}s-{plugin}-sort"] -===== `sort` - - * Value type is <> - * Default value is `"@timestamp:desc"` - -Comma-delimited list of `:` pairs that define the sort order - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` - - * Value type is <> - * Default value is `false` - -SSL - -[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] -===== `tag_on_failure` - - * Value type is <> - * Default value is `["_elasticsearch_lookup_failure"]` - -Tags the event on failure to look up geo information. This can be used in later analysis. - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * There is no default value for this setting. - -Basic Auth - username - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/elasticsearch-v3.2.1.asciidoc b/docs/versioned-plugins/filters/elasticsearch-v3.2.1.asciidoc deleted file mode 100644 index 64ed0aa39..000000000 --- a/docs/versioned-plugins/filters/elasticsearch-v3.2.1.asciidoc +++ /dev/null @@ -1,238 +0,0 @@ -:plugin: elasticsearch -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v3.2.1 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-elasticsearch/blob/v3.2.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Elasticsearch filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -.Compatibility Note -[NOTE] -================================================================================ -Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting] -called `http.content_type.required`. If this option is set to `true`, and you -are using Logstash 2.4 through 5.2, you need to update the Elasticsearch filter -plugin to version 3.1.1 or higher. - -================================================================================ - -Search Elasticsearch for a previous log event and copy some fields from it -into the current event. Below are two complete examples of how this filter might -be used. - -The first example uses the legacy 'query' parameter where the user is limited to an Elasticsearch query_string. -Whenever logstash receives an "end" event, it uses this elasticsearch -filter to find the matching "start" event based on some operation identifier. -Then it copies the `@timestamp` field from the "start" event into a new field on -the "end" event. Finally, using a combination of the "date" filter and the -"ruby" filter, we calculate the time duration in hours between the two events. -[source,ruby] --------------------------------------------------- - if [type] == "end" { - elasticsearch { - hosts => ["es-server"] - query => "type:start AND operation:%{[opid]}" - fields => { "@timestamp" => "started" } - } - - date { - match => ["[started]", "ISO8601"] - target => "[started]" - } - - ruby { - code => "event['duration_hrs'] = (event['@timestamp'] - event['started']) / 3600 rescue nil" - } - } - - The example below reproduces the above example but utilises the query_template. This query_template represents a full - Elasticsearch query DSL and supports the standard Logstash field substitution syntax. The example below issues - the same query as the first example but uses the template shown. - - if [type] == "end" { - elasticsearch { - hosts => ["es-server"] - query_template => "template.json" - } - - date { - match => ["[started]", "ISO8601"] - target => "[started]" - } - - ruby { - code => "event['duration_hrs'] = (event['@timestamp'] - event['started']) / 3600 rescue nil" - } - } - - - - template.json: - - { - "query": { - "query_string": { - "query": "type:start AND operation:%{[opid]}" - } - }, - "_source": ["@timestamp", "started"] - } - -As illustrated above, through the use of 'opid', fields from the Logstash events can be referenced within the template. -The template will be populated per event prior to being used to query Elasticsearch. - --------------------------------------------------- - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Elasticsearch Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-enable_sort>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-query>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-query_template>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-result_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sort>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-ca_file"] -===== `ca_file` - - * Value type is <> - * There is no default value for this setting. - -SSL Certificate Authority file - -[id="{version}-plugins-{type}s-{plugin}-enable_sort"] -===== `enable_sort` - - * Value type is <> - * Default value is `true` - -Whether results should be sorted or not - -[id="{version}-plugins-{type}s-{plugin}-fields"] -===== `fields` - - * Value type is <> - * Default value is `{}` - -Array of fields to copy from old event (found via elasticsearch) into new event - -[id="{version}-plugins-{type}s-{plugin}-hosts"] -===== `hosts` - - * Value type is <> - * Default value is `["localhost:9200"]` - -List of elasticsearch hosts to use for querying. - -[id="{version}-plugins-{type}s-{plugin}-index"] -===== `index` - - * Value type is <> - * Default value is `""` - -Comma-delimited list of index names to search; use `_all` or empty string to perform the operation on all indices. -Field substitution (e.g. `index-name-%{date_field}`) is available - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Basic Auth - password - -[id="{version}-plugins-{type}s-{plugin}-query"] -===== `query` - - * Value type is <> - * There is no default value for this setting. - -Elasticsearch query string. Read the Elasticsearch query string documentation. -for more info at: https://www.elastic.co/guide/en/elasticsearch/reference/master/query-dsl-query-string-query.html#query-string-syntax - -[id="{version}-plugins-{type}s-{plugin}-query_template"] -===== `query_template` - - * Value type is <> - * There is no default value for this setting. - -File path to elasticsearch query in DSL format. 
Read the Elasticsearch query documentation -for more info at: https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html - -[id="{version}-plugins-{type}s-{plugin}-result_size"] -===== `result_size` - - * Value type is <> - * Default value is `1` - -How many results to return - -[id="{version}-plugins-{type}s-{plugin}-sort"] -===== `sort` - - * Value type is <> - * Default value is `"@timestamp:desc"` - -Comma-delimited list of `:` pairs that define the sort order - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` - - * Value type is <> - * Default value is `false` - -SSL - -[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] -===== `tag_on_failure` - - * Value type is <> - * Default value is `["_elasticsearch_lookup_failure"]` - -Tags the event on failure to look up previous log event information. This can be used in later analysis. - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * There is no default value for this setting. - -Basic Auth - username - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/emoji-index.asciidoc b/docs/versioned-plugins/filters/emoji-index.asciidoc deleted file mode 100644 index 53f09a9ea..000000000 --- a/docs/versioned-plugins/filters/emoji-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: emoji -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::emoji-v1.0.2.asciidoc[] -include::emoji-v1.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/filters/emoji-v1.0.1.asciidoc b/docs/versioned-plugins/filters/emoji-v1.0.1.asciidoc deleted file mode 100644 index e88c97498..000000000 --- a/docs/versioned-plugins/filters/emoji-v1.0.1.asciidoc +++ /dev/null @@ -1,176 +0,0 @@ -:plugin: emoji -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v1.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-emoji/blob/v1.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Emoji filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This plugin maps the severity names or numeric codes as defined in -https://tools.ietf.org/html/rfc3164#section-4.1.1[RFC 3164] and -https://tools.ietf.org/html/rfc5424#section-6.2.1[RFC 5424] to the emoji -as defined in the configuration. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Emoji Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-fallback>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-field>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-override>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sev_alert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sev_critical>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sev_debug>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sev_emergency>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sev_error>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sev_info>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sev_notice>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sev_warning>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-fallback"] -===== `fallback` - - * Value type is <> - * There is no default value for this setting. - -In case no match is found in the event, this will add a default emoji, which -will always populate `target`, if the match failed. - -For example, if we have configured `fallback => "`❓`"`, using this -dictionary: -[source,ruby] - foo: 👤 - -Then, if logstash received an event with the field `foo` set to 👤, the -target field would be set to 👤. However, if logstash received an event with -`foo` set to `nope`, then the target field would still be populated, but -with the value of ❓. -This configuration can be dynamic and include parts of the event using the -`%{field}` syntax. - -[id="{version}-plugins-{type}s-{plugin}-field"] -===== `field` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The name of the logstash event field containing the value to be compared for -a match by the emoji filter (e.g. `severity`). - -If this field is an array, only the first value will be used. - -[id="{version}-plugins-{type}s-{plugin}-override"] -===== `override` - - * Value type is <> - * Default value is `false` - -If the target field already exists, this configuration item specifies -whether the filter should skip being rewritten as an emoji (default) or -overwrite the target field value with the emoji value. 
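-
-A minimal sketch of how `field`, `override`, and the `target` option
-(described below) fit together; the field names are hypothetical:
-
-[source,ruby]
---------------------------------------------------
-filter {
-  emoji {
-    field    => "severity"        # source field holding the severity value
-    target   => "severity_emoji"  # write the emoji here instead of the default `emoji`
-    override => true              # replace `severity_emoji` if it already exists
-  }
-}
---------------------------------------------------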
- -[id="{version}-plugins-{type}s-{plugin}-sev_alert"] -===== `sev_alert` - - * Value type is <> - * Default value is `"🚨"` - -`sev_alert` selects the emoji/unicode character for Alert severity - -[id="{version}-plugins-{type}s-{plugin}-sev_critical"] -===== `sev_critical` - - * Value type is <> - * Default value is `"🔥"` - -`sev_critical` selects the emoji/unicode character for Critical severity - -[id="{version}-plugins-{type}s-{plugin}-sev_debug"] -===== `sev_debug` - - * Value type is <> - * Default value is `"🐛"` - -`sev_debug` selects the emoji/unicode character for Debug severity - -[id="{version}-plugins-{type}s-{plugin}-sev_emergency"] -===== `sev_emergency` - - * Value type is <> - * Default value is `"💥"` - -`sev_emergency` selects the emoji/unicode character for Emergency severity - -[id="{version}-plugins-{type}s-{plugin}-sev_error"] -===== `sev_error` - - * Value type is <> - * Default value is `"❌"` - -`sev_error` selects the emoji/unicode character for Error severity - -[id="{version}-plugins-{type}s-{plugin}-sev_info"] -===== `sev_info` - - * Value type is <> - * Default value is `"ℹ️"` - -`sev_info` selects the emoji/unicode character for Informational severity - -[id="{version}-plugins-{type}s-{plugin}-sev_notice"] -===== `sev_notice` - - * Value type is <> - * Default value is `"👀"` - -`sev_notice` selects the emoji/unicode character for Notice severity - -[id="{version}-plugins-{type}s-{plugin}-sev_warning"] -===== `sev_warning` - - * Value type is <> - * Default value is `"⚠️"` - -`sev_warning` selects the emoji/unicode character for Warning severity - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"emoji"` - -The target field you wish to populate with the emoji. The default -is a field named `emoji`. Set this to the same value as the source (`field`) -if you want to do a substitution, in this case filter will allways succeed. -This will overwrite the old value of the source field! - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/emoji-v1.0.2.asciidoc b/docs/versioned-plugins/filters/emoji-v1.0.2.asciidoc deleted file mode 100644 index 3b8e33d62..000000000 --- a/docs/versioned-plugins/filters/emoji-v1.0.2.asciidoc +++ /dev/null @@ -1,176 +0,0 @@ -:plugin: emoji -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v1.0.2 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-emoji/blob/v1.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Emoji filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This plugin maps the severity names or numeric codes as defined in -https://tools.ietf.org/html/rfc3164#section-4.1.1[RFC 3164] and -https://tools.ietf.org/html/rfc5424#section-6.2.1[RFC 5424] to the emoji -as defined in the configuration. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Emoji Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-fallback>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-field>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-override>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sev_alert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sev_critical>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sev_debug>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sev_emergency>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sev_error>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sev_info>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sev_notice>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sev_warning>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-fallback"] -===== `fallback` - - * Value type is <> - * There is no default value for this setting. - -In case no match is found in the event, this will add a default emoji, which -will always populate `target`, if the match failed. - -For example, if we have configured `fallback => "`❓`"`, using this -dictionary: -[source,ruby] - foo: 👤 - -Then, if logstash received an event with the field `foo` set to 👤, the -target field would be set to 👤. However, if logstash received an event with -`foo` set to `nope`, then the target field would still be populated, but -with the value of ❓. -This configuration can be dynamic and include parts of the event using the -`%{field}` syntax. - -[id="{version}-plugins-{type}s-{plugin}-field"] -===== `field` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The name of the logstash event field containing the value to be compared for -a match by the emoji filter (e.g. `severity`). - -If this field is an array, only the first value will be used. - -[id="{version}-plugins-{type}s-{plugin}-override"] -===== `override` - - * Value type is <> - * Default value is `false` - -If the target field already exists, this configuration item specifies -whether the filter should skip being rewritten as an emoji (default) or -overwrite the target field value with the emoji value. 
- -[id="{version}-plugins-{type}s-{plugin}-sev_alert"] -===== `sev_alert` - - * Value type is <> - * Default value is `"🚨"` - -`sev_alert` selects the emoji/unicode character for Alert severity - -[id="{version}-plugins-{type}s-{plugin}-sev_critical"] -===== `sev_critical` - - * Value type is <> - * Default value is `"🔥"` - -`sev_critical` selects the emoji/unicode character for Critical severity - -[id="{version}-plugins-{type}s-{plugin}-sev_debug"] -===== `sev_debug` - - * Value type is <> - * Default value is `"🐛"` - -`sev_debug` selects the emoji/unicode character for Debug severity - -[id="{version}-plugins-{type}s-{plugin}-sev_emergency"] -===== `sev_emergency` - - * Value type is <> - * Default value is `"💥"` - -`sev_emergency` selects the emoji/unicode character for Emergency severity - -[id="{version}-plugins-{type}s-{plugin}-sev_error"] -===== `sev_error` - - * Value type is <> - * Default value is `"❌"` - -`sev_error` selects the emoji/unicode character for Error severity - -[id="{version}-plugins-{type}s-{plugin}-sev_info"] -===== `sev_info` - - * Value type is <> - * Default value is `"ℹ️"` - -`sev_info` selects the emoji/unicode character for Informational severity - -[id="{version}-plugins-{type}s-{plugin}-sev_notice"] -===== `sev_notice` - - * Value type is <> - * Default value is `"👀"` - -`sev_notice` selects the emoji/unicode character for Notice severity - -[id="{version}-plugins-{type}s-{plugin}-sev_warning"] -===== `sev_warning` - - * Value type is <> - * Default value is `"⚠️"` - -`sev_warning` selects the emoji/unicode character for Warning severity - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"emoji"` - -The target field you wish to populate with the emoji. The default -is a field named `emoji`. Set this to the same value as the source (`field`) -if you want to do a substitution, in this case filter will allways succeed. -This will overwrite the old value of the source field! - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/environment-index.asciidoc b/docs/versioned-plugins/filters/environment-index.asciidoc deleted file mode 100644 index 3c52cb7a2..000000000 --- a/docs/versioned-plugins/filters/environment-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: environment -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::environment-v3.0.3.asciidoc[] -include::environment-v3.0.2.asciidoc[] -include::environment-v3.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/filters/environment-v3.0.1.asciidoc b/docs/versioned-plugins/filters/environment-v3.0.1.asciidoc deleted file mode 100644 index 5738f5e98..000000000 --- a/docs/versioned-plugins/filters/environment-v3.0.1.asciidoc +++ /dev/null @@ -1,83 +0,0 @@ -:plugin: environment -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v3.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-environment/blob/v3.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Environment filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This filter stores environment variables as subfields in the `@metadata` field. -You can then use these values in other parts of the pipeline. - -Adding environment variables is as easy as: - filter { - environment { - add_metadata_from_env => { "field_name" => "ENV_VAR_NAME" } - } - } - -Accessing stored environment variables is now done through the `@metadata` field: - - ["@metadata"]["field_name"] - -This would reference field `field_name`, which in the above example references -the `ENV_VAR_NAME` environment variable. - -IMPORTANT: Previous versions of this plugin put the environment variables as -fields at the root level of the event. Current versions make use of the -`@metadata` field, as outlined. You have to change `add_field_from_env` in -the older versions to `add_metadata_from_env` in the newer version. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Environment Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-add_metadata_from_env>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-add_metadata_from_env"] -===== `add_metadata_from_env` - - * Value type is <> - * Default value is `{}` - -Specify a hash of field names and the environment variable name with the -value you want imported into Logstash. For example: - - add_metadata_from_env => { "field_name" => "ENV_VAR_NAME" } - -or - - add_metadata_from_env => { - "field1" => "ENV1" - "field2" => "ENV2" - # "field_n" => "ENV_n" - } - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/environment-v3.0.2.asciidoc b/docs/versioned-plugins/filters/environment-v3.0.2.asciidoc deleted file mode 100644 index c9af23b9a..000000000 --- a/docs/versioned-plugins/filters/environment-v3.0.2.asciidoc +++ /dev/null @@ -1,83 +0,0 @@ -:plugin: environment -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.2 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-environment/blob/v3.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Environment filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This filter stores environment variables as subfields in the `@metadata` field. -You can then use these values in other parts of the pipeline. - -Adding environment variables is as easy as: - filter { - environment { - add_metadata_from_env => { "field_name" => "ENV_VAR_NAME" } - } - } - -Accessing stored environment variables is now done through the `@metadata` field: - - ["@metadata"]["field_name"] - -This would reference field `field_name`, which in the above example references -the `ENV_VAR_NAME` environment variable. - -IMPORTANT: Previous versions of this plugin put the environment variables as -fields at the root level of the event. Current versions make use of the -`@metadata` field, as outlined. You have to change `add_field_from_env` in -the older versions to `add_metadata_from_env` in the newer version. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Environment Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-add_metadata_from_env>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-add_metadata_from_env"] -===== `add_metadata_from_env` - - * Value type is <> - * Default value is `{}` - -Specify a hash of field names and the environment variable name with the -value you want imported into Logstash. For example: - - add_metadata_from_env => { "field_name" => "ENV_VAR_NAME" } - -or - - add_metadata_from_env => { - "field1" => "ENV1" - "field2" => "ENV2" - # "field_n" => "ENV_n" - } - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/environment-v3.0.3.asciidoc b/docs/versioned-plugins/filters/environment-v3.0.3.asciidoc deleted file mode 100644 index 64b47414d..000000000 --- a/docs/versioned-plugins/filters/environment-v3.0.3.asciidoc +++ /dev/null @@ -1,83 +0,0 @@ -:plugin: environment -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-environment/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Environment filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This filter stores environment variables as subfields in the `@metadata` field. -You can then use these values in other parts of the pipeline. 
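-
-As a sketch of the full round trip, storing an environment variable and then
-copying it into an event field elsewhere in the pipeline (the
-`deployment_env` and `LS_ENV` names are hypothetical):
-
-[source,ruby]
-    filter {
-      environment {
-        add_metadata_from_env => { "deployment_env" => "LS_ENV" }
-      }
-      mutate {
-        add_field => { "environment" => "%{[@metadata][deployment_env]}" }
-      }
-    }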
- -Adding environment variables is as easy as: - filter { - environment { - add_metadata_from_env => { "field_name" => "ENV_VAR_NAME" } - } - } - -Accessing stored environment variables is now done through the `@metadata` field: - - ["@metadata"]["field_name"] - -This would reference field `field_name`, which in the above example references -the `ENV_VAR_NAME` environment variable. - -IMPORTANT: Previous versions of this plugin put the environment variables as -fields at the root level of the event. Current versions make use of the -`@metadata` field, as outlined. You have to change `add_field_from_env` in -the older versions to `add_metadata_from_env` in the newer version. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Environment Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-add_metadata_from_env>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-add_metadata_from_env"] -===== `add_metadata_from_env` - - * Value type is <> - * Default value is `{}` - -Specify a hash of field names and the environment variable name with the -value you want imported into Logstash. For example: - - add_metadata_from_env => { "field_name" => "ENV_VAR_NAME" } - -or - - add_metadata_from_env => { - "field1" => "ENV1" - "field2" => "ENV2" - # "field_n" => "ENV_n" - } - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/example-index.asciidoc b/docs/versioned-plugins/filters/example-index.asciidoc deleted file mode 100644 index 491dc50e6..000000000 --- a/docs/versioned-plugins/filters/example-index.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -:plugin: example -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -|======================================================================= - - diff --git a/docs/versioned-plugins/filters/extractnumbers-index.asciidoc b/docs/versioned-plugins/filters/extractnumbers-index.asciidoc deleted file mode 100644 index 99cb8fb71..000000000 --- a/docs/versioned-plugins/filters/extractnumbers-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: extractnumbers -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::extractnumbers-v3.0.3.asciidoc[] -include::extractnumbers-v3.0.2.asciidoc[] -include::extractnumbers-v3.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/filters/extractnumbers-v3.0.1.asciidoc b/docs/versioned-plugins/filters/extractnumbers-v3.0.1.asciidoc deleted file mode 100644 index e341c17ee..000000000 --- a/docs/versioned-plugins/filters/extractnumbers-v3.0.1.asciidoc +++ /dev/null @@ -1,61 +0,0 @@ -:plugin: extractnumbers -:type: filter - 
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.1
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-extractnumbers/blob/v3.0.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Extractnumbers filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This filter automatically extracts all numbers found inside a string.
-
-This is useful when you have lines that don't match a grok pattern
-or use json but you still need to extract numbers.
-
-Each number is returned in a `@fields.intX` or `@fields.floatX` field,
-where X indicates the position in the string.
-
-The fields produced by this filter are especially useful in combination
-with Kibana's number plotting features.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Extractnumbers Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
- * Value type is <>
- * Default value is `"message"`
-
-The source field for the data. By default, this is the `message` field.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/extractnumbers-v3.0.2.asciidoc b/docs/versioned-plugins/filters/extractnumbers-v3.0.2.asciidoc
deleted file mode 100644
index cc8661d69..000000000
--- a/docs/versioned-plugins/filters/extractnumbers-v3.0.2.asciidoc
+++ /dev/null
@@ -1,61 +0,0 @@
-:plugin: extractnumbers
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.2
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-extractnumbers/blob/v3.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Extractnumbers filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This filter automatically extracts all numbers found inside a string.
-
-This is useful when you have lines that don't match a grok pattern
-or use json but you still need to extract numbers.
-
-Each number is returned in a `@fields.intX` or `@fields.floatX` field,
-where X indicates the position in the string.
-
-The fields produced by this filter are especially useful in combination
-with Kibana's number plotting features.
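-
-As a sketch, a bare configuration (the value shown is the default) applied to
-a message such as `"took 250 ms (0.95 ratio)"`:
-
-[source,ruby]
-    filter {
-      extractnumbers {
-        source => "message"
-      }
-    }
-
-Assuming the naming scheme described above, `250` would land in an integer
-field and `0.95` in a float field, with the suffix indicating position.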
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Extractnumbers Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
- * Value type is <>
- * Default value is `"message"`
-
-The source field for the data. By default, this is the `message` field.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/extractnumbers-v3.0.3.asciidoc b/docs/versioned-plugins/filters/extractnumbers-v3.0.3.asciidoc
deleted file mode 100644
index 41b3cdac2..000000000
--- a/docs/versioned-plugins/filters/extractnumbers-v3.0.3.asciidoc
+++ /dev/null
@@ -1,61 +0,0 @@
-:plugin: extractnumbers
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.3
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-extractnumbers/blob/v3.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Extractnumbers filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This filter automatically extracts all numbers found inside a string.
-
-This is useful when you have lines that don't match a grok pattern
-or use json but you still need to extract numbers.
-
-Each number is returned in a `@fields.intX` or `@fields.floatX` field,
-where X indicates the position in the string.
-
-The fields produced by this filter are especially useful in combination
-with Kibana's number plotting features.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Extractnumbers Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
- * Value type is <>
- * Default value is `"message"`
-
-The source field for the data. By default, this is the `message` field.
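-
-A sketch of pointing the filter at a different source field (the `payload`
-field name is hypothetical):
-
-[source,ruby]
-    filter {
-      extractnumbers {
-        source => "payload"
-      }
-    }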
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/fingerprint-index.asciidoc b/docs/versioned-plugins/filters/fingerprint-index.asciidoc
deleted file mode 100644
index c3e7f2484..000000000
--- a/docs/versioned-plugins/filters/fingerprint-index.asciidoc
+++ /dev/null
@@ -1,18 +0,0 @@
-:plugin: fingerprint
-:type: filter
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-15
-| <> | 2017-08-14
-| <> | 2017-06-23
-|=======================================================================
-
-include::fingerprint-v3.1.2.asciidoc[]
-include::fingerprint-v3.1.1.asciidoc[]
-include::fingerprint-v3.1.0.asciidoc[]
-include::fingerprint-v3.0.4.asciidoc[]
-
diff --git a/docs/versioned-plugins/filters/fingerprint-v3.0.4.asciidoc b/docs/versioned-plugins/filters/fingerprint-v3.0.4.asciidoc
deleted file mode 100644
index acd49ef1d..000000000
--- a/docs/versioned-plugins/filters/fingerprint-v3.0.4.asciidoc
+++ /dev/null
@@ -1,139 +0,0 @@
-:plugin: fingerprint
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.4
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-fingerprint/blob/v3.0.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Fingerprint filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Create consistent hashes (fingerprints) of one or more fields and store
-the result in a new field.
-
-This can be used, for example, to create consistent document ids when
-inserting events into Elasticsearch, allowing events in Logstash to cause
-existing documents to be updated rather than new documents to be created.
-
-NOTE: When using any method other than `UUID`, `PUNCTUATION` or `MURMUR3`,
-you must set the key, otherwise the plugin will raise an exception.
-
-NOTE: When the `method` option is set to `UUID` the result won't be
-a consistent hash but a random
-https://en.wikipedia.org/wiki/Universally_unique_identifier[UUID].
-To generate UUIDs, prefer the <>.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Fingerprint Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-base64encode>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-concatenate_sources>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-method>> |<>, one of `["SHA1", "SHA256", "SHA384", "SHA512", "MD5", "MURMUR3", "IPV4_NETWORK", "UUID", "PUNCTUATION"]`|Yes -| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-base64encode"] -===== `base64encode` - - * Value type is <> - * Default value is `false` - -When set to `true`, the `SHA1`, `SHA256`, `SHA384`, `SHA512` and `MD5` fingerprint methods will produce -base64 encoded rather than hex encoded strings. - -[id="{version}-plugins-{type}s-{plugin}-concatenate_sources"] -===== `concatenate_sources` - - * Value type is <> - * Default value is `false` - -When set to `true` and `method` isn't `UUID` or `PUNCTUATION`, the -plugin concatenates the names and values of all fields given in the -`source` option into one string (like the old checksum filter) before -doing the fingerprint computation. If `false` and multiple source -fields are given, the target field will be an array with fingerprints -of the source fields given. - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * Value type is <> - * There is no default value for this setting. - -When used with the `IPV4_NETWORK` method fill in the subnet prefix length. -Key is required with all methods except `MURMUR3`, `PUNCTUATION` or `UUID`. -With other methods fill in the HMAC key. - -[id="{version}-plugins-{type}s-{plugin}-method"] -===== `method` - - * This is a required setting. - * Value can be any of: `SHA1`, `SHA256`, `SHA384`, `SHA512`, `MD5`, `MURMUR3`, `IPV4_NETWORK`, `UUID`, `PUNCTUATION` - * Default value is `"SHA1"` - -The fingerprint method to use. - -If set to `SHA1`, `SHA256`, `SHA384`, `SHA512`, or `MD5` the -cryptographic keyed-hash function with the same name will be used to -generate the fingerprint. If set to `MURMUR3` the non-cryptographic -MurmurHash function will be used. - -If set to `IPV4_NETWORK` the input data needs to be a IPv4 address and -the hash value will be the masked-out address using the number of bits -specified in the `key` option. For example, with "1.2.3.4" as the input -and `key` set to 16, the hash becomes "1.2.0.0". - -If set to `PUNCTUATION`, all non-punctuation characters will be removed -from the input string. - -If set to `UUID`, a -https://en.wikipedia.org/wiki/Universally_unique_identifier[UUID] will -be generated. The result will be random and thus not a consistent hash. - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * Value type is <> - * Default value is `"message"` - -The name(s) of the source field(s) whose contents will be used -to create the fingerprint. If an array is given, see the -`concatenate_sources` option. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"fingerprint"` - -The name of the field where the generated fingerprint will be stored. -Any current contents of that field will be overwritten. 
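-
-A sketch of the document-deduplication pattern from the description, using a
-hypothetical HMAC key and a `@metadata` target so the fingerprint itself is
-not indexed:
-
-[source,ruby]
-    filter {
-      fingerprint {
-        source => "message"
-        method => "SHA256"
-        key => "my-hmac-key"
-        target => "[@metadata][fingerprint]"
-      }
-    }
-    output {
-      elasticsearch {
-        document_id => "%{[@metadata][fingerprint]}"
-      }
-    }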
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/fingerprint-v3.1.0.asciidoc b/docs/versioned-plugins/filters/fingerprint-v3.1.0.asciidoc
deleted file mode 100644
index 5fff72e2a..000000000
--- a/docs/versioned-plugins/filters/fingerprint-v3.1.0.asciidoc
+++ /dev/null
@@ -1,153 +0,0 @@
-:plugin: fingerprint
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.1.0
-:release_date: 2017-08-14
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-fingerprint/blob/v3.1.0/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Fingerprint filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Create consistent hashes (fingerprints) of one or more fields and store
-the result in a new field.
-
-This can be used, for example, to create consistent document ids when
-inserting events into Elasticsearch, allowing events in Logstash to cause
-existing documents to be updated rather than new documents to be created.
-
-NOTE: When using any method other than `UUID`, `PUNCTUATION` or `MURMUR3`,
-you must set the key, otherwise the plugin will raise an exception.
-
-NOTE: When the `method` option is set to `UUID` the result won't be
-a consistent hash but a random
-https://en.wikipedia.org/wiki/Universally_unique_identifier[UUID].
-To generate UUIDs, prefer the <>.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Fingerprint Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-base64encode>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-concatenate_sources>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-concatenate_all_fields>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-method>> |<>, one of `["SHA1", "SHA256", "SHA384", "SHA512", "MD5", "MURMUR3", "IPV4_NETWORK", "UUID", "PUNCTUATION"]`|Yes
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-base64encode"]
-===== `base64encode`
-
- * Value type is <>
- * Default value is `false`
-
-When set to `true`, the `SHA1`, `SHA256`, `SHA384`, `SHA512` and `MD5` fingerprint methods will produce
-base64 encoded rather than hex encoded strings.
-
-[id="{version}-plugins-{type}s-{plugin}-concatenate_sources"]
-===== `concatenate_sources`
-
- * Value type is <>
- * Default value is `false`
-
-When set to `true` and `method` isn't `UUID` or `PUNCTUATION`, the
-plugin concatenates the names and values of all fields given in the
-`source` option into one string (like the old checksum filter) before
-doing the fingerprint computation. If `false` and multiple source
-fields are given, the target field will be an array with fingerprints
-of the source fields given.
-
-[id="{version}-plugins-{type}s-{plugin}-concatenate_all_fields"]
-===== `concatenate_all_fields`
-
- * Value type is <>
- * Default value is `false`
-
-When set to `true` and `method` isn't `UUID` or `PUNCTUATION`, the
-plugin concatenates the names and values of all fields of the event
-into one string (like the old checksum filter) before doing the
-fingerprint computation. If `false` and at least one source field is
-given, the target field will be an array with fingerprints of the
-source fields given.
-
-[id="{version}-plugins-{type}s-{plugin}-key"]
-===== `key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-When used with the `IPV4_NETWORK` method, fill in the subnet prefix length.
-Key is required with all methods except `MURMUR3`, `PUNCTUATION` or `UUID`.
-With other methods, fill in the HMAC key.
-
-[id="{version}-plugins-{type}s-{plugin}-method"]
-===== `method`
-
- * This is a required setting.
- * Value can be any of: `SHA1`, `SHA256`, `SHA384`, `SHA512`, `MD5`, `MURMUR3`, `IPV4_NETWORK`, `UUID`, `PUNCTUATION`
- * Default value is `"SHA1"`
-
-The fingerprint method to use.
-
-If set to `SHA1`, `SHA256`, `SHA384`, `SHA512`, or `MD5` the
-cryptographic keyed-hash function with the same name will be used to
-generate the fingerprint. If set to `MURMUR3` the non-cryptographic
-MurmurHash function will be used.
-
-If set to `IPV4_NETWORK` the input data needs to be an IPv4 address and
-the hash value will be the masked-out address using the number of bits
-specified in the `key` option. For example, with "1.2.3.4" as the input
-and `key` set to 16, the hash becomes "1.2.0.0".
-
-If set to `PUNCTUATION`, all non-punctuation characters will be removed
-from the input string.
-
-If set to `UUID`, a
-https://en.wikipedia.org/wiki/Universally_unique_identifier[UUID] will
-be generated. The result will be random and thus not a consistent hash.
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
- * Value type is <>
- * Default value is `"message"`
-
-The name(s) of the source field(s) whose contents will be used
-to create the fingerprint. If an array is given, see the
-`concatenate_sources` option.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * Default value is `"fingerprint"`
-
-The name of the field where the generated fingerprint will be stored.
-Any current contents of that field will be overwritten.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/fingerprint-v3.1.1.asciidoc b/docs/versioned-plugins/filters/fingerprint-v3.1.1.asciidoc
deleted file mode 100644
index 45f531d13..000000000
--- a/docs/versioned-plugins/filters/fingerprint-v3.1.1.asciidoc
+++ /dev/null
@@ -1,153 +0,0 @@
-:plugin: fingerprint
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.1.1
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-fingerprint/blob/v3.1.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Fingerprint filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Create consistent hashes (fingerprints) of one or more fields and store
-the result in a new field.
-
-This can be used, for example, to create consistent document ids when
-inserting events into Elasticsearch, allowing events in Logstash to cause
-existing documents to be updated rather than new documents to be created.
-
-NOTE: When using any method other than `UUID`, `PUNCTUATION` or `MURMUR3`,
-you must set the key, otherwise the plugin will raise an exception.
-
-NOTE: When the `method` option is set to `UUID` the result won't be
-a consistent hash but a random
-https://en.wikipedia.org/wiki/Universally_unique_identifier[UUID].
-To generate UUIDs, prefer the <>.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Fingerprint Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-base64encode>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-concatenate_sources>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-concatenate_all_fields>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-method>> |<>, one of `["SHA1", "SHA256", "SHA384", "SHA512", "MD5", "MURMUR3", "IPV4_NETWORK", "UUID", "PUNCTUATION"]`|Yes
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-base64encode"]
-===== `base64encode`
-
- * Value type is <>
- * Default value is `false`
-
-When set to `true`, the `SHA1`, `SHA256`, `SHA384`, `SHA512` and `MD5` fingerprint methods will produce
-base64 encoded rather than hex encoded strings.
-
-[id="{version}-plugins-{type}s-{plugin}-concatenate_sources"]
-===== `concatenate_sources`
-
- * Value type is <>
- * Default value is `false`
-
-When set to `true` and `method` isn't `UUID` or `PUNCTUATION`, the
-plugin concatenates the names and values of all fields given in the
-`source` option into one string (like the old checksum filter) before
-doing the fingerprint computation. If `false` and multiple source
-fields are given, the target field will be an array with fingerprints
-of the source fields given.
-
-[id="{version}-plugins-{type}s-{plugin}-concatenate_all_fields"]
-===== `concatenate_all_fields`
-
- * Value type is <>
- * Default value is `false`
-
-When set to `true` and `method` isn't `UUID` or `PUNCTUATION`, the
-plugin concatenates the names and values of all fields of the event
-into one string (like the old checksum filter) before doing the
-fingerprint computation. If `false` and at least one source field is
-given, the target field will be an array with fingerprints of the
-source fields given.
-
-[id="{version}-plugins-{type}s-{plugin}-key"]
-===== `key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-When used with the `IPV4_NETWORK` method, fill in the subnet prefix length.
-Key is required with all methods except `MURMUR3`, `PUNCTUATION` or `UUID`.
-With other methods, fill in the HMAC key.
-
-[id="{version}-plugins-{type}s-{plugin}-method"]
-===== `method`
-
- * This is a required setting.
- * Value can be any of: `SHA1`, `SHA256`, `SHA384`, `SHA512`, `MD5`, `MURMUR3`, `IPV4_NETWORK`, `UUID`, `PUNCTUATION`
- * Default value is `"SHA1"`
-
-The fingerprint method to use.
-
-If set to `SHA1`, `SHA256`, `SHA384`, `SHA512`, or `MD5` the
-cryptographic keyed-hash function with the same name will be used to
-generate the fingerprint. If set to `MURMUR3` the non-cryptographic
-MurmurHash function will be used.
-
-If set to `IPV4_NETWORK` the input data needs to be an IPv4 address and
-the hash value will be the masked-out address using the number of bits
-specified in the `key` option. For example, with "1.2.3.4" as the input
-and `key` set to 16, the hash becomes "1.2.0.0".
-
-If set to `PUNCTUATION`, all non-punctuation characters will be removed
-from the input string.
-
-If set to `UUID`, a
-https://en.wikipedia.org/wiki/Universally_unique_identifier[UUID] will
-be generated. The result will be random and thus not a consistent hash.
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
- * Value type is <>
- * Default value is `"message"`
-
-The name(s) of the source field(s) whose contents will be used
-to create the fingerprint. If an array is given, see the
-`concatenate_sources` option.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * Default value is `"fingerprint"`
-
-The name of the field where the generated fingerprint will be stored.
-Any current contents of that field will be overwritten.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/fingerprint-v3.1.2.asciidoc b/docs/versioned-plugins/filters/fingerprint-v3.1.2.asciidoc
deleted file mode 100644
index a7664b280..000000000
--- a/docs/versioned-plugins/filters/fingerprint-v3.1.2.asciidoc
+++ /dev/null
@@ -1,153 +0,0 @@
-:plugin: fingerprint
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.1.2
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-fingerprint/blob/v3.1.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Fingerprint filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Create consistent hashes (fingerprints) of one or more fields and store
-the result in a new field.
-
-This can be used, for example, to create consistent document ids when
-inserting events into Elasticsearch, allowing events in Logstash to cause
-existing documents to be updated rather than new documents to be created.
-
-NOTE: When using any method other than `UUID`, `PUNCTUATION` or `MURMUR3`,
-you must set the key, otherwise the plugin will raise an exception.
-
-NOTE: When the `method` option is set to `UUID` the result won't be
-a consistent hash but a random
-https://en.wikipedia.org/wiki/Universally_unique_identifier[UUID].
-To generate UUIDs, prefer the <>.
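-
-A sketch of fingerprinting several fields as one value (the field names are
-hypothetical; `MURMUR3` needs no `key`):
-
-[source,ruby]
-    filter {
-      fingerprint {
-        source => ["src_ip", "dst_ip"]
-        concatenate_sources => true
-        method => "MURMUR3"
-      }
-    }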
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Fingerprint Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-base64encode>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-concatenate_sources>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-concatenate_all_fields>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-method>> |<>, one of `["SHA1", "SHA256", "SHA384", "SHA512", "MD5", "MURMUR3", "IPV4_NETWORK", "UUID", "PUNCTUATION"]`|Yes
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-base64encode"]
-===== `base64encode`
-
- * Value type is <>
- * Default value is `false`
-
-When set to `true`, the `SHA1`, `SHA256`, `SHA384`, `SHA512` and `MD5` fingerprint methods will produce
-base64 encoded rather than hex encoded strings.
-
-[id="{version}-plugins-{type}s-{plugin}-concatenate_sources"]
-===== `concatenate_sources`
-
- * Value type is <>
- * Default value is `false`
-
-When set to `true` and `method` isn't `UUID` or `PUNCTUATION`, the
-plugin concatenates the names and values of all fields given in the
-`source` option into one string (like the old checksum filter) before
-doing the fingerprint computation. If `false` and multiple source
-fields are given, the target field will be an array with fingerprints
-of the source fields given.
-
-[id="{version}-plugins-{type}s-{plugin}-concatenate_all_fields"]
-===== `concatenate_all_fields`
-
- * Value type is <>
- * Default value is `false`
-
-When set to `true` and `method` isn't `UUID` or `PUNCTUATION`, the
-plugin concatenates the names and values of all fields of the event
-into one string (like the old checksum filter) before doing the
-fingerprint computation. If `false` and at least one source field is
-given, the target field will be an array with fingerprints of the
-source fields given.
-
-[id="{version}-plugins-{type}s-{plugin}-key"]
-===== `key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-When used with the `IPV4_NETWORK` method, fill in the subnet prefix length.
-Key is required with all methods except `MURMUR3`, `PUNCTUATION` or `UUID`.
-With other methods, fill in the HMAC key.
-
-[id="{version}-plugins-{type}s-{plugin}-method"]
-===== `method`
-
- * This is a required setting.
- * Value can be any of: `SHA1`, `SHA256`, `SHA384`, `SHA512`, `MD5`, `MURMUR3`, `IPV4_NETWORK`, `UUID`, `PUNCTUATION`
- * Default value is `"SHA1"`
-
-The fingerprint method to use.
-
-If set to `SHA1`, `SHA256`, `SHA384`, `SHA512`, or `MD5` the
-cryptographic keyed-hash function with the same name will be used to
-generate the fingerprint. If set to `MURMUR3` the non-cryptographic
-MurmurHash function will be used.
-
-If set to `IPV4_NETWORK` the input data needs to be an IPv4 address and
-the hash value will be the masked-out address using the number of bits
-specified in the `key` option. For example, with "1.2.3.4" as the input
-and `key` set to 16, the hash becomes "1.2.0.0".
-
-If set to `PUNCTUATION`, all non-punctuation characters will be removed
-from the input string.
-
-If set to `UUID`, a
-https://en.wikipedia.org/wiki/Universally_unique_identifier[UUID] will
-be generated. The result will be random and thus not a consistent hash.
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
- * Value type is <>
- * Default value is `"message"`
-
-The name(s) of the source field(s) whose contents will be used
-to create the fingerprint. If an array is given, see the
-`concatenate_sources` option.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * Default value is `"fingerprint"`
-
-The name of the field where the generated fingerprint will be stored.
-Any current contents of that field will be overwritten.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/geoip-index.asciidoc b/docs/versioned-plugins/filters/geoip-index.asciidoc
deleted file mode 100644
index 5b47f7cec..000000000
--- a/docs/versioned-plugins/filters/geoip-index.asciidoc
+++ /dev/null
@@ -1,28 +0,0 @@
-:plugin: geoip
-:type: filter
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-12-21
-| <> | 2017-11-13
-| <> | 2017-08-16
-| <> | 2017-08-01
-| <> | 2017-08-18
-| <> | 2017-07-17
-| <> | 2017-06-23
-| <> | 2017-06-22
-| <> | 2017-05-15
-|=======================================================================
-
-include::geoip-v5.0.3.asciidoc[]
-include::geoip-v5.0.2.asciidoc[]
-include::geoip-v5.0.1.asciidoc[]
-include::geoip-v5.0.0.asciidoc[]
-include::geoip-v4.3.1.asciidoc[]
-include::geoip-v4.3.0.asciidoc[]
-include::geoip-v4.2.1.asciidoc[]
-include::geoip-v4.2.0.asciidoc[]
-include::geoip-v4.1.1.asciidoc[]
-
diff --git a/docs/versioned-plugins/filters/geoip-v4.1.1.asciidoc b/docs/versioned-plugins/filters/geoip-v4.1.1.asciidoc
deleted file mode 100644
index 55a042775..000000000
--- a/docs/versioned-plugins/filters/geoip-v4.1.1.asciidoc
+++ /dev/null
@@ -1,180 +0,0 @@
-:plugin: geoip
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.1.1
-:release_date: 2017-05-15
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-geoip/blob/v4.1.1/CHANGELOG.md
-:include_path: ../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Geoip filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The GeoIP filter adds information about the geographical location of IP addresses,
-based on data from the Maxmind GeoLite2 database. Commercial databases from Maxmind are
-also supported in this plugin.
-
-A `[geoip][location]` field is created if
-the GeoIP lookup returns a latitude and longitude. The field is stored in
-http://geojson.org/geojson-spec.html[GeoJSON] format. Additionally,
-the default Elasticsearch template provided with the
-<> maps
-the `[geoip][location]` field to an http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/mapping-geo-point-type.html#_mapping_options[Elasticsearch geo_point].
-
-As this field is a `geo_point` _and_ it is still valid GeoJSON, you get
-the awesomeness of Elasticsearch's geospatial query, facet and filter functions
-and the flexibility of having GeoJSON for all other applications (like Kibana's
-map visualization).
-
-[NOTE]
---
-This product includes GeoLite2 data created by MaxMind, available from
-http://www.maxmind.com. This database is licensed under
-http://creativecommons.org/licenses/by-sa/4.0/[Creative Commons Attribution-ShareAlike 4.0 International License].
-
-Versions 4.0.0 and later of the GeoIP filter use the MaxMind GeoLite2 database
-and support both IPv4 and IPv6 lookups. Versions prior to 4.0.0 use the legacy
-MaxMind GeoLite database and support IPv4 lookups only.
---
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Geoip Filter Configuration Options
-
-This plugin supports the following configuration options plus the <> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-cache_size>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-database>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-|=======================================================================
-
-Also see <> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-cache_size"]
-===== `cache_size`
-
- * Value type is <>
- * Default value is `1000`
-
-GeoIP lookup is surprisingly expensive. This filter uses a cache to take advantage of the fact that
-IP addresses are often found adjacent to one another in log files and rarely have a random distribution.
-The higher you set this the more likely an item is to be in the cache and the faster this filter will run.
-However, if you set this too high you can use more memory than desired.
-Since the GeoIP API was upgraded to v2, there is currently no eviction policy; if the cache is full, no more records can be added.
-Experiment with different values for this option to find the best performance for your dataset.
-
-This MUST be set to a value > 0. There is really no reason to not want this behavior, the overhead is minimal
-and the speed gains are large.
-
-It is important to note that this config value is global to the geoip_type. That is to say, all instances of the geoip filter
-of the same geoip_type share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit
-to having multiple caches for different instances at different points in the pipeline; that would just increase the
-number of cache misses and waste memory.
-
-[id="{version}-plugins-{type}s-{plugin}-database"]
-===== `database`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The path to Maxmind's database file that Logstash should use. The default database is GeoLite2-City.
-GeoLite2-City, GeoLite2-Country, GeoLite2-ASN are the free databases from Maxmind that are supported.
-GeoIP2-City, GeoIP2-ISP, GeoIP2-Country are the commercial databases from Maxmind that are supported.
-
-If not specified, this will default to the GeoLite2 City database that ships
-with Logstash.
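-
-A sketch of pointing the filter at a database other than the bundled one (the
-path and the `clientip` field name are hypothetical):
-
-[source,ruby]
-    filter {
-      geoip {
-        source => "clientip"
-        database => "/etc/logstash/GeoLite2-ASN.mmdb"
-      }
-    }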
-
-[id="{version}-plugins-{type}s-{plugin}-fields"]
-===== `fields`
-
- * Value type is <>
- * There is no default value for this setting.
-
-An array of geoip fields to be included in the event.
-
-Possible fields depend on the database type. By default, all geoip fields
-are included in the event.
-
-For the built-in GeoLite2 City database, the following are available:
-`city_name`, `continent_code`, `country_code2`, `country_code3`, `country_name`,
-`dma_code`, `ip`, `latitude`, `longitude`, `postal_code`, `region_name` and `timezone`.
-
-[id="{version}-plugins-{type}s-{plugin}-lru_cache_size"]
-===== `lru_cache_size` (DEPRECATED)
-
- * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
- * Value type is <>
- * Default value is `1000`
-
-GeoIP lookup is surprisingly expensive. This filter uses an LRU cache to take advantage of the fact that
-IP addresses are often found adjacent to one another in log files and rarely have a random distribution.
-The higher you set this the more likely an item is to be in the cache and the faster this filter will run.
-However, if you set this too high you can use more memory than desired.
-
-Experiment with different values for this option to find the best performance for your dataset.
-
-This MUST be set to a value > 0. There is really no reason to not want this behavior, the overhead is minimal
-and the speed gains are large.
-
-It is important to note that this config value is global to the geoip_type. That is to say, all instances of the geoip filter
-of the same geoip_type share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit
-to having multiple caches for different instances at different points in the pipeline; that would just increase the
-number of cache misses and waste memory.
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The field containing the IP address or hostname to map via geoip. If
-this field is an array, only the first value will be used.
-
-[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
-===== `tag_on_failure`
-
- * Value type is <>
- * Default value is `["_geoip_lookup_failure"]`
-
-Tags the event on failure to look up geo information. This can be used in later analysis.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * Default value is `"geoip"`
-
-Specify the field into which Logstash should store the geoip data.
-This can be useful, for example, if you have `src_ip` and `dst_ip` fields and
-would like the GeoIP information of both IPs.
-
-If you save the data to a target field other than `geoip` and want to use the
-`geo_point` related functions in Elasticsearch, you need to alter the template
-provided with the Elasticsearch output and configure the output to use the
-new template.
-
-Even if you don't use the `geo_point` mapping, the `[target][location]` field
-is still valid GeoJSON.
-
-
-
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/geoip-v4.2.0.asciidoc b/docs/versioned-plugins/filters/geoip-v4.2.0.asciidoc
deleted file mode 100644
index 7661e39ae..000000000
--- a/docs/versioned-plugins/filters/geoip-v4.2.0.asciidoc
+++ /dev/null
@@ -1,195 +0,0 @@
-:plugin: geoip
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-/////////////////////////////////////////// -:version: v4.2.0 -:release_date: 2017-06-22 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-geoip/blob/v4.2.0/CHANGELOG.md -:include_path: ../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Geoip filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The GeoIP filter adds information about the geographical location of IP addresses, -based on data from the Maxmind GeoLite2 databases. - -==== Supported Databases - -This plugin is bundled with https://dev.maxmind.com/geoip/geoip2/geolite2[GeoLite2] City database out of the box. From Maxmind's description -- -"GeoLite2 databases are free IP geolocation databases comparable to, but less accurate than, MaxMind’s -GeoIP2 databases". Please see GeoIP Lite2 license for more details. - -https://www.maxmind.com/en/geoip2-databases[Commercial databases] from Maxmind are also supported in this plugin. - -If you need to use databases other than the bundled GeoLite2 City, you can download them directly -from Maxmind's website and use the `database` option to specify their location. The GeoLite2 databases -can be downloaded from https://dev.maxmind.com/geoip/geoip2/geolite2[here]. - -If you would like to get Autonomous System Number(ASN) information, you can use the GeoLite2-ASN database. - -==== Details - -A `[geoip][location]` field is created if -the GeoIP lookup returns a latitude and longitude. The field is stored in -http://geojson.org/geojson-spec.html[GeoJSON] format. Additionally, -the default Elasticsearch template provided with the -<> maps -the `[geoip][location]` field to an http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/mapping-geo-point-type.html#_mapping_options[Elasticsearch geo_point]. - -As this field is a `geo_point` _and_ it is still valid GeoJSON, you get -the awesomeness of Elasticsearch's geospatial query, facet and filter functions -and the flexibility of having GeoJSON for all other applications (like Kibana's -map visualization). - -[NOTE] --- -This product includes GeoLite2 data created by MaxMind, available from -http://www.maxmind.com. This database is licensed under -http://creativecommons.org/licenses/by-sa/4.0/[Creative Commons Attribution-ShareAlike 4.0 International License]. - -Versions 4.0.0 and later of the GeoIP filter use the MaxMind GeoLite2 database -and support both IPv4 and IPv6 lookups. Versions prior to 4.0.0 use the legacy -MaxMind GeoLite database and support IPv4 lookups only. --- - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Geoip Filter Configuration Options - -This plugin supports the following configuration options plus the <> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cache_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-database>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <> for a list of options supported by all -filter plugins. 
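-
-A sketch of a typical configuration (the `clientip` field name is
-hypothetical; `fields` trims the event to a subset of the documented fields):
-
-[source,ruby]
-    filter {
-      geoip {
-        source => "clientip"
-        fields => ["city_name", "country_name", "timezone"]
-      }
-    }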
- -  - -[id="{version}-plugins-{type}s-{plugin}-cache_size"] -===== `cache_size` - - * Value type is <> - * Default value is `1000` - -GeoIP lookup is surprisingly expensive. This filter uses an cache to take advantage of the fact that -IPs agents are often found adjacent to one another in log files and rarely have a random distribution. -The higher you set this the more likely an item is to be in the cache and the faster this filter will run. -However, if you set this too high you can use more memory than desired. -Since the Geoip API upgraded to v2, there is not any eviction policy so far, if cache is full, no more record can be added. -Experiment with different values for this option to find the best performance for your dataset. - -This MUST be set to a value > 0. There is really no reason to not want this behavior, the overhead is minimal -and the speed gains are large. - -It is important to note that this config value is global to the geoip_type. That is to say all instances of the geoip filter -of the same geoip_type share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit -to having multiple caches for different instances at different points in the pipeline, that would just increase the -number of cache misses and waste memory. - -[id="{version}-plugins-{type}s-{plugin}-database"] -===== `database` - - * Value type is <> - * There is no default value for this setting. - -The path to Maxmind's database file that Logstash should use. The default database is GeoLite2-City. -GeoLite2-City, GeoLite2-Country, GeoLite2-ASN are the free databases from Maxmind that are supported. -GeoIP2-City, GeoIP2-ISP, GeoIP2-Country are the commercial databases from Maxmind that are supported. - -If not specified, this will default to the GeoLite2 City database that ships -with Logstash. - -[id="{version}-plugins-{type}s-{plugin}-fields"] -===== `fields` - - * Value type is <> - * There is no default value for this setting. - -An array of geoip fields to be included in the event. - -Possible fields depend on the database type. By default, all geoip fields -are included in the event. - -For the built-in GeoLite2 City database, the following are available: -`city_name`, `continent_code`, `country_code2`, `country_code3`, `country_name`, -`dma_code`, `ip`, `latitude`, `longitude`, `postal_code`, `region_name` and `timezone`. - -[id="{version}-plugins-{type}s-{plugin}-lru_cache_size"] -===== `lru_cache_size` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * Default value is `1000` - -GeoIP lookup is surprisingly expensive. This filter uses an LRU cache to take advantage of the fact that -IPs agents are often found adjacent to one another in log files and rarely have a random distribution. -The higher you set this the more likely an item is to be in the cache and the faster this filter will run. -However, if you set this too high you can use more memory than desired. - -Experiment with different values for this option to find the best performance for your dataset. - -This MUST be set to a value > 0. There is really no reason to not want this behavior, the overhead is minimal -and the speed gains are large. - -It is important to note that this config value is global to the geoip_type. That is to say all instances of the geoip filter -of the same geoip_type share the same cache. The last declared cache size will 'win'. 
The reason for this is that there would be no benefit
-to having multiple caches for different instances at different points in the pipeline; that would just increase the
-number of cache misses and waste memory.
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The field containing the IP address or hostname to map via geoip. If
-this field is an array, only the first value will be used.
-
-[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
-===== `tag_on_failure`
-
- * Value type is <>
- * Default value is `["_geoip_lookup_failure"]`
-
-Tags the event on failure to look up geo information. This can be used in later analysis.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * Default value is `"geoip"`
-
-Specify the field into which Logstash should store the geoip data.
-This can be useful, for example, if you have `src_ip` and `dst_ip` fields and
-would like the GeoIP information of both IPs.
-
-If you save the data to a target field other than `geoip` and want to use the
-`geo_point` related functions in Elasticsearch, you need to alter the template
-provided with the Elasticsearch output and configure the output to use the
-new template.
-
-Even if you don't use the `geo_point` mapping, the `[target][location]` field
-is still valid GeoJSON.
-
-
-
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/geoip-v4.2.1.asciidoc b/docs/versioned-plugins/filters/geoip-v4.2.1.asciidoc
deleted file mode 100644
index 093847bce..000000000
--- a/docs/versioned-plugins/filters/geoip-v4.2.1.asciidoc
+++ /dev/null
@@ -1,196 +0,0 @@
-:plugin: geoip
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.2.1
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-geoip/blob/v4.2.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Geoip filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The GeoIP filter adds information about the geographical location of IP addresses,
-based on data from the Maxmind GeoLite2 databases.
-
-==== Supported Databases
-
-This plugin is bundled with the https://dev.maxmind.com/geoip/geoip2/geolite2[GeoLite2] City database out of the box. From Maxmind's description --
-"GeoLite2 databases are free IP geolocation databases comparable to, but less accurate than, MaxMind’s
-GeoIP2 databases". Please see the GeoLite2 license for more details.
-
-https://www.maxmind.com/en/geoip2-databases[Commercial databases] from Maxmind are also supported in this plugin.
-
-If you need to use databases other than the bundled GeoLite2 City, you can download them directly
-from Maxmind's website and use the `database` option to specify their location. The GeoLite2 databases
-can be downloaded from https://dev.maxmind.com/geoip/geoip2/geolite2[here].
-
-If you would like to get Autonomous System Number (ASN) information, you can use the GeoLite2-ASN database.
-
-==== Details
-
-A `[geoip][location]` field is created if
-the GeoIP lookup returns a latitude and longitude.
The field is stored in
-http://geojson.org/geojson-spec.html[GeoJSON] format. Additionally,
-the default Elasticsearch template provided with the
-<> maps
-the `[geoip][location]` field to an http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/mapping-geo-point-type.html#_mapping_options[Elasticsearch geo_point].
-
-As this field is a `geo_point` _and_ it is still valid GeoJSON, you get
-the awesomeness of Elasticsearch's geospatial query, facet and filter functions
-and the flexibility of having GeoJSON for all other applications (like Kibana's
-map visualization).
-
-[NOTE]
---
-This product includes GeoLite2 data created by MaxMind, available from
-http://www.maxmind.com. This database is licensed under
-http://creativecommons.org/licenses/by-sa/4.0/[Creative Commons Attribution-ShareAlike 4.0 International License].
-
-Versions 4.0.0 and later of the GeoIP filter use the MaxMind GeoLite2 database
-and support both IPv4 and IPv6 lookups. Versions prior to 4.0.0 use the legacy
-MaxMind GeoLite database and support IPv4 lookups only.
---
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Geoip Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-cache_size>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-database>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-cache_size"]
-===== `cache_size`
-
- * Value type is <>
- * Default value is `1000`
-
-GeoIP lookup is surprisingly expensive. This filter uses a cache to take advantage of the fact that
-IP addresses are often found adjacent to one another in log files and rarely have a random distribution.
-The higher you set this value, the more likely an item is to be in the cache and the faster this filter will run.
-However, if you set it too high, you can use more memory than desired.
-Since the upgrade to v2 of the GeoIP API, there is no eviction policy: once the cache is full, no more records can be added.
-Experiment with different values for this option to find the best performance for your dataset.
-
-This MUST be set to a value > 0. There is really no reason not to want this behavior; the overhead is minimal
-and the speed gains are large.
-
-It is important to note that this config value is global to the geoip_type. That is to say, all instances of the geoip filter
-of the same geoip_type share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit
-to having multiple caches for different instances at different points in the pipeline; that would just increase the
-number of cache misses and waste memory.
-
-[id="{version}-plugins-{type}s-{plugin}-database"]
-===== `database`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The path to Maxmind's database file that Logstash should use.
The default database is GeoLite2-City.
-GeoLite2-City, GeoLite2-Country, and GeoLite2-ASN are the free databases from Maxmind that are supported.
-GeoIP2-City, GeoIP2-ISP, and GeoIP2-Country are the commercial databases from Maxmind that are supported.
-
-If not specified, this will default to the GeoLite2 City database that ships
-with Logstash.
-
-[id="{version}-plugins-{type}s-{plugin}-fields"]
-===== `fields`
-
- * Value type is <>
- * There is no default value for this setting.
-
-An array of geoip fields to be included in the event.
-
-Possible fields depend on the database type. By default, all geoip fields
-are included in the event.
-
-For the built-in GeoLite2 City database, the following are available:
-`city_name`, `continent_code`, `country_code2`, `country_code3`, `country_name`,
-`dma_code`, `ip`, `latitude`, `longitude`, `postal_code`, `region_name` and `timezone`.
-
-[id="{version}-plugins-{type}s-{plugin}-lru_cache_size"]
-===== `lru_cache_size` (DEPRECATED)
-
- * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
- * Value type is <>
- * Default value is `1000`
-
-GeoIP lookup is surprisingly expensive. This filter uses an LRU cache to take advantage of the fact that
-IP addresses are often found adjacent to one another in log files and rarely have a random distribution.
-The higher you set this value, the more likely an item is to be in the cache and the faster this filter will run.
-However, if you set it too high, you can use more memory than desired.
-
-Experiment with different values for this option to find the best performance for your dataset.
-
-This MUST be set to a value > 0. There is really no reason not to want this behavior; the overhead is minimal
-and the speed gains are large.
-
-It is important to note that this config value is global to the geoip_type. That is to say, all instances of the geoip filter
-of the same geoip_type share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit
-to having multiple caches for different instances at different points in the pipeline; that would just increase the
-number of cache misses and waste memory.
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The field containing the IP address or hostname to map via geoip. If
-this field is an array, only the first value will be used.
-
-[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
-===== `tag_on_failure`
-
- * Value type is <>
- * Default value is `["_geoip_lookup_failure"]`
-
-Tags the event on failure to look up geo information. This can be used in later analysis.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * Default value is `"geoip"`
-
-Specify the field into which Logstash should store the geoip data.
-This can be useful, for example, if you have `src_ip` and `dst_ip` fields and
-would like the GeoIP information of both IPs.
-
-If you save the data to a target field other than `geoip` and want to use the
-`geo_point` related functions in Elasticsearch, you need to alter the template
-provided with the Elasticsearch output and configure the output to use the
-new template.
-
-Even if you don't use the `geo_point` mapping, the `[target][location]` field
-is still valid GeoJSON.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/geoip-v4.3.0.asciidoc b/docs/versioned-plugins/filters/geoip-v4.3.0.asciidoc
deleted file mode 100644
index b8ab940d7..000000000
--- a/docs/versioned-plugins/filters/geoip-v4.3.0.asciidoc
+++ /dev/null
@@ -1,206 +0,0 @@
-:plugin: geoip
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.3.0
-:release_date: 2017-07-17
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-geoip/blob/v4.3.0/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Geoip filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The GeoIP filter adds information about the geographical location of IP addresses,
-based on data from the Maxmind GeoLite2 databases.
-
-==== Supported Databases
-
-This plugin is bundled with the https://dev.maxmind.com/geoip/geoip2/geolite2[GeoLite2] City database out of the box. From Maxmind's description --
-"GeoLite2 databases are free IP geolocation databases comparable to, but less accurate than, MaxMind’s
-GeoIP2 databases". Please see the GeoLite2 license for more details.
-
-https://www.maxmind.com/en/geoip2-databases[Commercial databases] from Maxmind are also supported in this plugin.
-
-If you need to use databases other than the bundled GeoLite2 City, you can download them directly
-from Maxmind's website and use the `database` option to specify their location. The GeoLite2 databases
-can be downloaded from https://dev.maxmind.com/geoip/geoip2/geolite2[here].
-
-If you would like to get Autonomous System Number (ASN) information, you can use the GeoLite2-ASN database.
-
-==== Details
-
-A `[geoip][location]` field is created if
-the GeoIP lookup returns a latitude and longitude. The field is stored in
-http://geojson.org/geojson-spec.html[GeoJSON] format. Additionally,
-the default Elasticsearch template provided with the
-<> maps
-the `[geoip][location]` field to an http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/mapping-geo-point-type.html#_mapping_options[Elasticsearch geo_point].
-
-As this field is a `geo_point` _and_ it is still valid GeoJSON, you get
-the awesomeness of Elasticsearch's geospatial query, facet and filter functions
-and the flexibility of having GeoJSON for all other applications (like Kibana's
-map visualization).
-
-[NOTE]
---
-This product includes GeoLite2 data created by MaxMind, available from
-http://www.maxmind.com. This database is licensed under
-http://creativecommons.org/licenses/by-sa/4.0/[Creative Commons Attribution-ShareAlike 4.0 International License].
-
-Versions 4.0.0 and later of the GeoIP filter use the MaxMind GeoLite2 database
-and support both IPv4 and IPv6 lookups. Versions prior to 4.0.0 use the legacy
-MaxMind GeoLite database and support IPv4 lookups only.
---
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Geoip Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-cache_size>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-database>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-default_database_type>> |`City` or `ASN`|No
-| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-cache_size"]
-===== `cache_size`
-
- * Value type is <>
- * Default value is `1000`
-
-GeoIP lookup is surprisingly expensive. This filter uses a cache to take advantage of the fact that
-IP addresses are often found adjacent to one another in log files and rarely have a random distribution.
-The higher you set this value, the more likely an item is to be in the cache and the faster this filter will run.
-However, if you set it too high, you can use more memory than desired.
-Since the upgrade to v2 of the GeoIP API, there is no eviction policy: once the cache is full, no more records can be added.
-Experiment with different values for this option to find the best performance for your dataset.
-
-This MUST be set to a value > 0. There is really no reason not to want this behavior; the overhead is minimal
-and the speed gains are large.
-
-It is important to note that this config value is global to the geoip_type. That is to say, all instances of the geoip filter
-of the same geoip_type share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit
-to having multiple caches for different instances at different points in the pipeline; that would just increase the
-number of cache misses and waste memory.
-
-[id="{version}-plugins-{type}s-{plugin}-database"]
-===== `database`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The path to Maxmind's database file that Logstash should use. The default database is GeoLite2-City.
-GeoLite2-City, GeoLite2-Country, and GeoLite2-ASN are the free databases from Maxmind that are supported.
-GeoIP2-City, GeoIP2-ISP, and GeoIP2-Country are the commercial databases from Maxmind that are supported.
-
-If not specified, this will default to the GeoLite2 City database that ships
-with Logstash.
-
-[id="{version}-plugins-{type}s-{plugin}-default_database_type"]
-===== `default_database_type`
-
-This plugin now includes both the GeoLite2-City and GeoLite2-ASN databases. If `database` and `default_database_type` are unset, the GeoLite2-City database will be selected. To use the included GeoLite2-ASN database, set `default_database_type` to `ASN`.
-
- * Value type is <>
- * The default value is `City`
- * The only acceptable values are `City` and `ASN`
-
-[id="{version}-plugins-{type}s-{plugin}-fields"]
-===== `fields`
-
- * Value type is <>
- * There is no default value for this setting.
-
-An array of geoip fields to be included in the event.
-
-Possible fields depend on the database type. By default, all geoip fields
-are included in the event.
-
-For the built-in GeoLite2 City database, the following are available:
-`city_name`, `continent_code`, `country_code2`, `country_code3`, `country_name`,
-`dma_code`, `ip`, `latitude`, `longitude`, `postal_code`, `region_name` and `timezone`.
-
-[id="{version}-plugins-{type}s-{plugin}-lru_cache_size"]
-===== `lru_cache_size` (DEPRECATED)
-
- * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
- * Value type is <>
- * Default value is `1000`
-
-GeoIP lookup is surprisingly expensive. This filter uses an LRU cache to take advantage of the fact that
-IP addresses are often found adjacent to one another in log files and rarely have a random distribution.
-The higher you set this value, the more likely an item is to be in the cache and the faster this filter will run.
-However, if you set it too high, you can use more memory than desired.
-
-Experiment with different values for this option to find the best performance for your dataset.
-
-This MUST be set to a value > 0. There is really no reason not to want this behavior; the overhead is minimal
-and the speed gains are large.
-
-It is important to note that this config value is global to the geoip_type. That is to say, all instances of the geoip filter
-of the same geoip_type share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit
-to having multiple caches for different instances at different points in the pipeline; that would just increase the
-number of cache misses and waste memory.
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The field containing the IP address or hostname to map via geoip. If
-this field is an array, only the first value will be used.
-
-[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
-===== `tag_on_failure`
-
- * Value type is <>
- * Default value is `["_geoip_lookup_failure"]`
-
-Tags the event on failure to look up geo information. This can be used in later analysis.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * Default value is `"geoip"`
-
-Specify the field into which Logstash should store the geoip data.
-This can be useful, for example, if you have `src_ip` and `dst_ip` fields and
-would like the GeoIP information of both IPs.
-
-If you save the data to a target field other than `geoip` and want to use the
-`geo_point` related functions in Elasticsearch, you need to alter the template
-provided with the Elasticsearch output and configure the output to use the
-new template.
-
-Even if you don't use the `geo_point` mapping, the `[target][location]` field
-is still valid GeoJSON.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/geoip-v4.3.1.asciidoc b/docs/versioned-plugins/filters/geoip-v4.3.1.asciidoc
deleted file mode 100644
index a7bb66dee..000000000
--- a/docs/versioned-plugins/filters/geoip-v4.3.1.asciidoc
+++ /dev/null
@@ -1,206 +0,0 @@
-:plugin: geoip
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.3.1
-:release_date: 2017-08-18
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-geoip/blob/v4.3.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Geoip filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The GeoIP filter adds information about the geographical location of IP addresses,
-based on data from the Maxmind GeoLite2 databases.
-
-==== Supported Databases
-
-This plugin is bundled with the https://dev.maxmind.com/geoip/geoip2/geolite2[GeoLite2] City database out of the box. From Maxmind's description --
-"GeoLite2 databases are free IP geolocation databases comparable to, but less accurate than, MaxMind’s
-GeoIP2 databases". Please see the GeoLite2 license for more details.
-
-https://www.maxmind.com/en/geoip2-databases[Commercial databases] from Maxmind are also supported in this plugin.
-
-If you need to use databases other than the bundled GeoLite2 City, you can download them directly
-from Maxmind's website and use the `database` option to specify their location. The GeoLite2 databases
-can be downloaded from https://dev.maxmind.com/geoip/geoip2/geolite2[here].
-
-If you would like to get Autonomous System Number (ASN) information, you can use the GeoLite2-ASN database.
-
-==== Details
-
-A `[geoip][location]` field is created if
-the GeoIP lookup returns a latitude and longitude. The field is stored in
-http://geojson.org/geojson-spec.html[GeoJSON] format. Additionally,
-the default Elasticsearch template provided with the
-<> maps
-the `[geoip][location]` field to an http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/mapping-geo-point-type.html#_mapping_options[Elasticsearch geo_point].
-
-As this field is a `geo_point` _and_ it is still valid GeoJSON, you get
-the awesomeness of Elasticsearch's geospatial query, facet and filter functions
-and the flexibility of having GeoJSON for all other applications (like Kibana's
-map visualization).
-
-[NOTE]
---
-This product includes GeoLite2 data created by MaxMind, available from
-http://www.maxmind.com. This database is licensed under
-http://creativecommons.org/licenses/by-sa/4.0/[Creative Commons Attribution-ShareAlike 4.0 International License].
-
-Versions 4.0.0 and later of the GeoIP filter use the MaxMind GeoLite2 database
-and support both IPv4 and IPv6 lookups. Versions prior to 4.0.0 use the legacy
-MaxMind GeoLite database and support IPv4 lookups only.
---
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Geoip Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-cache_size>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-database>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-default_database_type>> |`City` or `ASN`|No
-| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-cache_size"]
-===== `cache_size`
-
- * Value type is <>
- * Default value is `1000`
-
-GeoIP lookup is surprisingly expensive. This filter uses a cache to take advantage of the fact that
-IP addresses are often found adjacent to one another in log files and rarely have a random distribution.
-The higher you set this value, the more likely an item is to be in the cache and the faster this filter will run.
-However, if you set it too high, you can use more memory than desired.
-Since the upgrade to v2 of the GeoIP API, there is no eviction policy: once the cache is full, no more records can be added.
-Experiment with different values for this option to find the best performance for your dataset.
-
-This MUST be set to a value > 0. There is really no reason not to want this behavior; the overhead is minimal
-and the speed gains are large.
-
-It is important to note that this config value is global to the geoip_type. That is to say, all instances of the geoip filter
-of the same geoip_type share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit
-to having multiple caches for different instances at different points in the pipeline; that would just increase the
-number of cache misses and waste memory.
-
-[id="{version}-plugins-{type}s-{plugin}-database"]
-===== `database`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The path to Maxmind's database file that Logstash should use. The default database is GeoLite2-City.
-GeoLite2-City, GeoLite2-Country, and GeoLite2-ASN are the free databases from Maxmind that are supported.
-GeoIP2-City, GeoIP2-ISP, and GeoIP2-Country are the commercial databases from Maxmind that are supported.
-
-If not specified, this will default to the GeoLite2 City database that ships
-with Logstash.
-
-[id="{version}-plugins-{type}s-{plugin}-default_database_type"]
-===== `default_database_type`
-
-This plugin now includes both the GeoLite2-City and GeoLite2-ASN databases. If `database` and `default_database_type` are unset, the GeoLite2-City database will be selected. To use the included GeoLite2-ASN database, set `default_database_type` to `ASN`.
-
- * Value type is <>
- * The default value is `City`
- * The only acceptable values are `City` and `ASN`
-
-[id="{version}-plugins-{type}s-{plugin}-fields"]
-===== `fields`
-
- * Value type is <>
- * There is no default value for this setting.
-
-An array of geoip fields to be included in the event.
-
-Possible fields depend on the database type. By default, all geoip fields
-are included in the event.
-
-For the built-in GeoLite2 City database, the following are available:
-`city_name`, `continent_code`, `country_code2`, `country_code3`, `country_name`,
-`dma_code`, `ip`, `latitude`, `longitude`, `postal_code`, `region_name` and `timezone`.
-
-[id="{version}-plugins-{type}s-{plugin}-lru_cache_size"]
-===== `lru_cache_size` (DEPRECATED)
-
- * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
- * Value type is <>
- * Default value is `1000`
-
-GeoIP lookup is surprisingly expensive. This filter uses an LRU cache to take advantage of the fact that
-IP addresses are often found adjacent to one another in log files and rarely have a random distribution.
-The higher you set this value, the more likely an item is to be in the cache and the faster this filter will run.
-However, if you set it too high, you can use more memory than desired.
-
-Experiment with different values for this option to find the best performance for your dataset.
-
-This MUST be set to a value > 0. There is really no reason not to want this behavior; the overhead is minimal
-and the speed gains are large.
-
-It is important to note that this config value is global to the geoip_type. That is to say, all instances of the geoip filter
-of the same geoip_type share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit
-to having multiple caches for different instances at different points in the pipeline; that would just increase the
-number of cache misses and waste memory.
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The field containing the IP address or hostname to map via geoip. If
-this field is an array, only the first value will be used.
-
-[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
-===== `tag_on_failure`
-
- * Value type is <>
- * Default value is `["_geoip_lookup_failure"]`
-
-Tags the event on failure to look up geo information. This can be used in later analysis.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * Default value is `"geoip"`
-
-Specify the field into which Logstash should store the geoip data.
-This can be useful, for example, if you have `src_ip` and `dst_ip` fields and
-would like the GeoIP information of both IPs.
-
-If you save the data to a target field other than `geoip` and want to use the
-`geo_point` related functions in Elasticsearch, you need to alter the template
-provided with the Elasticsearch output and configure the output to use the
-new template.
-
-Even if you don't use the `geo_point` mapping, the `[target][location]` field
-is still valid GeoJSON.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/geoip-v5.0.0.asciidoc b/docs/versioned-plugins/filters/geoip-v5.0.0.asciidoc
deleted file mode 100644
index 2628074cb..000000000
--- a/docs/versioned-plugins/filters/geoip-v5.0.0.asciidoc
+++ /dev/null
@@ -1,184 +0,0 @@
-:plugin: geoip
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v5.0.0
-:release_date: 2017-08-01
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-geoip/blob/v5.0.0/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Geoip filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The GeoIP filter adds information about the geographical location of IP addresses,
-based on data from the Maxmind GeoLite2 databases.
-
-==== Supported Databases
-
-This plugin is bundled with the https://dev.maxmind.com/geoip/geoip2/geolite2[GeoLite2] City database out of the box. From Maxmind's description --
-"GeoLite2 databases are free IP geolocation databases comparable to, but less accurate than, MaxMind’s
-GeoIP2 databases". Please see the GeoLite2 license for more details.
-
-https://www.maxmind.com/en/geoip2-databases[Commercial databases] from Maxmind are also supported in this plugin.
-
-If you need to use databases other than the bundled GeoLite2 City, you can download them directly
-from Maxmind's website and use the `database` option to specify their location. The GeoLite2 databases
-can be downloaded from https://dev.maxmind.com/geoip/geoip2/geolite2[here].
-
-If you would like to get Autonomous System Number (ASN) information, you can use the GeoLite2-ASN database.
-
-==== Details
-
-A `[geoip][location]` field is created if
-the GeoIP lookup returns a latitude and longitude. The field is stored in
-http://geojson.org/geojson-spec.html[GeoJSON] format. Additionally,
-the default Elasticsearch template provided with the
-<> maps
-the `[geoip][location]` field to an http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/mapping-geo-point-type.html#_mapping_options[Elasticsearch geo_point].
-
-As this field is a `geo_point` _and_ it is still valid GeoJSON, you get
-the awesomeness of Elasticsearch's geospatial query, facet and filter functions
-and the flexibility of having GeoJSON for all other applications (like Kibana's
-map visualization).
-
-[NOTE]
---
-This product includes GeoLite2 data created by MaxMind, available from
-http://www.maxmind.com. This database is licensed under
-http://creativecommons.org/licenses/by-sa/4.0/[Creative Commons Attribution-ShareAlike 4.0 International License].
-
-Versions 4.0.0 and later of the GeoIP filter use the MaxMind GeoLite2 database
-and support both IPv4 and IPv6 lookups. Versions prior to 4.0.0 use the legacy
-MaxMind GeoLite database and support IPv4 lookups only.
---
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Geoip Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-cache_size>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-database>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-default_database_type>> |`City` or `ASN`|No
-| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-cache_size"]
-===== `cache_size`
-
- * Value type is <>
- * Default value is `1000`
-
-GeoIP lookup is surprisingly expensive. This filter uses a cache to take advantage of the fact that
-IP addresses are often found adjacent to one another in log files and rarely have a random distribution.
-The higher you set this value, the more likely an item is to be in the cache and the faster this filter will run.
-However, if you set it too high, you can use more memory than desired.
-Since the upgrade to v2 of the GeoIP API, there is no eviction policy: once the cache is full, no more records can be added.
-Experiment with different values for this option to find the best performance for your dataset.
-
-This MUST be set to a value > 0. There is really no reason not to want this behavior; the overhead is minimal
-and the speed gains are large.
-
-It is important to note that this config value is global to the geoip_type. That is to say, all instances of the geoip filter
-of the same geoip_type share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit
-to having multiple caches for different instances at different points in the pipeline; that would just increase the
-number of cache misses and waste memory.
-
-[id="{version}-plugins-{type}s-{plugin}-database"]
-===== `database`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The path to Maxmind's database file that Logstash should use. The default database is GeoLite2-City.
-GeoLite2-City, GeoLite2-Country, and GeoLite2-ASN are the free databases from Maxmind that are supported.
-GeoIP2-City, GeoIP2-ISP, and GeoIP2-Country are the commercial databases from Maxmind that are supported.
-
-If not specified, this will default to the GeoLite2 City database that ships
-with Logstash.
-
-[id="{version}-plugins-{type}s-{plugin}-default_database_type"]
-===== `default_database_type`
-
-This plugin now includes both the GeoLite2-City and GeoLite2-ASN databases. If `database` and `default_database_type` are unset, the GeoLite2-City database will be selected. To use the included GeoLite2-ASN database, set `default_database_type` to `ASN`.
-
- * Value type is <>
- * The default value is `City`
- * The only acceptable values are `City` and `ASN`
-
-[id="{version}-plugins-{type}s-{plugin}-fields"]
-===== `fields`
-
- * Value type is <>
- * There is no default value for this setting.
-
-An array of geoip fields to be included in the event.
-
-Possible fields depend on the database type. By default, all geoip fields
-are included in the event.
-
-For the built-in GeoLite2 City database, the following are available:
-`city_name`, `continent_code`, `country_code2`, `country_code3`, `country_name`,
-`dma_code`, `ip`, `latitude`, `longitude`, `postal_code`, `region_name` and `timezone`.
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The field containing the IP address or hostname to map via geoip. If
-this field is an array, only the first value will be used.
-
-[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
-===== `tag_on_failure`
-
- * Value type is <>
- * Default value is `["_geoip_lookup_failure"]`
-
-Tags the event on failure to look up geo information. This can be used in later analysis.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * Default value is `"geoip"`
-
-Specify the field into which Logstash should store the geoip data.
-This can be useful, for example, if you have `src_ip` and `dst_ip` fields and
-would like the GeoIP information of both IPs.
-
-If you save the data to a target field other than `geoip` and want to use the
-`geo_point` related functions in Elasticsearch, you need to alter the template
-provided with the Elasticsearch output and configure the output to use the
-new template.
-
-Even if you don't use the `geo_point` mapping, the `[target][location]` field
-is still valid GeoJSON.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/geoip-v5.0.1.asciidoc b/docs/versioned-plugins/filters/geoip-v5.0.1.asciidoc
deleted file mode 100644
index 810f950a5..000000000
--- a/docs/versioned-plugins/filters/geoip-v5.0.1.asciidoc
+++ /dev/null
@@ -1,184 +0,0 @@
-:plugin: geoip
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v5.0.1
-:release_date: 2017-08-16
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-geoip/blob/v5.0.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Geoip filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The GeoIP filter adds information about the geographical location of IP addresses,
-based on data from the Maxmind GeoLite2 databases.
-
-==== Supported Databases
-
-This plugin is bundled with the https://dev.maxmind.com/geoip/geoip2/geolite2[GeoLite2] City database out of the box. From Maxmind's description --
-"GeoLite2 databases are free IP geolocation databases comparable to, but less accurate than, MaxMind’s
-GeoIP2 databases". Please see the GeoLite2 license for more details.
-
-https://www.maxmind.com/en/geoip2-databases[Commercial databases] from Maxmind are also supported in this plugin.
-
-If you need to use databases other than the bundled GeoLite2 City, you can download them directly
-from Maxmind's website and use the `database` option to specify their location, as sketched below. The GeoLite2 databases
-can be downloaded from https://dev.maxmind.com/geoip/geoip2/geolite2[here].
-
-If you would like to get Autonomous System Number (ASN) information, you can use the GeoLite2-ASN database.
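-
-A sketch of that workflow follows. The path and field name below are hypothetical;
-point `database` at wherever you saved the downloaded file.
-
-[source,ruby]
-----
-filter {
-  geoip {
-    source   => "clientip"                         # assumed field holding the IP
-    database => "/etc/logstash/GeoLite2-ASN.mmdb"  # hypothetical download location
-  }
-}
-----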
-
-==== Details
-
-A `[geoip][location]` field is created if
-the GeoIP lookup returns a latitude and longitude. The field is stored in
-http://geojson.org/geojson-spec.html[GeoJSON] format. Additionally,
-the default Elasticsearch template provided with the
-<> maps
-the `[geoip][location]` field to an http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/mapping-geo-point-type.html#_mapping_options[Elasticsearch geo_point].
-
-As this field is a `geo_point` _and_ it is still valid GeoJSON, you get
-the awesomeness of Elasticsearch's geospatial query, facet and filter functions
-and the flexibility of having GeoJSON for all other applications (like Kibana's
-map visualization).
-
-[NOTE]
---
-This product includes GeoLite2 data created by MaxMind, available from
-http://www.maxmind.com. This database is licensed under
-http://creativecommons.org/licenses/by-sa/4.0/[Creative Commons Attribution-ShareAlike 4.0 International License].
-
-Versions 4.0.0 and later of the GeoIP filter use the MaxMind GeoLite2 database
-and support both IPv4 and IPv6 lookups. Versions prior to 4.0.0 use the legacy
-MaxMind GeoLite database and support IPv4 lookups only.
---
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Geoip Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-cache_size>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-database>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-default_database_type>> |`City` or `ASN`|No
-| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-cache_size"]
-===== `cache_size`
-
- * Value type is <>
- * Default value is `1000`
-
-GeoIP lookup is surprisingly expensive. This filter uses a cache to take advantage of the fact that
-IP addresses are often found adjacent to one another in log files and rarely have a random distribution.
-The higher you set this value, the more likely an item is to be in the cache and the faster this filter will run.
-However, if you set it too high, you can use more memory than desired.
-Since the upgrade to v2 of the GeoIP API, there is no eviction policy: once the cache is full, no more records can be added.
-Experiment with different values for this option to find the best performance for your dataset.
-
-This MUST be set to a value > 0. There is really no reason not to want this behavior; the overhead is minimal
-and the speed gains are large.
-
-It is important to note that this config value is global to the geoip_type. That is to say, all instances of the geoip filter
-of the same geoip_type share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit
-to having multiple caches for different instances at different points in the pipeline; that would just increase the
-number of cache misses and waste memory.
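-
-For example, on a dataset with many distinct client IPs you might raise the cache
-size. The field name and number below are illustrative assumptions, not recommendations:
-
-[source,ruby]
-----
-filter {
-  geoip {
-    source     => "clientip"  # assumed source field
-    cache_size => 10000       # cache up to 10,000 resolved IPs
-  }
-}
-----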
- -[id="{version}-plugins-{type}s-{plugin}-database"] -===== `database` - - * Value type is <> - * There is no default value for this setting. - -The path to Maxmind's database file that Logstash should use. The default database is GeoLite2-City. -GeoLite2-City, GeoLite2-Country, GeoLite2-ASN are the free databases from Maxmind that are supported. -GeoIP2-City, GeoIP2-ISP, GeoIP2-Country are the commercial databases from Maxmind that are supported. - -If not specified, this will default to the GeoLite2 City database that ships -with Logstash. - -[id="{version}-plugins-{type}s-{plugin}-default_database_type"] -===== `default_database_type` - -This plugin now includes both the GeoLite2-City and GeoLite2-ASN databases. If `database` and `default_database_type` are unset, the GeoLite2-City database will be selected. To use the included GeoLite2-ASN database, set `default_database_type` to `ASN`. - - * Value type is <> - * The default value is `City` - * The only acceptable values are `City` and `ASN` - -[id="{version}-plugins-{type}s-{plugin}-fields"] -===== `fields` - - * Value type is <> - * There is no default value for this setting. - -An array of geoip fields to be included in the event. - -Possible fields depend on the database type. By default, all geoip fields -are included in the event. - -For the built-in GeoLite2 City database, the following are available: -`city_name`, `continent_code`, `country_code2`, `country_code3`, `country_name`, -`dma_code`, `ip`, `latitude`, `longitude`, `postal_code`, `region_name` and `timezone`. - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The field containing the IP address or hostname to map via geoip. If -this field is an array, only the first value will be used. - -[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] -===== `tag_on_failure` - - * Value type is <> - * Default value is `["_geoip_lookup_failure"]` - -Tags the event on failure to look up geo information. This can be used in later analysis. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"geoip"` - -Specify the field into which Logstash should store the geoip data. -This can be useful, for example, if you have `src_ip` and `dst_ip` fields and -would like the GeoIP information of both IPs. - -If you save the data to a target field other than `geoip` and want to use the -`geo_point` related functions in Elasticsearch, you need to alter the template -provided with the Elasticsearch output and configure the output to use the -new template. - -Even if you don't use the `geo_point` mapping, the `[target][location]` field -is still valid GeoJSON. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/geoip-v5.0.2.asciidoc b/docs/versioned-plugins/filters/geoip-v5.0.2.asciidoc deleted file mode 100644 index e0a3bfd72..000000000 --- a/docs/versioned-plugins/filters/geoip-v5.0.2.asciidoc +++ /dev/null @@ -1,184 +0,0 @@ -:plugin: geoip -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-///////////////////////////////////////////
-:version: v5.0.2
-:release_date: 2017-11-13
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-geoip/blob/v5.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Geoip filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The GeoIP filter adds information about the geographical location of IP addresses,
-based on data from the Maxmind GeoLite2 databases.
-
-==== Supported Databases
-
-This plugin is bundled with the https://dev.maxmind.com/geoip/geoip2/geolite2[GeoLite2] City database out of the box. From Maxmind's description --
-"GeoLite2 databases are free IP geolocation databases comparable to, but less accurate than, MaxMind’s
-GeoIP2 databases". Please see the GeoLite2 license for more details.
-
-https://www.maxmind.com/en/geoip2-databases[Commercial databases] from Maxmind are also supported in this plugin.
-
-If you need to use databases other than the bundled GeoLite2 City, you can download them directly
-from Maxmind's website and use the `database` option to specify their location. The GeoLite2 databases
-can be downloaded from https://dev.maxmind.com/geoip/geoip2/geolite2[here].
-
-If you would like to get Autonomous System Number (ASN) information, you can use the GeoLite2-ASN database.
-
-==== Details
-
-A `[geoip][location]` field is created if
-the GeoIP lookup returns a latitude and longitude. The field is stored in
-http://geojson.org/geojson-spec.html[GeoJSON] format. Additionally,
-the default Elasticsearch template provided with the
-<> maps
-the `[geoip][location]` field to an http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/mapping-geo-point-type.html#_mapping_options[Elasticsearch geo_point].
-
-As this field is a `geo_point` _and_ it is still valid GeoJSON, you get
-the awesomeness of Elasticsearch's geospatial query, facet and filter functions
-and the flexibility of having GeoJSON for all other applications (like Kibana's
-map visualization).
-
-[NOTE]
---
-This product includes GeoLite2 data created by MaxMind, available from
-http://www.maxmind.com. This database is licensed under
-http://creativecommons.org/licenses/by-sa/4.0/[Creative Commons Attribution-ShareAlike 4.0 International License].
-
-Versions 4.0.0 and later of the GeoIP filter use the MaxMind GeoLite2 database
-and support both IPv4 and IPv6 lookups. Versions prior to 4.0.0 use the legacy
-MaxMind GeoLite database and support IPv4 lookups only.
---
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Geoip Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-cache_size>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-database>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-default_database_type>> |`City` or `ASN`|No
-| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-cache_size"]
-===== `cache_size`
-
- * Value type is <>
- * Default value is `1000`
-
-GeoIP lookup is surprisingly expensive. This filter uses a cache to take advantage of the fact that
-IP addresses are often found adjacent to one another in log files and rarely have a random distribution.
-The higher you set this value, the more likely an item is to be in the cache and the faster this filter will run.
-However, if you set it too high, you can use more memory than desired.
-Since the upgrade to v2 of the GeoIP API, there is no eviction policy: once the cache is full, no more records can be added.
-Experiment with different values for this option to find the best performance for your dataset.
-
-This MUST be set to a value > 0. There is really no reason not to want this behavior; the overhead is minimal
-and the speed gains are large.
-
-It is important to note that this config value is global to the geoip_type. That is to say, all instances of the geoip filter
-of the same geoip_type share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit
-to having multiple caches for different instances at different points in the pipeline; that would just increase the
-number of cache misses and waste memory.
-
-[id="{version}-plugins-{type}s-{plugin}-database"]
-===== `database`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The path to Maxmind's database file that Logstash should use. The default database is GeoLite2-City.
-GeoLite2-City, GeoLite2-Country, and GeoLite2-ASN are the free databases from Maxmind that are supported.
-GeoIP2-City, GeoIP2-ISP, and GeoIP2-Country are the commercial databases from Maxmind that are supported.
-
-If not specified, this will default to the GeoLite2 City database that ships
-with Logstash.
-
-[id="{version}-plugins-{type}s-{plugin}-default_database_type"]
-===== `default_database_type`
-
-This plugin now includes both the GeoLite2-City and GeoLite2-ASN databases. If `database` and `default_database_type` are unset, the GeoLite2-City database will be selected. To use the included GeoLite2-ASN database, set `default_database_type` to `ASN`.
-
- * Value type is <>
- * The default value is `City`
- * The only acceptable values are `City` and `ASN`
-
-[id="{version}-plugins-{type}s-{plugin}-fields"]
-===== `fields`
-
- * Value type is <>
- * There is no default value for this setting.
-
-An array of geoip fields to be included in the event.
-
-Possible fields depend on the database type. By default, all geoip fields
-are included in the event.
-
-For the built-in GeoLite2 City database, the following are available:
-`city_name`, `continent_code`, `country_code2`, `country_code3`, `country_name`,
-`dma_code`, `ip`, `latitude`, `longitude`, `postal_code`, `region_name` and `timezone`.
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The field containing the IP address or hostname to map via geoip. If
-this field is an array, only the first value will be used.
-
-[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
-===== `tag_on_failure`
-
- * Value type is <>
- * Default value is `["_geoip_lookup_failure"]`
-
-Tags the event on failure to look up geo information. This can be used in later analysis.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * Default value is `"geoip"`
-
-Specify the field into which Logstash should store the geoip data.
-This can be useful, for example, if you have `src_ip` and `dst_ip` fields and
-would like the GeoIP information of both IPs.
-
-If you save the data to a target field other than `geoip` and want to use the
-`geo_point` related functions in Elasticsearch, you need to alter the template
-provided with the Elasticsearch output and configure the output to use the
-new template.
-
-Even if you don't use the `geo_point` mapping, the `[target][location]` field
-is still valid GeoJSON.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/geoip-v5.0.3.asciidoc b/docs/versioned-plugins/filters/geoip-v5.0.3.asciidoc
deleted file mode 100644
index ff12e497c..000000000
--- a/docs/versioned-plugins/filters/geoip-v5.0.3.asciidoc
+++ /dev/null
@@ -1,184 +0,0 @@
-:plugin: geoip
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v5.0.3
-:release_date: 2017-12-21
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-geoip/blob/v5.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Geoip filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The GeoIP filter adds information about the geographical location of IP addresses,
-based on data from the Maxmind GeoLite2 databases.
-
-==== Supported Databases
-
-This plugin is bundled with the https://dev.maxmind.com/geoip/geoip2/geolite2[GeoLite2] City database out of the box. From Maxmind's description --
-"GeoLite2 databases are free IP geolocation databases comparable to, but less accurate than, MaxMind’s
-GeoIP2 databases". Please see the GeoLite2 license for more details.
-
-https://www.maxmind.com/en/geoip2-databases[Commercial databases] from Maxmind are also supported in this plugin.
-
-If you need to use databases other than the bundled GeoLite2 City, you can download them directly
-from Maxmind's website and use the `database` option to specify their location. The GeoLite2 databases
-can be downloaded from https://dev.maxmind.com/geoip/geoip2/geolite2[here].
-
-If you would like to get Autonomous System Number (ASN) information, you can use the GeoLite2-ASN database.
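-
-In versions that bundle both databases, ASN lookups do not require a separate
-download; see the `default_database_type` option below. A sketch, with an
-assumed field name:
-
-[source,ruby]
-----
-filter {
-  geoip {
-    source                => "clientip"  # assumed field holding the IP
-    default_database_type => "ASN"       # use the bundled GeoLite2-ASN database
-  }
-}
-----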
-
-==== Details
-
-A `[geoip][location]` field is created if
-the GeoIP lookup returns a latitude and longitude. The field is stored in
-http://geojson.org/geojson-spec.html[GeoJSON] format. Additionally,
-the default Elasticsearch template provided with the
-<> maps
-the `[geoip][location]` field to an http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/mapping-geo-point-type.html#_mapping_options[Elasticsearch geo_point].
-
-As this field is a `geo_point` _and_ it is still valid GeoJSON, you get
-the awesomeness of Elasticsearch's geospatial query, facet and filter functions
-and the flexibility of having GeoJSON for all other applications (like Kibana's
-map visualization).
-
-[NOTE]
---
-This product includes GeoLite2 data created by MaxMind, available from
-http://www.maxmind.com. This database is licensed under
-http://creativecommons.org/licenses/by-sa/4.0/[Creative Commons Attribution-ShareAlike 4.0 International License].
-
-Versions 4.0.0 and later of the GeoIP filter use the MaxMind GeoLite2 database
-and support both IPv4 and IPv6 lookups. Versions prior to 4.0.0 use the legacy
-MaxMind GeoLite database and support IPv4 lookups only.
---
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Geoip Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-cache_size>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-database>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-default_database_type>> |`City` or `ASN`|No
-| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-cache_size"]
-===== `cache_size`
-
- * Value type is <>
- * Default value is `1000`
-
-GeoIP lookup is surprisingly expensive. This filter uses a cache to take advantage of the fact that
-IP addresses are often found adjacent to one another in log files and rarely have a random distribution.
-The higher you set this value, the more likely an item is to be in the cache and the faster this filter will run.
-However, if you set it too high, you can use more memory than desired.
-Since the upgrade to v2 of the GeoIP API, there is no eviction policy: once the cache is full, no more records can be added.
-Experiment with different values for this option to find the best performance for your dataset.
-
-This MUST be set to a value > 0. There is really no reason not to want this behavior; the overhead is minimal
-and the speed gains are large.
-
-It is important to note that this config value is global to the geoip_type. That is to say, all instances of the geoip filter
-of the same geoip_type share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit
-to having multiple caches for different instances at different points in the pipeline; that would just increase the
-number of cache misses and waste memory.
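-
-To illustrate the sharing behavior described above (field names assumed): with
-two geoip instances in one pipeline, both use a single shared cache, sized by
-the last declaration.
-
-[source,ruby]
-----
-filter {
-  geoip { source => "src_ip" cache_size => 1000 }
-  geoip { source => "dst_ip" cache_size => 5000 }  # 5000 wins for the shared cache
-}
-----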
- -[id="{version}-plugins-{type}s-{plugin}-database"] -===== `database` - - * Value type is <> - * There is no default value for this setting. - -The path to Maxmind's database file that Logstash should use. The default database is GeoLite2-City. -GeoLite2-City, GeoLite2-Country, GeoLite2-ASN are the free databases from Maxmind that are supported. -GeoIP2-City, GeoIP2-ISP, GeoIP2-Country are the commercial databases from Maxmind that are supported. - -If not specified, this will default to the GeoLite2 City database that ships -with Logstash. - -[id="{version}-plugins-{type}s-{plugin}-default_database_type"] -===== `default_database_type` - -This plugin now includes both the GeoLite2-City and GeoLite2-ASN databases. If `database` and `default_database_type` are unset, the GeoLite2-City database will be selected. To use the included GeoLite2-ASN database, set `default_database_type` to `ASN`. - - * Value type is <> - * The default value is `City` - * The only acceptable values are `City` and `ASN` - -[id="{version}-plugins-{type}s-{plugin}-fields"] -===== `fields` - - * Value type is <> - * There is no default value for this setting. - -An array of geoip fields to be included in the event. - -Possible fields depend on the database type. By default, all geoip fields -are included in the event. - -For the built-in GeoLite2 City database, the following are available: -`city_name`, `continent_code`, `country_code2`, `country_code3`, `country_name`, -`dma_code`, `ip`, `latitude`, `longitude`, `postal_code`, `region_name` and `timezone`. - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The field containing the IP address or hostname to map via geoip. If -this field is an array, only the first value will be used. - -[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] -===== `tag_on_failure` - - * Value type is <> - * Default value is `["_geoip_lookup_failure"]` - -Tags the event on failure to look up geo information. This can be used in later analysis. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"geoip"` - -Specify the field into which Logstash should store the geoip data. -This can be useful, for example, if you have `src_ip` and `dst_ip` fields and -would like the GeoIP information of both IPs. - -If you save the data to a target field other than `geoip` and want to use the -`geo_point` related functions in Elasticsearch, you need to alter the template -provided with the Elasticsearch output and configure the output to use the -new template. - -Even if you don't use the `geo_point` mapping, the `[target][location]` field -is still valid GeoJSON. 
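-
-For instance, a minimal sketch of the `src_ip`/`dst_ip` case mentioned above,
-assuming those field names and using one geoip filter per address:
-[source,ruby]
-    filter {
-      geoip {
-        source => "src_ip"
-        target => "src_geoip"
-      }
-      geoip {
-        source => "dst_ip"
-        target => "dst_geoip"
-      }
-    }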
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/hashid-index.asciidoc b/docs/versioned-plugins/filters/hashid-index.asciidoc
deleted file mode 100644
index 34c846d70..000000000
--- a/docs/versioned-plugins/filters/hashid-index.asciidoc
+++ /dev/null
@@ -1,14 +0,0 @@
-:plugin: hashid
-:type: filter
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-08-21
-| <> | 2017-06-23
-|=======================================================================
-
-include::hashid-v0.1.3.asciidoc[]
-include::hashid-v0.1.2.asciidoc[]
-
diff --git a/docs/versioned-plugins/filters/hashid-v0.1.2.asciidoc b/docs/versioned-plugins/filters/hashid-v0.1.2.asciidoc
deleted file mode 100644
index 1ce725ca6..000000000
--- a/docs/versioned-plugins/filters/hashid-v0.1.2.asciidoc
+++ /dev/null
@@ -1,110 +0,0 @@
-:plugin: hashid
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v0.1.2
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-hashid/blob/v0.1.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="plugins-{type}-{plugin}"]
-
-=== Hashid filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This filter allows you to generate predictable, string-encoded hashed keys
-based on event contents and timestamp. This can be used to avoid getting
-duplicate records indexed into Elasticsearch.
-
-Hashed keys can be generated from full or partial hashes, and the plugin
-can prefix these keys with the event timestamp in order to make them
-largely ordered by timestamp, which tends to lead to increased indexing
-performance for event-based use cases where data is being indexed in near
-real time.
-
-When used with the timestamp prefix enabled, it should ideally be run after
-the date filter has run and populated the @timestamp field. A typical
-configuration is sketched after the options table below.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Hashid Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-add_timestamp_prefix>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-hash_bytes_used>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-method>> |<>, one of `["SHA1", "SHA256", "SHA384", "SHA512", "MD5"]`|No
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
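-
-For reference, the typical configuration sketched below (the `key` value is a
-placeholder, and every option shown is documented in the sections that follow):
-[source,ruby]
-    filter {
-      hashid {
-        source => ["message"]
-        method => "SHA256"
-        key => "my-secret-key"
-        add_timestamp_prefix => true
-      }
-    }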
-
-  
-
-[id="{version}-plugins-{type}s-{plugin}-add_timestamp_prefix"]
-===== `add_timestamp_prefix`
-
- * Value type is <>
- * Default value is `true`
-
-Use the timestamp to generate an ID prefix.
-
-[id="{version}-plugins-{type}s-{plugin}-hash_bytes_used"]
-===== `hash_bytes_used`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If the full hash is not to be used, this parameter specifies how many bytes
-of it should be used. If not specified, the full hash is used.
-
-[id="{version}-plugins-{type}s-{plugin}-key"]
-===== `key`
-
- * Value type is <>
- * Default value is `"hashid"`
-
-Encryption key to be used when generating cryptographic hashes.
-
-[id="{version}-plugins-{type}s-{plugin}-method"]
-===== `method`
-
- * Value can be any of: `SHA1`, `SHA256`, `SHA384`, `SHA512`, `MD5`
- * Default value is `"MD5"`
-
-Hash function to use.
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
- * Value type is <>
- * Default value is `["message"]`
-
-Source field(s) to base the hash calculation on.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * Default value is `"hashid"`
-
-Target field. The current value of the field is overwritten if it exists.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/hashid-v0.1.3.asciidoc b/docs/versioned-plugins/filters/hashid-v0.1.3.asciidoc
deleted file mode 100644
index 4a91fd17a..000000000
--- a/docs/versioned-plugins/filters/hashid-v0.1.3.asciidoc
+++ /dev/null
@@ -1,110 +0,0 @@
-:plugin: hashid
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v0.1.3
-:release_date: 2017-08-21
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-hashid/blob/v0.1.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Hashid filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This filter allows you to generate predictable, string-encoded hashed keys
-based on event contents and timestamp. This can be used to avoid getting
-duplicate records indexed into Elasticsearch.
-
-Hashed keys can be generated from full or partial hashes, and the plugin
-can prefix these keys with the event timestamp in order to make them
-largely ordered by timestamp, which tends to lead to increased indexing
-performance for event-based use cases where data is being indexed in near
-real time.
-
-When used with the timestamp prefix enabled, it should ideally be run after
-the date filter has run and populated the @timestamp field.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Hashid Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-add_timestamp_prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hash_bytes_used>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-method>> |<>, one of `["SHA1", "SHA256", "SHA384", "SHA512", "MD5"]`|No -| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-add_timestamp_prefix"] -===== `add_timestamp_prefix` - - * Value type is <> - * Default value is `true` - -Use the timestamp to generate an ID prefix - -[id="{version}-plugins-{type}s-{plugin}-hash_bytes_used"] -===== `hash_bytes_used` - - * Value type is <> - * There is no default value for this setting. - -If full hash generated is not to be used, this parameter specifies how many bytes that should be used -If not specified, the full hash will be used - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * Value type is <> - * Default value is `"hashid"` - -Encryption key to be used when generating cryptographic hashes - -[id="{version}-plugins-{type}s-{plugin}-method"] -===== `method` - - * Value can be any of: `SHA1`, `SHA256`, `SHA384`, `SHA512`, `MD5` - * Default value is `"MD5"` - -Hash function to use - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * Value type is <> - * Default value is `["message"]` - -Source field(s) to base the hash calculation on - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"hashid"` - -Target field. -Will overwrite current value of a field if it exists. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/i18n-index.asciidoc b/docs/versioned-plugins/filters/i18n-index.asciidoc deleted file mode 100644 index ee9087e6a..000000000 --- a/docs/versioned-plugins/filters/i18n-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: i18n -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::i18n-v3.0.3.asciidoc[] -include::i18n-v3.0.2.asciidoc[] -include::i18n-v3.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/filters/i18n-v3.0.1.asciidoc b/docs/versioned-plugins/filters/i18n-v3.0.1.asciidoc deleted file mode 100644 index 28cb23731..000000000 --- a/docs/versioned-plugins/filters/i18n-v3.0.1.asciidoc +++ /dev/null @@ -1,62 +0,0 @@ -:plugin: i18n -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-i18n/blob/v3.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== I18n filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The i18n filter allows you to remove special characters -from a field - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== I18n Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-transliterate>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-transliterate"] -===== `transliterate` - - * Value type is <> - * There is no default value for this setting. - -Replaces non-ASCII characters with an ASCII approximation, or -if none exists, a replacement character which defaults to `?` - -Example: -[source,ruby] - filter { - i18n { - transliterate => ["field1", "field2"] - } - } - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/i18n-v3.0.2.asciidoc b/docs/versioned-plugins/filters/i18n-v3.0.2.asciidoc deleted file mode 100644 index eb826214b..000000000 --- a/docs/versioned-plugins/filters/i18n-v3.0.2.asciidoc +++ /dev/null @@ -1,62 +0,0 @@ -:plugin: i18n -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.2 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-i18n/blob/v3.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== I18n filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The i18n filter allows you to remove special characters -from a field - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== I18n Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-transliterate>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-transliterate"] -===== `transliterate` - - * Value type is <> - * There is no default value for this setting. 
- -Replaces non-ASCII characters with an ASCII approximation, or -if none exists, a replacement character which defaults to `?` - -Example: -[source,ruby] - filter { - i18n { - transliterate => ["field1", "field2"] - } - } - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/i18n-v3.0.3.asciidoc b/docs/versioned-plugins/filters/i18n-v3.0.3.asciidoc deleted file mode 100644 index f3bf27226..000000000 --- a/docs/versioned-plugins/filters/i18n-v3.0.3.asciidoc +++ /dev/null @@ -1,62 +0,0 @@ -:plugin: i18n -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-i18n/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== I18n filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The i18n filter allows you to remove special characters -from a field - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== I18n Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-transliterate>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-transliterate"] -===== `transliterate` - - * Value type is <> - * There is no default value for this setting. 
-
-Replaces non-ASCII characters with an ASCII approximation, or
-if none exists, a replacement character which defaults to `?`.
-
-Example:
-[source,ruby]
-    filter {
-      i18n {
-        transliterate => ["field1", "field2"]
-      }
-    }
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/jdbc_static-index.asciidoc b/docs/versioned-plugins/filters/jdbc_static-index.asciidoc
deleted file mode 100644
index 01d19d5b2..000000000
--- a/docs/versioned-plugins/filters/jdbc_static-index.asciidoc
+++ /dev/null
@@ -1,10 +0,0 @@
-:plugin: jdbc_static
-:type: filter
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-|=======================================================================
-
-
diff --git a/docs/versioned-plugins/filters/jdbc_streaming-index.asciidoc b/docs/versioned-plugins/filters/jdbc_streaming-index.asciidoc
deleted file mode 100644
index 2c6d66679..000000000
--- a/docs/versioned-plugins/filters/jdbc_streaming-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: jdbc_streaming
-:type: filter
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-15
-| <> | 2017-06-23
-|=======================================================================
-
-include::jdbc_streaming-v1.0.3.asciidoc[]
-include::jdbc_streaming-v1.0.2.asciidoc[]
-include::jdbc_streaming-v1.0.1.asciidoc[]
-
diff --git a/docs/versioned-plugins/filters/jdbc_streaming-v1.0.1.asciidoc b/docs/versioned-plugins/filters/jdbc_streaming-v1.0.1.asciidoc
deleted file mode 100644
index fc62ca166..000000000
--- a/docs/versioned-plugins/filters/jdbc_streaming-v1.0.1.asciidoc
+++ /dev/null
@@ -1,226 +0,0 @@
-:plugin: jdbc_streaming
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v1.0.1
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-jdbc_streaming/blob/v1.0.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="plugins-{type}-{plugin}"]
-
-=== Jdbc_streaming filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This filter executes a SQL query and stores the result set in the field
-specified as `target`.
-It will cache the results locally in an LRU cache with expiry.
-
-For example, you can load a row based on an id in the event:
-
-[source,ruby]
-filter {
-  jdbc_streaming {
-    jdbc_driver_library => "/path/to/mysql-connector-java-5.1.34-bin.jar"
-    jdbc_driver_class => "com.mysql.jdbc.Driver"
-    jdbc_connection_string => "jdbc:mysql://localhost:3306/mydatabase"
-    jdbc_user => "me"
-    jdbc_password => "secret"
-    statement => "select * from WORLD.COUNTRY WHERE Code = :code"
-    parameters => { "code" => "country_code" }
-    target => "country_details"
-  }
-}
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Jdbc_streaming Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cache_expiration>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cache_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-default_hash>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_connection_string>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_class>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_library>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_validate_connection>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-statement>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-tag_on_default_use>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-use_cache>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-cache_expiration"] -===== `cache_expiration` - - * Value type is <> - * Default value is `5.0` - -The minimum number of seconds any entry should remain in the cache, defaults to 5 seconds -A numeric value, you can use decimals for example `{ "cache_expiration" => 0.25 }` -If there are transient jdbc errors the cache will store empty results for a given -parameter set and bypass the jbdc lookup, this merges the default_hash into the event, until -the cache entry expires, then the jdbc lookup will be tried again for the same parameters -Conversely, while the cache contains valid results any external problem that would cause -jdbc errors, will not be noticed for the cache_expiration period. - -[id="{version}-plugins-{type}s-{plugin}-cache_size"] -===== `cache_size` - - * Value type is <> - * Default value is `500` - -The maximum number of cache entries are stored, defaults to 500 entries -The least recently used entry will be evicted - -[id="{version}-plugins-{type}s-{plugin}-default_hash"] -===== `default_hash` - - * Value type is <> - * Default value is `{}` - -Define a default object to use when lookup fails to return a matching row. -ensure that the key names of this object match the columns from the statement - -[id="{version}-plugins-{type}s-{plugin}-jdbc_connection_string"] -===== `jdbc_connection_string` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -JDBC connection string - -[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_class"] -===== `jdbc_driver_class` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -JDBC driver class to load, for example "oracle.jdbc.OracleDriver" or "org.apache.derby.jdbc.ClientDriver" - -[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_library"] -===== `jdbc_driver_library` - - * Value type is <> - * There is no default value for this setting. 
-
-JDBC driver library path to the third-party driver library.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_password"]
-===== `jdbc_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC password.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_user"]
-===== `jdbc_user`
-
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC user.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_validate_connection"]
-===== `jdbc_validate_connection`
-
- * Value type is <>
- * Default value is `false`
-
-Connection pool configuration.
-Validate the connection before use.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout"]
-===== `jdbc_validation_timeout`
-
- * Value type is <>
- * Default value is `3600`
-
-Connection pool configuration.
-How often to validate a connection (in seconds).
-
-[id="{version}-plugins-{type}s-{plugin}-parameters"]
-===== `parameters`
-
- * Value type is <>
- * Default value is `{}`
-
-Hash of query parameters, for example `{ "id" => "id_field" }`.
-
-[id="{version}-plugins-{type}s-{plugin}-statement"]
-===== `statement`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Statement to execute.
-To use parameters, use named parameter syntax, for example "SELECT * FROM MYTABLE WHERE ID = :id".
-
-[id="{version}-plugins-{type}s-{plugin}-tag_on_default_use"]
-===== `tag_on_default_use`
-
- * Value type is <>
- * Default value is `["_jdbcstreamingdefaultsused"]`
-
-Append values to the `tags` field if no record was found and default values were used.
-
-[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
-===== `tag_on_failure`
-
- * Value type is <>
- * Default value is `["_jdbcstreamingfailure"]`
-
-Append values to the `tags` field if a SQL error occurred.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Define the target field in which to store the extracted result(s).
-The field is overwritten if it exists.
-
-[id="{version}-plugins-{type}s-{plugin}-use_cache"]
-===== `use_cache`
-
- * Value type is <>
- * Default value is `true`
-
-Enable or disable caching, boolean true or false. Defaults to true.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/jdbc_streaming-v1.0.2.asciidoc b/docs/versioned-plugins/filters/jdbc_streaming-v1.0.2.asciidoc
deleted file mode 100644
index 3997ee0ab..000000000
--- a/docs/versioned-plugins/filters/jdbc_streaming-v1.0.2.asciidoc
+++ /dev/null
@@ -1,226 +0,0 @@
-:plugin: jdbc_streaming
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v1.0.2
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-jdbc_streaming/blob/v1.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Jdbc_streaming filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This filter executes a SQL query and stores the result set in the field
-specified as `target`.
-It will cache the results locally in an LRU cache with expiry.
-
-For example, you can load a row based on an id in the event:
-
-[source,ruby]
-filter {
-  jdbc_streaming {
-    jdbc_driver_library => "/path/to/mysql-connector-java-5.1.34-bin.jar"
-    jdbc_driver_class => "com.mysql.jdbc.Driver"
-    jdbc_connection_string => "jdbc:mysql://localhost:3306/mydatabase"
-    jdbc_user => "me"
-    jdbc_password => "secret"
-    statement => "select * from WORLD.COUNTRY WHERE Code = :code"
-    parameters => { "code" => "country_code" }
-    target => "country_details"
-  }
-}
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Jdbc_streaming Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-cache_expiration>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-cache_size>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-default_hash>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-jdbc_connection_string>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_class>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_library>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-jdbc_password>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-jdbc_user>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-jdbc_validate_connection>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-statement>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-tag_on_default_use>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-use_cache>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
-  
-
-[id="{version}-plugins-{type}s-{plugin}-cache_expiration"]
-===== `cache_expiration`
-
- * Value type is <>
- * Default value is `5.0`
-
-The minimum number of seconds any entry should remain in the cache; defaults to 5 seconds.
-This is a numeric value, and decimals are allowed, for example `{ "cache_expiration" => 0.25 }`.
-If there are transient JDBC errors, the cache will store empty results for a given
-parameter set and bypass the JDBC lookup. This merges the default_hash into the event until
-the cache entry expires, at which point the JDBC lookup will be tried again for the same parameters.
-Conversely, while the cache contains valid results, any external problem that would cause
-JDBC errors will not be noticed for the cache_expiration period.
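-
-A hedged sketch of cache tuning that reuses the connection settings from the
-example above; the cache values shown are illustrative, not recommendations:
-[source,ruby]
-    filter {
-      jdbc_streaming {
-        jdbc_driver_library => "/path/to/mysql-connector-java-5.1.34-bin.jar"
-        jdbc_driver_class => "com.mysql.jdbc.Driver"
-        jdbc_connection_string => "jdbc:mysql://localhost:3306/mydatabase"
-        jdbc_user => "me"
-        jdbc_password => "secret"
-        statement => "select * from WORLD.COUNTRY WHERE Code = :code"
-        parameters => { "code" => "country_code" }
-        target => "country_details"
-        use_cache => true
-        cache_expiration => 60.0
-        cache_size => 1000
-      }
-    }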
- -[id="{version}-plugins-{type}s-{plugin}-cache_size"] -===== `cache_size` - - * Value type is <> - * Default value is `500` - -The maximum number of cache entries are stored, defaults to 500 entries -The least recently used entry will be evicted - -[id="{version}-plugins-{type}s-{plugin}-default_hash"] -===== `default_hash` - - * Value type is <> - * Default value is `{}` - -Define a default object to use when lookup fails to return a matching row. -ensure that the key names of this object match the columns from the statement - -[id="{version}-plugins-{type}s-{plugin}-jdbc_connection_string"] -===== `jdbc_connection_string` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -JDBC connection string - -[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_class"] -===== `jdbc_driver_class` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -JDBC driver class to load, for example "oracle.jdbc.OracleDriver" or "org.apache.derby.jdbc.ClientDriver" - -[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_library"] -===== `jdbc_driver_library` - - * Value type is <> - * There is no default value for this setting. - -Tentative of abstracting JDBC logic to a mixin -for potential reuse in other plugins (input/output) -This method is called when someone includes this module -Add these methods to the 'base' given. -JDBC driver library path to third party driver library. - -[id="{version}-plugins-{type}s-{plugin}-jdbc_password"] -===== `jdbc_password` - - * Value type is <> - * There is no default value for this setting. - -JDBC password - -[id="{version}-plugins-{type}s-{plugin}-jdbc_user"] -===== `jdbc_user` - - * Value type is <> - * There is no default value for this setting. - -JDBC user - -[id="{version}-plugins-{type}s-{plugin}-jdbc_validate_connection"] -===== `jdbc_validate_connection` - - * Value type is <> - * Default value is `false` - -Connection pool configuration. -Validate connection before use. - -[id="{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout"] -===== `jdbc_validation_timeout` - - * Value type is <> - * Default value is `3600` - -Connection pool configuration. -How often to validate a connection (in seconds) - -[id="{version}-plugins-{type}s-{plugin}-parameters"] -===== `parameters` - - * Value type is <> - * Default value is `{}` - -Hash of query parameter, for example `{ "id" => "id_field" }` - -[id="{version}-plugins-{type}s-{plugin}-statement"] -===== `statement` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Statement to execute. -To use parameters, use named parameter syntax, for example "SELECT * FROM MYTABLE WHERE ID = :id" - -[id="{version}-plugins-{type}s-{plugin}-tag_on_default_use"] -===== `tag_on_default_use` - - * Value type is <> - * Default value is `["_jdbcstreamingdefaultsused"]` - -Append values to the `tags` field if no record was found and default values were used - -[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] -===== `tag_on_failure` - - * Value type is <> - * Default value is `["_jdbcstreamingfailure"]` - -Append values to the `tags` field if sql error occured - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. 
-
-Define the target field in which to store the extracted result(s).
-The field is overwritten if it exists.
-
-[id="{version}-plugins-{type}s-{plugin}-use_cache"]
-===== `use_cache`
-
- * Value type is <>
- * Default value is `true`
-
-Enable or disable caching, boolean true or false. Defaults to true.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/jdbc_streaming-v1.0.3.asciidoc b/docs/versioned-plugins/filters/jdbc_streaming-v1.0.3.asciidoc
deleted file mode 100644
index 5728db64d..000000000
--- a/docs/versioned-plugins/filters/jdbc_streaming-v1.0.3.asciidoc
+++ /dev/null
@@ -1,226 +0,0 @@
-:plugin: jdbc_streaming
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v1.0.3
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-jdbc_streaming/blob/v1.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Jdbc_streaming filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This filter executes a SQL query and stores the result set in the field
-specified as `target`.
-It will cache the results locally in an LRU cache with expiry.
-
-For example, you can load a row based on an id in the event:
-
-[source,ruby]
-filter {
-  jdbc_streaming {
-    jdbc_driver_library => "/path/to/mysql-connector-java-5.1.34-bin.jar"
-    jdbc_driver_class => "com.mysql.jdbc.Driver"
-    jdbc_connection_string => "jdbc:mysql://localhost:3306/mydatabase"
-    jdbc_user => "me"
-    jdbc_password => "secret"
-    statement => "select * from WORLD.COUNTRY WHERE Code = :code"
-    parameters => { "code" => "country_code" }
-    target => "country_details"
-  }
-}
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Jdbc_streaming Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cache_expiration>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cache_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-default_hash>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_connection_string>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_class>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_library>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_validate_connection>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-statement>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-tag_on_default_use>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-use_cache>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-cache_expiration"] -===== `cache_expiration` - - * Value type is <> - * Default value is `5.0` - -The minimum number of seconds any entry should remain in the cache, defaults to 5 seconds -A numeric value, you can use decimals for example `{ "cache_expiration" => 0.25 }` -If there are transient jdbc errors the cache will store empty results for a given -parameter set and bypass the jbdc lookup, this merges the default_hash into the event, until -the cache entry expires, then the jdbc lookup will be tried again for the same parameters -Conversely, while the cache contains valid results any external problem that would cause -jdbc errors, will not be noticed for the cache_expiration period. - -[id="{version}-plugins-{type}s-{plugin}-cache_size"] -===== `cache_size` - - * Value type is <> - * Default value is `500` - -The maximum number of cache entries are stored, defaults to 500 entries -The least recently used entry will be evicted - -[id="{version}-plugins-{type}s-{plugin}-default_hash"] -===== `default_hash` - - * Value type is <> - * Default value is `{}` - -Define a default object to use when lookup fails to return a matching row. -ensure that the key names of this object match the columns from the statement - -[id="{version}-plugins-{type}s-{plugin}-jdbc_connection_string"] -===== `jdbc_connection_string` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -JDBC connection string - -[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_class"] -===== `jdbc_driver_class` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -JDBC driver class to load, for example "oracle.jdbc.OracleDriver" or "org.apache.derby.jdbc.ClientDriver" - -[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_library"] -===== `jdbc_driver_library` - - * Value type is <> - * There is no default value for this setting. 
- -Tentative of abstracting JDBC logic to a mixin -for potential reuse in other plugins (input/output) -This method is called when someone includes this module -Add these methods to the 'base' given. -JDBC driver library path to third party driver library. - -[id="{version}-plugins-{type}s-{plugin}-jdbc_password"] -===== `jdbc_password` - - * Value type is <> - * There is no default value for this setting. - -JDBC password - -[id="{version}-plugins-{type}s-{plugin}-jdbc_user"] -===== `jdbc_user` - - * Value type is <> - * There is no default value for this setting. - -JDBC user - -[id="{version}-plugins-{type}s-{plugin}-jdbc_validate_connection"] -===== `jdbc_validate_connection` - - * Value type is <> - * Default value is `false` - -Connection pool configuration. -Validate connection before use. - -[id="{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout"] -===== `jdbc_validation_timeout` - - * Value type is <> - * Default value is `3600` - -Connection pool configuration. -How often to validate a connection (in seconds) - -[id="{version}-plugins-{type}s-{plugin}-parameters"] -===== `parameters` - - * Value type is <> - * Default value is `{}` - -Hash of query parameter, for example `{ "id" => "id_field" }` - -[id="{version}-plugins-{type}s-{plugin}-statement"] -===== `statement` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Statement to execute. -To use parameters, use named parameter syntax, for example "SELECT * FROM MYTABLE WHERE ID = :id" - -[id="{version}-plugins-{type}s-{plugin}-tag_on_default_use"] -===== `tag_on_default_use` - - * Value type is <> - * Default value is `["_jdbcstreamingdefaultsused"]` - -Append values to the `tags` field if no record was found and default values were used - -[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] -===== `tag_on_failure` - - * Value type is <> - * Default value is `["_jdbcstreamingfailure"]` - -Append values to the `tags` field if sql error occured - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. 
- -Define the target field to store the extracted result(s) -Field is overwritten if exists - -[id="{version}-plugins-{type}s-{plugin}-use_cache"] -===== `use_cache` - - * Value type is <> - * Default value is `true` - -Enable or disable caching, boolean true or false, defaults to true - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/json-index.asciidoc b/docs/versioned-plugins/filters/json-index.asciidoc deleted file mode 100644 index 2d863e241..000000000 --- a/docs/versioned-plugins/filters/json-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: json -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::json-v3.0.5.asciidoc[] -include::json-v3.0.4.asciidoc[] -include::json-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/filters/json-v3.0.3.asciidoc b/docs/versioned-plugins/filters/json-v3.0.3.asciidoc deleted file mode 100644 index 349e6fb5f..000000000 --- a/docs/versioned-plugins/filters/json-v3.0.3.asciidoc +++ /dev/null @@ -1,121 +0,0 @@ -:plugin: json -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-json/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Json filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This is a JSON parsing filter. It takes an existing field which contains JSON and -expands it into an actual data structure within the Logstash event. - -By default it will place the parsed JSON in the root (top level) of the Logstash event, but this -filter can be configured to place the JSON into any arbitrary event field, using the -`target` configuration. - -This plugin has a few fallback scenario when something bad happen during the parsing of the event. -If the JSON parsing fails on the data, the event will be untouched and it will be tagged with a -`_jsonparsefailure` then you can use conditionals to clean the data. You can configured this tag with then -`tag_on_failure` option. - -If the parsed data contains a `@timestamp` field, we will try to use it for the event's `@timestamp`, if the -parsing fails, the field will be renamed to `_@timestamp` and the event will be tagged with a -`_timestampparsefailure`. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Json Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-skip_on_invalid_json>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-skip_on_invalid_json"] -===== `skip_on_invalid_json` - - * Value type is <> - * Default value is `false` - -Allow to skip filter on invalid json (allows to handle json and non-json data without warnings) - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The configuration for the JSON filter: -[source,ruby] - source => source_field - -For example, if you have JSON data in the `message` field: -[source,ruby] - filter { - json { - source => "message" - } - } - -The above would parse the json from the `message` field - -[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] -===== `tag_on_failure` - - * Value type is <> - * Default value is `["_jsonparsefailure"]` - -Append values to the `tags` field when there has been no -successful match - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -Define the target field for placing the parsed data. If this setting is -omitted, the JSON data will be stored at the root (top level) of the event. - -For example, if you want the data to be put in the `doc` field: -[source,ruby] - filter { - json { - target => "doc" - } - } - -JSON in the value of the `source` field will be expanded into a -data structure in the `target` field. - -NOTE: if the `target` field already exists, it will be overwritten! - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/json-v3.0.4.asciidoc b/docs/versioned-plugins/filters/json-v3.0.4.asciidoc deleted file mode 100644 index ab2b60227..000000000 --- a/docs/versioned-plugins/filters/json-v3.0.4.asciidoc +++ /dev/null @@ -1,121 +0,0 @@ -:plugin: json -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-json/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Json filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This is a JSON parsing filter. It takes an existing field which contains JSON and -expands it into an actual data structure within the Logstash event. - -By default it will place the parsed JSON in the root (top level) of the Logstash event, but this -filter can be configured to place the JSON into any arbitrary event field, using the -`target` configuration. 
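-
-As a quick illustration of that `source`/`target` pairing (the field names
-here are assumptions for the sketch):
-[source,ruby]
-    filter {
-      json {
-        source => "message"
-        target => "parsed"
-      }
-    }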
-
-This plugin has a few fallback scenarios for when something bad happens
-during the parsing of an event.
-If the JSON parsing fails on the data, the event will be left untouched and
-tagged with `_jsonparsefailure`; you can then use conditionals to clean the data.
-You can configure this tag with the `tag_on_failure` option.
-
-If the parsed data contains a `@timestamp` field, we will try to use it for the
-event's `@timestamp`. If the parsing fails, the field will be renamed to
-`_@timestamp` and the event will be tagged with `_timestampparsefailure`.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Json Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-skip_on_invalid_json>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
-  
-
-[id="{version}-plugins-{type}s-{plugin}-skip_on_invalid_json"]
-===== `skip_on_invalid_json`
-
- * Value type is <>
- * Default value is `false`
-
-Allows you to skip the filter on invalid JSON (so you can handle JSON and
-non-JSON data without warnings).
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The configuration for the JSON filter:
-[source,ruby]
-    source => source_field
-
-For example, if you have JSON data in the `message` field:
-[source,ruby]
-    filter {
-      json {
-        source => "message"
-      }
-    }
-
-The above would parse the JSON from the `message` field.
-
-[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
-===== `tag_on_failure`
-
- * Value type is <>
- * Default value is `["_jsonparsefailure"]`
-
-Append values to the `tags` field when there has been no
-successful match.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Define the target field for placing the parsed data. If this setting is
-omitted, the JSON data will be stored at the root (top level) of the event.
-
-For example, if you want the data to be put in the `doc` field:
-[source,ruby]
-    filter {
-      json {
-        target => "doc"
-      }
-    }
-
-JSON in the value of the `source` field will be expanded into a
-data structure in the `target` field.
-
-NOTE: if the `target` field already exists, it will be overwritten!
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/json-v3.0.5.asciidoc b/docs/versioned-plugins/filters/json-v3.0.5.asciidoc
deleted file mode 100644
index 48670c780..000000000
--- a/docs/versioned-plugins/filters/json-v3.0.5.asciidoc
+++ /dev/null
@@ -1,121 +0,0 @@
-:plugin: json
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.5
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-json/blob/v3.0.5/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Json filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This is a JSON parsing filter. It takes an existing field which contains JSON and
-expands it into an actual data structure within the Logstash event.
-
-By default it will place the parsed JSON in the root (top level) of the Logstash event, but this
-filter can be configured to place the JSON into any arbitrary event field, using the
-`target` configuration.
-
-This plugin has a few fallback scenarios for when something bad happens
-during the parsing of an event.
-If the JSON parsing fails on the data, the event will be left untouched and
-tagged with `_jsonparsefailure`; you can then use conditionals to clean the data.
-You can configure this tag with the `tag_on_failure` option.
-
-If the parsed data contains a `@timestamp` field, we will try to use it for the
-event's `@timestamp`. If the parsing fails, the field will be renamed to
-`_@timestamp` and the event will be tagged with `_timestampparsefailure`.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Json Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-skip_on_invalid_json>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
-  
-
-[id="{version}-plugins-{type}s-{plugin}-skip_on_invalid_json"]
-===== `skip_on_invalid_json`
-
- * Value type is <>
- * Default value is `false`
-
-Allows you to skip the filter on invalid JSON (so you can handle JSON and
-non-JSON data without warnings).
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The configuration for the JSON filter:
-[source,ruby]
-    source => source_field
-
-For example, if you have JSON data in the `message` field:
-[source,ruby]
-    filter {
-      json {
-        source => "message"
-      }
-    }
-
-The above would parse the JSON from the `message` field.
-
-[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
-===== `tag_on_failure`
-
- * Value type is <>
- * Default value is `["_jsonparsefailure"]`
-
-Append values to the `tags` field when there has been no
-successful match.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Define the target field for placing the parsed data. If this setting is
-omitted, the JSON data will be stored at the root (top level) of the event.
- -For example, if you want the data to be put in the `doc` field: -[source,ruby] - filter { - json { - target => "doc" - } - } - -JSON in the value of the `source` field will be expanded into a -data structure in the `target` field. - -NOTE: if the `target` field already exists, it will be overwritten! - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/json_encode-index.asciidoc b/docs/versioned-plugins/filters/json_encode-index.asciidoc deleted file mode 100644 index bbff604c3..000000000 --- a/docs/versioned-plugins/filters/json_encode-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: json_encode -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::json_encode-v3.0.3.asciidoc[] -include::json_encode-v3.0.2.asciidoc[] -include::json_encode-v3.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/filters/json_encode-v3.0.1.asciidoc b/docs/versioned-plugins/filters/json_encode-v3.0.1.asciidoc deleted file mode 100644 index 2b060a5dc..000000000 --- a/docs/versioned-plugins/filters/json_encode-v3.0.1.asciidoc +++ /dev/null @@ -1,76 +0,0 @@ -:plugin: json_encode -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-json_encode/blob/v3.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Json_encode filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -JSON encode filter. Takes a field and serializes it into JSON - -If no target is specified, the source field is overwritten with the JSON -text. - -For example, if you have a field named `foo`, and you want to store the -JSON encoded string in `bar`, do this: -[source,ruby] - filter { - json_encode { - source => "foo" - target => "bar" - } - } - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Json_encode Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The field to convert to JSON. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -The field to write the JSON into. 
If not specified, the source -field will be overwritten. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/json_encode-v3.0.2.asciidoc b/docs/versioned-plugins/filters/json_encode-v3.0.2.asciidoc deleted file mode 100644 index a5559815c..000000000 --- a/docs/versioned-plugins/filters/json_encode-v3.0.2.asciidoc +++ /dev/null @@ -1,76 +0,0 @@ -:plugin: json_encode -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.2 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-json_encode/blob/v3.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Json_encode filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -JSON encode filter. Takes a field and serializes it into JSON - -If no target is specified, the source field is overwritten with the JSON -text. - -For example, if you have a field named `foo`, and you want to store the -JSON encoded string in `bar`, do this: -[source,ruby] - filter { - json_encode { - source => "foo" - target => "bar" - } - } - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Json_encode Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The field to convert to JSON. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -The field to write the JSON into. If not specified, the source -field will be overwritten. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/json_encode-v3.0.3.asciidoc b/docs/versioned-plugins/filters/json_encode-v3.0.3.asciidoc deleted file mode 100644 index f20f3f037..000000000 --- a/docs/versioned-plugins/filters/json_encode-v3.0.3.asciidoc +++ /dev/null @@ -1,76 +0,0 @@ -:plugin: json_encode -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-json_encode/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Json_encode filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -JSON encode filter. Takes a field and serializes it into JSON - -If no target is specified, the source field is overwritten with the JSON -text. - -For example, if you have a field named `foo`, and you want to store the -JSON encoded string in `bar`, do this: -[source,ruby] - filter { - json_encode { - source => "foo" - target => "bar" - } - } - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Json_encode Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The field to convert to JSON. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -The field to write the JSON into. If not specified, the source -field will be overwritten. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/kubernetes_metadata-index.asciidoc b/docs/versioned-plugins/filters/kubernetes_metadata-index.asciidoc deleted file mode 100644 index 78b609133..000000000 --- a/docs/versioned-plugins/filters/kubernetes_metadata-index.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -:plugin: kubernetes_metadata -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -|======================================================================= - - diff --git a/docs/versioned-plugins/filters/kv-index.asciidoc b/docs/versioned-plugins/filters/kv-index.asciidoc deleted file mode 100644 index afe7e4d62..000000000 --- a/docs/versioned-plugins/filters/kv-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: kv -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::kv-v4.0.3.asciidoc[] -include::kv-v4.0.2.asciidoc[] -include::kv-v4.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/filters/kv-v4.0.1.asciidoc b/docs/versioned-plugins/filters/kv-v4.0.1.asciidoc deleted file mode 100644 index 1bedceb04..000000000 --- a/docs/versioned-plugins/filters/kv-v4.0.1.asciidoc +++ /dev/null @@ -1,409 +0,0 @@ -:plugin: kv -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v4.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-kv/blob/v4.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Kv filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This filter helps automatically parse messages (or specific event fields) -which are of the `foo=bar` variety. - -For example, if you have a log message which contains `ip=1.2.3.4 -error=REFUSED`, you can parse those automatically by configuring: -[source,ruby] - filter { - kv { } - } - -The above will result in a message of `ip=1.2.3.4 error=REFUSED` having -the fields: - -* `ip: 1.2.3.4` -* `error: REFUSED` - -This is great for postfix, iptables, and other types of logs that -tend towards `key=value` syntax. - -You can configure any arbitrary strings to split your data on, -in case your data is not structured using `=` signs and whitespace. -For example, this filter can also be used to parse query parameters like -`foo=bar&baz=fizz` by setting the `field_split` parameter to `&`. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Kv Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-allow_duplicate_values>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-default_keys>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclude_keys>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-field_split>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_brackets>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_keys>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-recursive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-remove_char_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-remove_char_value>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-transform_key>> |<>, one of `["lowercase", "uppercase", "capitalize"]`|No -| <<{version}-plugins-{type}s-{plugin}-transform_value>> |<>, one of `["lowercase", "uppercase", "capitalize"]`|No -| <<{version}-plugins-{type}s-{plugin}-trim_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-trim_value>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-value_split>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-allow_duplicate_values"] -===== `allow_duplicate_values` - - * Value type is <> - * Default value is `true` - -A bool option for removing duplicate key/value pairs. When set to false, only -one unique key/value pair will be preserved. - -For example, consider a source like `from=me from=me`. `[from]` will map to -an Array with two elements: `["me", "me"]`. 
To only keep unique key/value pairs,
-you could use this configuration:
-[source,ruby]
-    filter {
-      kv {
-        allow_duplicate_values => false
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-default_keys"]
-===== `default_keys`
-
- * Value type is <>
- * Default value is `{}`
-
-A hash specifying the default keys and their values which should be added to the event
-in case these keys do not exist in the source field being parsed.
-[source,ruby]
-    filter {
-      kv {
-        default_keys => [ "from", "logstash@example.com",
-                          "to", "default@dev.null" ]
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-exclude_keys"]
-===== `exclude_keys`
-
- * Value type is <>
- * Default value is `[]`
-
-An array specifying the parsed keys which should not be added to the event.
-By default no keys will be excluded.
-
-For example, consider a source like `Hey, from=<abc>, to=def foo=bar`.
-To exclude `from` and `to`, but retain the `foo` key, you could use this configuration:
-[source,ruby]
-    filter {
-      kv {
-        exclude_keys => [ "from", "to" ]
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-field_split"]
-===== `field_split`
-
- * Value type is <>
- * Default value is `" "`
-
-A string of characters to use as delimiters for parsing out key-value pairs.
-
-These characters form a regex character class and thus you must escape special regex
-characters like `[` or `]` using `\`.
-
-For example, to split out the args from a URL query string such as
-`?pin=12345~0&d=123&e=foo@bar.com&oq=bobo&ss=12345`:
-[source,ruby]
-    filter {
-      kv {
-        field_split => "&?"
-      }
-    }
-
-The above splits on both `&` and `?` characters, giving you the following
-fields:
-
-* `pin: 12345~0`
-* `d: 123`
-* `e: foo@bar.com`
-* `oq: bobo`
-* `ss: 12345`
-
-[id="{version}-plugins-{type}s-{plugin}-include_brackets"]
-===== `include_brackets`
-
- * Value type is <>
- * Default value is `true`
-
-A boolean specifying whether to treat square brackets, angle brackets,
-and parentheses as value "wrappers" that should be removed from the value.
-[source,ruby]
-    filter {
-      kv {
-        include_brackets => true
-      }
-    }
-
-For example, the result of this line:
-`bracketsone=(hello world) bracketstwo=[hello world] bracketsthree=<hello world>`
-
-will be:
-
-* bracketsone: hello world
-* bracketstwo: hello world
-* bracketsthree: hello world
-
-instead of:
-
-* bracketsone: (hello
-* bracketstwo: [hello
-* bracketsthree: <hello
-
-[id="{version}-plugins-{type}s-{plugin}-include_keys"]
-===== `include_keys`
-
- * Value type is <>
- * Default value is `[]`
-
-An array specifying the parsed keys which should be added to the event.
-By default all keys will be added.
-
-For example, consider a source like `Hey, from=<abc>, to=def foo=bar`.
-To include `from` and `to`, but exclude the `foo` key, you could use this configuration:
-[source,ruby]
-    filter {
-      kv {
-        include_keys => [ "from", "to" ]
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-prefix"]
-===== `prefix`
-
- * Value type is <>
- * Default value is `""`
-
-A string to prepend to all of the extracted keys.
-
-For example, to prepend `arg_` to all keys:
-[source,ruby]
-    filter { kv { prefix => "arg_" } }
-
-[id="{version}-plugins-{type}s-{plugin}-recursive"]
-===== `recursive`
-
- * Value type is <>
- * Default value is `false`
-
-A boolean specifying whether to drill down into values
-and recursively get more key-value pairs from them.
-The extra key-value pairs will be stored as subkeys of the root key.
-
-The default is not to recurse into values.
-[source,ruby]
-    filter {
-      kv {
-        recursive => "true"
-      }
-    }
-
-
-[id="{version}-plugins-{type}s-{plugin}-remove_char_key"]
-===== `remove_char_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-A string of characters to remove from the key.
-
-These characters form a regex character class and thus you must escape special regex
-characters like `[` or `]` using `\`.
-
-Contrary to the trim options, all characters are removed from the key, whatever their position.
-
-For example, to remove `<` `>` `[` `]` and `,` characters from keys:
-[source,ruby]
-    filter {
-      kv {
-        remove_char_key => "<>\[\],"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-remove_char_value"]
-===== `remove_char_value`
-
- * Value type is <>
- * There is no default value for this setting.
-
-A string of characters to remove from the value.
-
-These characters form a regex character class and thus you must escape special regex
-characters like `[` or `]` using `\`.
-
-Contrary to the trim options, all characters are removed from the value, whatever their position.
-
-For example, to remove `<`, `>`, `[`, `]` and `,` characters from values:
-[source,ruby]
-    filter {
-      kv {
-        remove_char_value => "<>\[\],"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
- * Value type is <>
- * Default value is `"message"`
-
-The field to perform `key=value` searching on.
-
-For example, to process the `not_the_message` field:
-[source,ruby]
-    filter { kv { source => "not_the_message" } }
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The name of the container to put all of the key-value pairs into.
-
-If this setting is omitted, fields will be written to the root of the
-event, as individual fields.
-
-For example, to place all keys into the event field `kv`:
-[source,ruby]
-    filter { kv { target => "kv" } }
-
-[id="{version}-plugins-{type}s-{plugin}-transform_key"]
-===== `transform_key`
-
- * Value can be any of: `lowercase`, `uppercase`, `capitalize`
- * There is no default value for this setting.
-
-Transform keys to lowercase, uppercase, or capitalized form.
-
-For example, to lowercase all keys:
-[source,ruby]
-    filter {
-      kv {
-        transform_key => "lowercase"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-transform_value"]
-===== `transform_value`
-
- * Value can be any of: `lowercase`, `uppercase`, `capitalize`
- * There is no default value for this setting.
-
-Transform values to lowercase, uppercase, or capitalized form.
-
-For example, to capitalize all values:
-[source,ruby]
-    filter {
-      kv {
-        transform_value => "capitalize"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-trim_key"]
-===== `trim_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-A string of characters to trim from the key. This is useful if your
-keys are wrapped in brackets or start with space.
-
-These characters form a regex character class and thus you must escape special regex
-characters like `[` or `]` using `\`.
-
-Only leading and trailing characters are trimmed from the key.
-
-For example, to trim `<` `>` `[` `]` and `,` characters from keys:
-[source,ruby]
-    filter {
-      kv {
-        trim_key => "<>\[\],"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-trim_value"]
-===== `trim_value`
-
- * Value type is <>
- * There is no default value for this setting.
-
-A string of characters to trim from the value. This is useful if your
-values are wrapped in brackets or are terminated with commas (like postfix
-logs).
-
-These characters form a regex character class and thus you must escape special regex
-characters like `[` or `]` using `\`.
-
-Only leading and trailing characters are trimmed from the value.
-
-For example, to trim `<`, `>`, `[`, `]` and `,` characters from values:
-[source,ruby]
-    filter {
-      kv {
-        trim_value => "<>\[\],"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-value_split"]
-===== `value_split`
-
- * Value type is <>
- * Default value is `"="`
-
-A non-empty string of characters to use as delimiters for identifying key-value relations.
-
-These characters form a regex character class and thus you must escape special regex
-characters like `[` or `]` using `\`.
-
-For example, to identify key-values such as
-`key1:value1 key2:value2`:
-[source,ruby]
-    filter { kv { value_split => ":" } }
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/kv-v4.0.2.asciidoc b/docs/versioned-plugins/filters/kv-v4.0.2.asciidoc
deleted file mode 100644
index 3d08ad529..000000000
--- a/docs/versioned-plugins/filters/kv-v4.0.2.asciidoc
+++ /dev/null
@@ -1,409 +0,0 @@
-:plugin: kv
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.0.2
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-kv/blob/v4.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Kv filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This filter helps automatically parse messages (or specific event fields)
-which are of the `foo=bar` variety.
-
-For example, if you have a log message which contains `ip=1.2.3.4
-error=REFUSED`, you can parse those automatically by configuring:
-[source,ruby]
-    filter {
-      kv { }
-    }
-
-The above will result in a message of `ip=1.2.3.4 error=REFUSED` having
-the fields:
-
-* `ip: 1.2.3.4`
-* `error: REFUSED`
-
-This is great for postfix, iptables, and other types of logs that
-tend towards `key=value` syntax.
-
-You can configure any arbitrary strings to split your data on,
-in case your data is not structured using `=` signs and whitespace.
-For example, this filter can also be used to parse query parameters like
-`foo=bar&baz=fizz` by setting the `field_split` parameter to `&`.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Kv Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-allow_duplicate_values>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-default_keys>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-exclude_keys>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-field_split>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-include_brackets>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-include_keys>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-recursive>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-remove_char_key>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-remove_char_value>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-transform_key>> |<>, one of `["lowercase", "uppercase", "capitalize"]`|No
-| <<{version}-plugins-{type}s-{plugin}-transform_value>> |<>, one of `["lowercase", "uppercase", "capitalize"]`|No
-| <<{version}-plugins-{type}s-{plugin}-trim_key>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-trim_value>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-value_split>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-allow_duplicate_values"]
-===== `allow_duplicate_values`
-
- * Value type is <>
- * Default value is `true`
-
-A bool option for removing duplicate key/value pairs. When set to false, only
-one unique key/value pair will be preserved.
-
-For example, consider a source like `from=me from=me`. `[from]` will map to
-an Array with two elements: `["me", "me"]`. To only keep unique key/value pairs,
-you could use this configuration:
-[source,ruby]
-    filter {
-      kv {
-        allow_duplicate_values => false
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-default_keys"]
-===== `default_keys`
-
- * Value type is <>
- * Default value is `{}`
-
-A hash specifying the default keys and their values which should be added to the event
-in case these keys do not exist in the source field being parsed.
-[source,ruby]
-    filter {
-      kv {
-        default_keys => [ "from", "logstash@example.com",
-                          "to", "default@dev.null" ]
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-exclude_keys"]
-===== `exclude_keys`
-
- * Value type is <>
- * Default value is `[]`
-
-An array specifying the parsed keys which should not be added to the event.
-By default no keys will be excluded.
-
-For example, consider a source like `Hey, from=<abc>, to=def foo=bar`.
-To exclude `from` and `to`, but retain the `foo` key, you could use this configuration:
-[source,ruby]
-    filter {
-      kv {
-        exclude_keys => [ "from", "to" ]
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-field_split"]
-===== `field_split`
-
- * Value type is <>
- * Default value is `" "`
-
-A string of characters to use as delimiters for parsing out key-value pairs.
-
-These characters form a regex character class and thus you must escape special regex
-characters like `[` or `]` using `\`.
-
-For example, to split out the args from a URL query string such as
-`?pin=12345~0&d=123&e=foo@bar.com&oq=bobo&ss=12345`:
-[source,ruby]
-    filter {
-      kv {
-        field_split => "&?"
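-        # both "&" and "?" act as literal delimiter characters here: the
-        # string forms a regex character class, so neither needs escaping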
-      }
-    }
-
-The above splits on both `&` and `?` characters, giving you the following
-fields:
-
-* `pin: 12345~0`
-* `d: 123`
-* `e: foo@bar.com`
-* `oq: bobo`
-* `ss: 12345`
-
-[id="{version}-plugins-{type}s-{plugin}-include_brackets"]
-===== `include_brackets`
-
- * Value type is <>
- * Default value is `true`
-
-A boolean specifying whether to treat square brackets, angle brackets,
-and parentheses as value "wrappers" that should be removed from the value.
-[source,ruby]
-    filter {
-      kv {
-        include_brackets => true
-      }
-    }
-
-For example, the result of this line:
-`bracketsone=(hello world) bracketstwo=[hello world] bracketsthree=<hello world>`
-
-will be:
-
-* bracketsone: hello world
-* bracketstwo: hello world
-* bracketsthree: hello world
-
-instead of:
-
-* bracketsone: (hello
-* bracketstwo: [hello
-* bracketsthree: <hello
-
-[id="{version}-plugins-{type}s-{plugin}-include_keys"]
-===== `include_keys`
-
- * Value type is <>
- * Default value is `[]`
-
-An array specifying the parsed keys which should be added to the event.
-By default all keys will be added.
-
-For example, consider a source like `Hey, from=<abc>, to=def foo=bar`.
-To include `from` and `to`, but exclude the `foo` key, you could use this configuration:
-[source,ruby]
-    filter {
-      kv {
-        include_keys => [ "from", "to" ]
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-prefix"]
-===== `prefix`
-
- * Value type is <>
- * Default value is `""`
-
-A string to prepend to all of the extracted keys.
-
-For example, to prepend `arg_` to all keys:
-[source,ruby]
-    filter { kv { prefix => "arg_" } }
-
-[id="{version}-plugins-{type}s-{plugin}-recursive"]
-===== `recursive`
-
- * Value type is <>
- * Default value is `false`
-
-A boolean specifying whether to drill down into values
-and recursively get more key-value pairs from them.
-The extra key-value pairs will be stored as subkeys of the root key.
-
-The default is not to recurse into values.
-[source,ruby]
-    filter {
-      kv {
-        recursive => "true"
-      }
-    }
-
-
-[id="{version}-plugins-{type}s-{plugin}-remove_char_key"]
-===== `remove_char_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-A string of characters to remove from the key.
-
-These characters form a regex character class and thus you must escape special regex
-characters like `[` or `]` using `\`.
-
-Contrary to the trim options, all characters are removed from the key, whatever their position.
-
-For example, to remove `<` `>` `[` `]` and `,` characters from keys:
-[source,ruby]
-    filter {
-      kv {
-        remove_char_key => "<>\[\],"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-remove_char_value"]
-===== `remove_char_value`
-
- * Value type is <>
- * There is no default value for this setting.
-
-A string of characters to remove from the value.
-
-These characters form a regex character class and thus you must escape special regex
-characters like `[` or `]` using `\`.
-
-Contrary to the trim options, all characters are removed from the value, whatever their position.
-
-For example, to remove `<`, `>`, `[`, `]` and `,` characters from values:
-[source,ruby]
-    filter {
-      kv {
-        remove_char_value => "<>\[\],"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
- * Value type is <>
- * Default value is `"message"`
-
-The field to perform `key=value` searching on.
-
-For example, to process the `not_the_message` field:
-[source,ruby]
-    filter { kv { source => "not_the_message" } }
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The name of the container to put all of the key-value pairs into.
-
-If this setting is omitted, fields will be written to the root of the
-event, as individual fields.
-
-For example, to place all keys into the event field `kv`:
-[source,ruby]
-    filter { kv { target => "kv" } }
-
-[id="{version}-plugins-{type}s-{plugin}-transform_key"]
-===== `transform_key`
-
- * Value can be any of: `lowercase`, `uppercase`, `capitalize`
- * There is no default value for this setting.
-
-Transform keys to lowercase, uppercase, or capitalized form.
-
-For example, to lowercase all keys:
-[source,ruby]
-    filter {
-      kv {
-        transform_key => "lowercase"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-transform_value"]
-===== `transform_value`
-
- * Value can be any of: `lowercase`, `uppercase`, `capitalize`
- * There is no default value for this setting.
-
-Transform values to lowercase, uppercase, or capitalized form.
-
-For example, to capitalize all values:
-[source,ruby]
-    filter {
-      kv {
-        transform_value => "capitalize"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-trim_key"]
-===== `trim_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-A string of characters to trim from the key. This is useful if your
-keys are wrapped in brackets or start with space.
-
-These characters form a regex character class and thus you must escape special regex
-characters like `[` or `]` using `\`.
-
-Only leading and trailing characters are trimmed from the key.
-
-For example, to trim `<` `>` `[` `]` and `,` characters from keys:
-[source,ruby]
-    filter {
-      kv {
-        trim_key => "<>\[\],"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-trim_value"]
-===== `trim_value`
-
- * Value type is <>
- * There is no default value for this setting.
-
-A string of characters to trim from the value. This is useful if your
-values are wrapped in brackets or are terminated with commas (like postfix
-logs).
-
-These characters form a regex character class and thus you must escape special regex
-characters like `[` or `]` using `\`.
-
-Only leading and trailing characters are trimmed from the value.
-
-For example, to trim `<`, `>`, `[`, `]` and `,` characters from values:
-[source,ruby]
-    filter {
-      kv {
-        trim_value => "<>\[\],"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-value_split"]
-===== `value_split`
-
- * Value type is <>
- * Default value is `"="`
-
-A non-empty string of characters to use as delimiters for identifying key-value relations.
-
-These characters form a regex character class and thus you must escape special regex
-characters like `[` or `]` using `\`.
-
-For example, to identify key-values such as
-`key1:value1 key2:value2`:
-[source,ruby]
-    filter { kv { value_split => ":" } }
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/kv-v4.0.3.asciidoc b/docs/versioned-plugins/filters/kv-v4.0.3.asciidoc
deleted file mode 100644
index 2dbd0070c..000000000
--- a/docs/versioned-plugins/filters/kv-v4.0.3.asciidoc
+++ /dev/null
@@ -1,409 +0,0 @@
-:plugin: kv
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.0.3
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-kv/blob/v4.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Kv filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This filter helps automatically parse messages (or specific event fields) -which are of the `foo=bar` variety. - -For example, if you have a log message which contains `ip=1.2.3.4 -error=REFUSED`, you can parse those automatically by configuring: -[source,ruby] - filter { - kv { } - } - -The above will result in a message of `ip=1.2.3.4 error=REFUSED` having -the fields: - -* `ip: 1.2.3.4` -* `error: REFUSED` - -This is great for postfix, iptables, and other types of logs that -tend towards `key=value` syntax. - -You can configure any arbitrary strings to split your data on, -in case your data is not structured using `=` signs and whitespace. -For example, this filter can also be used to parse query parameters like -`foo=bar&baz=fizz` by setting the `field_split` parameter to `&`. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Kv Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-allow_duplicate_values>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-default_keys>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclude_keys>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-field_split>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_brackets>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_keys>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-recursive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-remove_char_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-remove_char_value>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-transform_key>> |<>, one of `["lowercase", "uppercase", "capitalize"]`|No -| <<{version}-plugins-{type}s-{plugin}-transform_value>> |<>, one of `["lowercase", "uppercase", "capitalize"]`|No -| <<{version}-plugins-{type}s-{plugin}-trim_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-trim_value>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-value_split>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-allow_duplicate_values"] -===== `allow_duplicate_values` - - * Value type is <> - * Default value is `true` - -A bool option for removing duplicate key/value pairs. When set to false, only -one unique key/value pair will be preserved. - -For example, consider a source like `from=me from=me`. `[from]` will map to -an Array with two elements: `["me", "me"]`. To only keep unique key/value pairs, -you could use this configuration: -[source,ruby] - filter { - kv { - allow_duplicate_values => false - } - } - -[id="{version}-plugins-{type}s-{plugin}-default_keys"] -===== `default_keys` - - * Value type is <> - * Default value is `{}` - -A hash specifying the default keys and their values which should be added to the event -in case these keys do not exist in the source field being parsed. 
-[source,ruby]
-    filter {
-      kv {
-        default_keys => [ "from", "logstash@example.com",
-                          "to", "default@dev.null" ]
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-exclude_keys"]
-===== `exclude_keys`
-
- * Value type is <>
- * Default value is `[]`
-
-An array specifying the parsed keys which should not be added to the event.
-By default no keys will be excluded.
-
-For example, consider a source like `Hey, from=<abc>, to=def foo=bar`.
-To exclude `from` and `to`, but retain the `foo` key, you could use this configuration:
-[source,ruby]
-    filter {
-      kv {
-        exclude_keys => [ "from", "to" ]
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-field_split"]
-===== `field_split`
-
- * Value type is <>
- * Default value is `" "`
-
-A string of characters to use as delimiters for parsing out key-value pairs.
-
-These characters form a regex character class and thus you must escape special regex
-characters like `[` or `]` using `\`.
-
-For example, to split out the args from a URL query string such as
-`?pin=12345~0&d=123&e=foo@bar.com&oq=bobo&ss=12345`:
-[source,ruby]
-    filter {
-      kv {
-        field_split => "&?"
-      }
-    }
-
-The above splits on both `&` and `?` characters, giving you the following
-fields:
-
-* `pin: 12345~0`
-* `d: 123`
-* `e: foo@bar.com`
-* `oq: bobo`
-* `ss: 12345`
-
-[id="{version}-plugins-{type}s-{plugin}-include_brackets"]
-===== `include_brackets`
-
- * Value type is <>
- * Default value is `true`
-
-A boolean specifying whether to treat square brackets, angle brackets,
-and parentheses as value "wrappers" that should be removed from the value.
-[source,ruby]
-    filter {
-      kv {
-        include_brackets => true
-      }
-    }
-
-For example, the result of this line:
-`bracketsone=(hello world) bracketstwo=[hello world] bracketsthree=<hello world>`
-
-will be:
-
-* bracketsone: hello world
-* bracketstwo: hello world
-* bracketsthree: hello world
-
-instead of:
-
-* bracketsone: (hello
-* bracketstwo: [hello
-* bracketsthree: <hello
-
-[id="{version}-plugins-{type}s-{plugin}-include_keys"]
-===== `include_keys`
-
- * Value type is <>
- * Default value is `[]`
-
-An array specifying the parsed keys which should be added to the event.
-By default all keys will be added.
-
-For example, consider a source like `Hey, from=<abc>, to=def foo=bar`.
-To include `from` and `to`, but exclude the `foo` key, you could use this configuration:
-[source,ruby]
-    filter {
-      kv {
-        include_keys => [ "from", "to" ]
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-prefix"]
-===== `prefix`
-
- * Value type is <>
- * Default value is `""`
-
-A string to prepend to all of the extracted keys.
-
-For example, to prepend `arg_` to all keys:
-[source,ruby]
-    filter { kv { prefix => "arg_" } }
-
-[id="{version}-plugins-{type}s-{plugin}-recursive"]
-===== `recursive`
-
- * Value type is <>
- * Default value is `false`
-
-A boolean specifying whether to drill down into values
-and recursively get more key-value pairs from them.
-The extra key-value pairs will be stored as subkeys of the root key.
-
-The default is not to recurse into values.
-[source,ruby]
-    filter {
-      kv {
-        recursive => "true"
-      }
-    }
-
-
-[id="{version}-plugins-{type}s-{plugin}-remove_char_key"]
-===== `remove_char_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-A string of characters to remove from the key.
-
-These characters form a regex character class and thus you must escape special regex
-characters like `[` or `]` using `\`.
-
-Contrary to the trim options, all characters are removed from the key, whatever their position.
-
-For example, to remove `<` `>` `[` `]` and `,` characters from keys:
-[source,ruby]
-    filter {
-      kv {
-        remove_char_key => "<>\[\],"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-remove_char_value"]
-===== `remove_char_value`
-
- * Value type is <>
- * There is no default value for this setting.
-
-A string of characters to remove from the value.
-
-These characters form a regex character class and thus you must escape special regex
-characters like `[` or `]` using `\`.
-
-Contrary to the trim options, all characters are removed from the value, whatever their position.
-
-For example, to remove `<`, `>`, `[`, `]` and `,` characters from values:
-[source,ruby]
-    filter {
-      kv {
-        remove_char_value => "<>\[\],"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
- * Value type is <>
- * Default value is `"message"`
-
-The field to perform `key=value` searching on.
-
-For example, to process the `not_the_message` field:
-[source,ruby]
-    filter { kv { source => "not_the_message" } }
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The name of the container to put all of the key-value pairs into.
-
-If this setting is omitted, fields will be written to the root of the
-event, as individual fields.
-
-For example, to place all keys into the event field `kv`:
-[source,ruby]
-    filter { kv { target => "kv" } }
-
-[id="{version}-plugins-{type}s-{plugin}-transform_key"]
-===== `transform_key`
-
- * Value can be any of: `lowercase`, `uppercase`, `capitalize`
- * There is no default value for this setting.
-
-Transform keys to lowercase, uppercase, or capitalized form.
-
-For example, to lowercase all keys:
-[source,ruby]
-    filter {
-      kv {
-        transform_key => "lowercase"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-transform_value"]
-===== `transform_value`
-
- * Value can be any of: `lowercase`, `uppercase`, `capitalize`
- * There is no default value for this setting.
-
-Transform values to lowercase, uppercase, or capitalized form.
-
-For example, to capitalize all values:
-[source,ruby]
-    filter {
-      kv {
-        transform_value => "capitalize"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-trim_key"]
-===== `trim_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-A string of characters to trim from the key. This is useful if your
-keys are wrapped in brackets or start with space.
-
-These characters form a regex character class and thus you must escape special regex
-characters like `[` or `]` using `\`.
-
-Only leading and trailing characters are trimmed from the key.
-
-For example, to trim `<` `>` `[` `]` and `,` characters from keys:
-[source,ruby]
-    filter {
-      kv {
-        trim_key => "<>\[\],"
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-trim_value"]
-===== `trim_value`
-
- * Value type is <>
- * There is no default value for this setting.
-
-A string of characters to trim from the value. This is useful if your
-values are wrapped in brackets or are terminated with commas (like postfix
-logs).
-
-These characters form a regex character class and thus you must escape special regex
-characters like `[` or `]` using `\`.
-
-Only leading and trailing characters are trimmed from the value.
- -For example, to trim `<`, `>`, `[`, `]` and `,` characters from values: -[source,ruby] - filter { - kv { - trim_value => "<>\[\]," - } - } - -[id="{version}-plugins-{type}s-{plugin}-value_split"] -===== `value_split` - - * Value type is <> - * Default value is `"="` - -A non-empty string of characters to use as delimiters for identifying key-value relations. - -These characters form a regex character class and thus you must escape special regex -characters like `[` or `]` using `\`. - -For example, to identify key-values such as -`key1:value1 key2:value2`: -[source,ruby] - filter { kv { value_split => ":" } } - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/language-index.asciidoc b/docs/versioned-plugins/filters/language-index.asciidoc deleted file mode 100644 index 6feb0e015..000000000 --- a/docs/versioned-plugins/filters/language-index.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -:plugin: language -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -|======================================================================= - - diff --git a/docs/versioned-plugins/filters/lookup-index.asciidoc b/docs/versioned-plugins/filters/lookup-index.asciidoc deleted file mode 100644 index a488c3855..000000000 --- a/docs/versioned-plugins/filters/lookup-index.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -:plugin: lookup -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -|======================================================================= - - diff --git a/docs/versioned-plugins/filters/math-index.asciidoc b/docs/versioned-plugins/filters/math-index.asciidoc deleted file mode 100644 index f2aba84a1..000000000 --- a/docs/versioned-plugins/filters/math-index.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -:plugin: math -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -|======================================================================= - - diff --git a/docs/versioned-plugins/filters/metaevent-index.asciidoc b/docs/versioned-plugins/filters/metaevent-index.asciidoc deleted file mode 100644 index c954ee15b..000000000 --- a/docs/versioned-plugins/filters/metaevent-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: metaevent -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::metaevent-v2.0.7.asciidoc[] -include::metaevent-v2.0.5.asciidoc[] - diff --git a/docs/versioned-plugins/filters/metaevent-v2.0.5.asciidoc b/docs/versioned-plugins/filters/metaevent-v2.0.5.asciidoc deleted file mode 100644 index 05a009ee1..000000000 --- a/docs/versioned-plugins/filters/metaevent-v2.0.5.asciidoc +++ /dev/null @@ -1,62 +0,0 @@ -:plugin: metaevent -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v2.0.5 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-metaevent/blob/v2.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Metaevent filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Metaevent Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-followed_by_tags>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-period>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-followed_by_tags"] -===== `followed_by_tags` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -syntax: `followed_by_tags => [ "tag", "tag" ]` - -[id="{version}-plugins-{type}s-{plugin}-period"] -===== `period` - - * Value type is <> - * Default value is `5` - -syntax: `period => 60` - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/metaevent-v2.0.7.asciidoc b/docs/versioned-plugins/filters/metaevent-v2.0.7.asciidoc deleted file mode 100644 index 7394fcb66..000000000 --- a/docs/versioned-plugins/filters/metaevent-v2.0.7.asciidoc +++ /dev/null @@ -1,62 +0,0 @@ -:plugin: metaevent -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.0.7 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-metaevent/blob/v2.0.7/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Metaevent filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Periodically group all events under a certain list of tags into a single event. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Metaevent Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-followed_by_tags>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-period>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. 
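-
-Before the individual settings, here is a minimal sketch of a configuration;
-the tag names are hypothetical placeholders for tags that earlier filters in
-the pipeline would set:
-[source,ruby]
-    filter {
-      metaevent {
-        followed_by_tags => [ "success", "failure" ]
-        period => 60
-      }
-    }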
- -  - -[id="{version}-plugins-{type}s-{plugin}-followed_by_tags"] -===== `followed_by_tags` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -syntax: `followed_by_tags => [ "tag", "tag" ]` - -[id="{version}-plugins-{type}s-{plugin}-period"] -===== `period` - - * Value type is <> - * Default value is `5` - -syntax: `period => 60` - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/metricize-index.asciidoc b/docs/versioned-plugins/filters/metricize-index.asciidoc deleted file mode 100644 index 79132cc71..000000000 --- a/docs/versioned-plugins/filters/metricize-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: metricize -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::metricize-v3.0.3.asciidoc[] -include::metricize-v3.0.2.asciidoc[] -include::metricize-v3.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/filters/metricize-v3.0.1.asciidoc b/docs/versioned-plugins/filters/metricize-v3.0.1.asciidoc deleted file mode 100644 index f720d899b..000000000 --- a/docs/versioned-plugins/filters/metricize-v3.0.1.asciidoc +++ /dev/null @@ -1,109 +0,0 @@ -:plugin: metricize -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-metricize/blob/v3.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Metricize filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The metricize filter takes complex events containing a number of metrics -and splits these up into multiple events, each holding a single metric. - -Example: - - Assume the following filter configuration: - - filter { - metricize { - metrics => [ "metric1", "metric2" ] - } - } - - Assuming the following event is passed in: - - { - type => "type A" - metric1 => "value1" - metric2 => "value2" - } - - This will result in the following 2 events being generated in addition to the original event: - - { { - type => "type A" type => "type A" - metric => "metric1" metric => "metric2" - value => "value1" value => "value2" - } } - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Metricize Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-drop_original_event>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metric_field_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-value_field_name>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-drop_original_event"] -===== `drop_original_event` - - * Value type is <> - * Default value is `false` - -Flag indicating whether the original event should be dropped or not. - -[id="{version}-plugins-{type}s-{plugin}-metric_field_name"] -===== `metric_field_name` - - * Value type is <> - * Default value is `"metric"` - -Name of the field the metric name will be written to. - -[id="{version}-plugins-{type}s-{plugin}-metrics"] -===== `metrics` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -A new matrics event will be created for each metric field in this list. -All fields in this list will be removed from generated events. - -[id="{version}-plugins-{type}s-{plugin}-value_field_name"] -===== `value_field_name` - - * Value type is <> - * Default value is `"value"` - -Name of the field the metric value will be written to. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/metricize-v3.0.2.asciidoc b/docs/versioned-plugins/filters/metricize-v3.0.2.asciidoc deleted file mode 100644 index 9f06ad1d9..000000000 --- a/docs/versioned-plugins/filters/metricize-v3.0.2.asciidoc +++ /dev/null @@ -1,109 +0,0 @@ -:plugin: metricize -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.2 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-metricize/blob/v3.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Metricize filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The metricize filter takes complex events containing a number of metrics -and splits these up into multiple events, each holding a single metric. - -Example: - - Assume the following filter configuration: - - filter { - metricize { - metrics => [ "metric1", "metric2" ] - } - } - - Assuming the following event is passed in: - - { - type => "type A" - metric1 => "value1" - metric2 => "value2" - } - - This will result in the following 2 events being generated in addition to the original event: - - { { - type => "type A" type => "type A" - metric => "metric1" metric => "metric2" - value => "value1" value => "value2" - } } - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Metricize Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-drop_original_event>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metric_field_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-value_field_name>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-drop_original_event"] -===== `drop_original_event` - - * Value type is <> - * Default value is `false` - -Flag indicating whether the original event should be dropped or not. - -[id="{version}-plugins-{type}s-{plugin}-metric_field_name"] -===== `metric_field_name` - - * Value type is <> - * Default value is `"metric"` - -Name of the field the metric name will be written to. - -[id="{version}-plugins-{type}s-{plugin}-metrics"] -===== `metrics` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -A new matrics event will be created for each metric field in this list. -All fields in this list will be removed from generated events. - -[id="{version}-plugins-{type}s-{plugin}-value_field_name"] -===== `value_field_name` - - * Value type is <> - * Default value is `"value"` - -Name of the field the metric value will be written to. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/metricize-v3.0.3.asciidoc b/docs/versioned-plugins/filters/metricize-v3.0.3.asciidoc deleted file mode 100644 index cffc8245b..000000000 --- a/docs/versioned-plugins/filters/metricize-v3.0.3.asciidoc +++ /dev/null @@ -1,109 +0,0 @@ -:plugin: metricize -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-metricize/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Metricize filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The metricize filter takes complex events containing a number of metrics -and splits these up into multiple events, each holding a single metric. - -Example: - - Assume the following filter configuration: - - filter { - metricize { - metrics => [ "metric1", "metric2" ] - } - } - - Assuming the following event is passed in: - - { - type => "type A" - metric1 => "value1" - metric2 => "value2" - } - - This will result in the following 2 events being generated in addition to the original event: - - { { - type => "type A" type => "type A" - metric => "metric1" metric => "metric2" - value => "value1" value => "value2" - } } - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Metricize Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-drop_original_event>> |<<boolean,boolean>>|No -| <<{version}-plugins-{type}s-{plugin}-metric_field_name>> |<<string,string>>|No -| <<{version}-plugins-{type}s-{plugin}-metrics>> |<<array,array>>|Yes -| <<{version}-plugins-{type}s-{plugin}-value_field_name>> |<<string,string>>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-drop_original_event"] -===== `drop_original_event` - - * Value type is <<boolean,boolean>> - * Default value is `false` - -Flag indicating whether the original event should be dropped or not. - -[id="{version}-plugins-{type}s-{plugin}-metric_field_name"] -===== `metric_field_name` - - * Value type is <<string,string>> - * Default value is `"metric"` - -Name of the field the metric name will be written to. - -[id="{version}-plugins-{type}s-{plugin}-metrics"] -===== `metrics` - - * This is a required setting. - * Value type is <<array,array>> - * There is no default value for this setting. - -A new metrics event will be created for each metric field in this list. -All fields in this list will be removed from generated events. - -[id="{version}-plugins-{type}s-{plugin}-value_field_name"] -===== `value_field_name` - - * Value type is <<string,string>> - * Default value is `"value"` - -Name of the field the metric value will be written to. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/metrics-index.asciidoc b/docs/versioned-plugins/filters/metrics-index.asciidoc deleted file mode 100644 index 4d84457dc..000000000 --- a/docs/versioned-plugins/filters/metrics-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: metrics -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <<v4.0.5-plugins-filters-metrics,v4.0.5>> | 2017-11-07 -| <<v4.0.4-plugins-filters-metrics,v4.0.4>> | 2017-08-15 -| <<v4.0.3-plugins-filters-metrics,v4.0.3>> | 2017-06-23 -|======================================================================= - -include::metrics-v4.0.5.asciidoc[] -include::metrics-v4.0.4.asciidoc[] -include::metrics-v4.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/filters/metrics-v4.0.3.asciidoc b/docs/versioned-plugins/filters/metrics-v4.0.3.asciidoc deleted file mode 100644 index 15cd2d72e..000000000 --- a/docs/versioned-plugins/filters/metrics-v4.0.3.asciidoc +++ /dev/null @@ -1,228 +0,0 @@ -:plugin: metrics -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-metrics/blob/v4.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Metrics filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The metrics filter is useful for aggregating metrics. - -IMPORTANT: Elasticsearch 2.0 no longer allows field names with dots.
Version 3.0 -of the metrics filter plugin changes behavior to use nested fields rather than -dotted notation to avoid colliding with versions of Elasticsearch 2.0+. Please -note the changes in the documentation (underscores and sub-fields used). - -For example, if you have a field `response` that is -an HTTP response code, and you want to count each -kind of response, you can do this: -[source,ruby] - filter { - metrics { - meter => [ "http_%{response}" ] - add_tag => "metric" - } - } - -Metrics are flushed every 5 seconds by default or according to -`flush_interval`. Metrics appear as -new events in the event stream and go through any filters -that occur afterward, as well as outputs. - -In general, you will want to add a tag to your metrics and have an output -explicitly look for that tag. - -The event that is flushed will include every 'meter' and 'timer' -metric in the following way: - -==== `meter` values - -For a `meter => "thing"` you will receive the following fields: - -* "[thing][count]" - the total count of events -* "[thing][rate_1m]" - the per-second event rate in a 1-minute sliding window -* "[thing][rate_5m]" - the per-second event rate in a 5-minute sliding window -* "[thing][rate_15m]" - the per-second event rate in a 15-minute sliding window - -==== `timer` values - -For a `timer => [ "thing", "%{duration}" ]` you will receive the following fields: - -* "[thing][count]" - the total count of events -* "[thing][rate_1m]" - the per-second event rate in a 1-minute sliding window -* "[thing][rate_5m]" - the per-second event rate in a 5-minute sliding window -* "[thing][rate_15m]" - the per-second event rate in a 15-minute sliding window -* "[thing][min]" - the minimum value seen for this metric -* "[thing][max]" - the maximum value seen for this metric -* "[thing][stddev]" - the standard deviation for this metric -* "[thing][mean]" - the mean for this metric -* "[thing][pXX]" - the XXth percentile for this metric (see `percentiles`) - -The default lengths of the event rate window (1, 5, and 15 minutes) -can be configured with the `rates` option. - -==== Example: Computing event rate - -For a simple example, let's track how many events per second are running -through Logstash: -[source,ruby] ----- - input { - generator { - type => "generated" - } - } - - filter { - if [type] == "generated" { - metrics { - meter => "events" - add_tag => "metric" - } - } - } - - output { - # only emit events with the 'metric' tag - if "metric" in [tags] { - stdout { - codec => line { - format => "rate: %{[events][rate_1m]}" - } - } - } - } ----- - -Running the above: -[source,ruby] - % bin/logstash -f example.conf - rate: 23721.983566819246 - rate: 24811.395722536377 - rate: 25875.892745934525 - rate: 26836.42375967113 - -We see the output includes our events' 1-minute rate. - -In the real world, you would emit this to graphite or another metrics store, -like so: -[source,ruby] - output { - graphite { - metrics => [ "events.rate_1m", "%{[events][rate_1m]}" ] - } - } - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Metrics Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-clear_interval>> |<<number,number>>|No -| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |<<number,number>>|No -| <<{version}-plugins-{type}s-{plugin}-ignore_older_than>> |<<number,number>>|No -| <<{version}-plugins-{type}s-{plugin}-meter>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-percentiles>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-rates>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-timer>> |<<hash,hash>>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-clear_interval"] -===== `clear_interval` - - * Value type is <<number,number>> - * Default value is `-1` - -The clear interval, when all counters are reset. - -If set to -1, the default value, the metrics will never be cleared. -Otherwise, it should be a multiple of 5s. - -[id="{version}-plugins-{type}s-{plugin}-flush_interval"] -===== `flush_interval` - - * Value type is <<number,number>> - * Default value is `5` - -The flush interval, when the metrics event is created. Must be a multiple of 5s. - -[id="{version}-plugins-{type}s-{plugin}-ignore_older_than"] -===== `ignore_older_than` - - * Value type is <<number,number>> - * Default value is `0` - -Don't track events that have `@timestamp` older than some number of seconds. - -This is useful if you want to only include events that are near real-time -in your metrics. - -For example, to only count events that are within 10 seconds of real-time, you -would do this: - - filter { - metrics { - meter => [ "hits" ] - ignore_older_than => 10 - } - } - -[id="{version}-plugins-{type}s-{plugin}-meter"] -===== `meter` - - * Value type is <<array,array>> - * Default value is `[]` - -syntax: `meter => [ "name of metric", "name of metric" ]` - -[id="{version}-plugins-{type}s-{plugin}-percentiles"] -===== `percentiles` - - * Value type is <<array,array>> - * Default value is `[1, 5, 10, 90, 95, 99, 100]` - -The percentiles that should be measured and emitted for timer values. - -[id="{version}-plugins-{type}s-{plugin}-rates"] -===== `rates` - - * Value type is <<array,array>> - * Default value is `[1, 5, 15]` - -The rates that should be measured, in minutes. -Possible values are 1, 5, and 15. - -[id="{version}-plugins-{type}s-{plugin}-timer"] -===== `timer` - - * Value type is <<hash,hash>> - * Default value is `{}` - -syntax: `timer => [ "name of metric", "%{time_value}" ]` - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/metrics-v4.0.4.asciidoc b/docs/versioned-plugins/filters/metrics-v4.0.4.asciidoc deleted file mode 100644 index f88dd5a2d..000000000 --- a/docs/versioned-plugins/filters/metrics-v4.0.4.asciidoc +++ /dev/null @@ -1,228 +0,0 @@ -:plugin: metrics -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.0.4 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-metrics/blob/v4.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT!
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Metrics filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The metrics filter is useful for aggregating metrics. - -IMPORTANT: Elasticsearch 2.0 no longer allows field names with dots. Version 3.0 -of the metrics filter plugin changes behavior to use nested fields rather than -dotted notation to avoid colliding with versions of Elasticsearch 2.0+. Please -note the changes in the documentation (underscores and sub-fields used). - -For example, if you have a field `response` that is -an HTTP response code, and you want to count each -kind of response, you can do this: -[source,ruby] - filter { - metrics { - meter => [ "http_%{response}" ] - add_tag => "metric" - } - } - -Metrics are flushed every 5 seconds by default or according to -`flush_interval`. Metrics appear as -new events in the event stream and go through any filters -that occur afterward, as well as outputs. - -In general, you will want to add a tag to your metrics and have an output -explicitly look for that tag. - -The event that is flushed will include every 'meter' and 'timer' -metric in the following way: - -==== `meter` values - -For a `meter => "thing"` you will receive the following fields: - -* "[thing][count]" - the total count of events -* "[thing][rate_1m]" - the per-second event rate in a 1-minute sliding window -* "[thing][rate_5m]" - the per-second event rate in a 5-minute sliding window -* "[thing][rate_15m]" - the per-second event rate in a 15-minute sliding window - -==== `timer` values - -For a `timer => { "thing" => "%{duration}" }` you will receive the following fields: - -* "[thing][count]" - the total count of events -* "[thing][rate_1m]" - the per-second average value in a 1-minute sliding window -* "[thing][rate_5m]" - the per-second average value in a 5-minute sliding window -* "[thing][rate_15m]" - the per-second average value in a 15-minute sliding window -* "[thing][min]" - the minimum value seen for this metric -* "[thing][max]" - the maximum value seen for this metric -* "[thing][stddev]" - the standard deviation for this metric -* "[thing][mean]" - the mean for this metric -* "[thing][pXX]" - the XXth percentile for this metric (see `percentiles`) - -The default lengths of the event rate window (1, 5, and 15 minutes) -can be configured with the `rates` option. - -==== Example: Computing event rate - -For a simple example, let's track how many events per second are running -through Logstash: -[source,ruby] ----- - input { - generator { - type => "generated" - } - } - - filter { - if [type] == "generated" { - metrics { - meter => "events" - add_tag => "metric" - } - } - } - - output { - # only emit events with the 'metric' tag - if "metric" in [tags] { - stdout { - codec => line { - format => "rate: %{[events][rate_1m]}" - } - } - } - } ----- - -Running the above: -[source,ruby] - % bin/logstash -f example.conf - rate: 23721.983566819246 - rate: 24811.395722536377 - rate: 25875.892745934525 - rate: 26836.42375967113 - -We see the output includes our events' 1-minute rate.
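-The same pipeline pattern applies to `timer` metrics. A minimal sketch, -assuming a field named `duration` that holds a request time in milliseconds -(the field name is illustrative, not part of the plugin): -[source,ruby] - filter { - metrics { - # record a timer keyed on the illustrative duration field - timer => { "request_time" => "%{duration}" } - add_tag => "metric" - } - } - -Each flush should then emit `[request_time][min]`, `[request_time][max]`, -`[request_time][mean]`, and the percentiles configured via `percentiles` -(for example `[request_time][p99]`).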
- -In the real world, you would emit this to graphite or another metrics store, -like so: -[source,ruby] - output { - graphite { - metrics => [ "events.rate_1m", "%{[events][rate_1m]}" ] - } - } - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Metrics Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-clear_interval>> |<<number,number>>|No -| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |<<number,number>>|No -| <<{version}-plugins-{type}s-{plugin}-ignore_older_than>> |<<number,number>>|No -| <<{version}-plugins-{type}s-{plugin}-meter>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-percentiles>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-rates>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-timer>> |<<hash,hash>>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-clear_interval"] -===== `clear_interval` - - * Value type is <<number,number>> - * Default value is `-1` - -The clear interval, when all counters are reset. - -If set to -1, the default value, the metrics will never be cleared. -Otherwise, it should be a multiple of 5s. - -[id="{version}-plugins-{type}s-{plugin}-flush_interval"] -===== `flush_interval` - - * Value type is <<number,number>> - * Default value is `5` - -The flush interval, when the metrics event is created. Must be a multiple of 5s. - -[id="{version}-plugins-{type}s-{plugin}-ignore_older_than"] -===== `ignore_older_than` - - * Value type is <<number,number>> - * Default value is `0` - -Don't track events that have `@timestamp` older than some number of seconds. - -This is useful if you want to only include events that are near real-time -in your metrics. - -For example, to only count events that are within 10 seconds of real-time, you -would do this: - - filter { - metrics { - meter => [ "hits" ] - ignore_older_than => 10 - } - } - -[id="{version}-plugins-{type}s-{plugin}-meter"] -===== `meter` - - * Value type is <<array,array>> - * Default value is `[]` - -syntax: `meter => [ "name of metric", "name of metric" ]` - -[id="{version}-plugins-{type}s-{plugin}-percentiles"] -===== `percentiles` - - * Value type is <<array,array>> - * Default value is `[1, 5, 10, 90, 95, 99, 100]` - -The percentiles that should be measured and emitted for timer values. - -[id="{version}-plugins-{type}s-{plugin}-rates"] -===== `rates` - - * Value type is <<array,array>> - * Default value is `[1, 5, 15]` - -The rates that should be measured, in minutes. -Possible values are 1, 5, and 15. - -[id="{version}-plugins-{type}s-{plugin}-timer"] -===== `timer` - - * Value type is <<hash,hash>> - * Default value is `{}` - -syntax: `timer => { "name of metric" => "%{time_value}" }` - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/metrics-v4.0.5.asciidoc b/docs/versioned-plugins/filters/metrics-v4.0.5.asciidoc deleted file mode 100644 index 6562f7a72..000000000 --- a/docs/versioned-plugins/filters/metrics-v4.0.5.asciidoc +++ /dev/null @@ -1,228 +0,0 @@ -:plugin: metrics -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT!
-/////////////////////////////////////////// -:version: v4.0.5 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-metrics/blob/v4.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Metrics filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The metrics filter is useful for aggregating metrics. - -IMPORTANT: Elasticsearch 2.0 no longer allows field names with dots. Version 3.0 -of the metrics filter plugin changes behavior to use nested fields rather than -dotted notation to avoid colliding with versions of Elasticsearch 2.0+. Please -note the changes in the documentation (underscores and sub-fields used). - -For example, if you have a field `response` that is -an HTTP response code, and you want to count each -kind of response, you can do this: -[source,ruby] - filter { - metrics { - meter => [ "http_%{response}" ] - add_tag => "metric" - } - } - -Metrics are flushed every 5 seconds by default or according to -`flush_interval`. Metrics appear as -new events in the event stream and go through any filters -that occur afterward, as well as outputs. - -In general, you will want to add a tag to your metrics and have an output -explicitly look for that tag. - -The event that is flushed will include every 'meter' and 'timer' -metric in the following way: - -==== `meter` values - -For a `meter => "thing"` you will receive the following fields: - -* "[thing][count]" - the total count of events -* "[thing][rate_1m]" - the per-second event rate in a 1-minute sliding window -* "[thing][rate_5m]" - the per-second event rate in a 5-minute sliding window -* "[thing][rate_15m]" - the per-second event rate in a 15-minute sliding window - -==== `timer` values - -For a `timer => { "thing" => "%{duration}" }` you will receive the following fields: - -* "[thing][count]" - the total count of events -* "[thing][rate_1m]" - the per-second average value in a 1-minute sliding window -* "[thing][rate_5m]" - the per-second average value in a 5-minute sliding window -* "[thing][rate_15m]" - the per-second average value in a 15-minute sliding window -* "[thing][min]" - the minimum value seen for this metric -* "[thing][max]" - the maximum value seen for this metric -* "[thing][stddev]" - the standard deviation for this metric -* "[thing][mean]" - the mean for this metric -* "[thing][pXX]" - the XXth percentile for this metric (see `percentiles`) - -The default lengths of the event rate window (1, 5, and 15 minutes) -can be configured with the `rates` option. - -==== Example: Computing event rate - -For a simple example, let's track how many events per second are running -through Logstash: -[source,ruby] ----- - input { - generator { - type => "generated" - } - } - - filter { - if [type] == "generated" { - metrics { - meter => "events" - add_tag => "metric" - } - } - } - - output { - # only emit events with the 'metric' tag - if "metric" in [tags] { - stdout { - codec => line { - format => "rate: %{[events][rate_1m]}" - } - } - } - } ----- - -Running the above: -[source,ruby] - % bin/logstash -f example.conf - rate: 23721.983566819246 - rate: 24811.395722536377 - rate: 25875.892745934525 - rate: 26836.42375967113 - -We see the output includes our events' 1-minute rate.
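-A related sketch showing how `percentiles` narrows what a `timer` emits; the -`duration` field here is illustrative, not part of the plugin: -[source,ruby] - filter { - metrics { - timer => { "request_time" => "%{duration}" } - # emit only the median, 95th, and 99th percentiles - percentiles => [ 50, 95, 99 ] - add_tag => "metric" - } - } - -With this configuration each flushed event should carry `[request_time][p50]`, -`[request_time][p95]`, and `[request_time][p99]` alongside the min/max/mean -fields described above.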
- -In the real world, you would emit this to graphite or another metrics store, -like so: -[source,ruby] - output { - graphite { - metrics => [ "events.rate_1m", "%{[events][rate_1m]}" ] - } - } - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Metrics Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-clear_interval>> |<<number,number>>|No -| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |<<number,number>>|No -| <<{version}-plugins-{type}s-{plugin}-ignore_older_than>> |<<number,number>>|No -| <<{version}-plugins-{type}s-{plugin}-meter>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-percentiles>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-rates>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-timer>> |<<hash,hash>>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-clear_interval"] -===== `clear_interval` - - * Value type is <<number,number>> - * Default value is `-1` - -The clear interval, when all counters are reset. - -If set to -1, the default value, the metrics will never be cleared. -Otherwise, it should be a multiple of 5s. - -[id="{version}-plugins-{type}s-{plugin}-flush_interval"] -===== `flush_interval` - - * Value type is <<number,number>> - * Default value is `5` - -The flush interval, when the metrics event is created. Must be a multiple of 5s. - -[id="{version}-plugins-{type}s-{plugin}-ignore_older_than"] -===== `ignore_older_than` - - * Value type is <<number,number>> - * Default value is `0` - -Don't track events that have `@timestamp` older than some number of seconds. - -This is useful if you want to only include events that are near real-time -in your metrics. - -For example, to only count events that are within 10 seconds of real-time, you -would do this: - - filter { - metrics { - meter => [ "hits" ] - ignore_older_than => 10 - } - } - -[id="{version}-plugins-{type}s-{plugin}-meter"] -===== `meter` - - * Value type is <<array,array>> - * Default value is `[]` - -syntax: `meter => [ "name of metric", "name of metric" ]` - -[id="{version}-plugins-{type}s-{plugin}-percentiles"] -===== `percentiles` - - * Value type is <<array,array>> - * Default value is `[1, 5, 10, 90, 95, 99, 100]` - -The percentiles that should be measured and emitted for timer values. - -[id="{version}-plugins-{type}s-{plugin}-rates"] -===== `rates` - - * Value type is <<array,array>> - * Default value is `[1, 5, 15]` - -The rates that should be measured, in minutes. -Possible values are 1, 5, and 15.
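-For example, to track only the 1-minute rate (a minimal sketch; the meter -name is arbitrary): -[source,ruby] - filter { - metrics { - meter => [ "events" ] - # skip the 5- and 15-minute sliding windows - rates => [ 1 ] - } - } - -Only `[events][rate_1m]` (plus `[events][count]`) should then be emitted, -omitting the 5- and 15-minute windows.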
- -[id="{version}-plugins-{type}s-{plugin}-timer"] -===== `timer` - - * Value type is <<hash,hash>> - * Default value is `{}` - -syntax: `timer => { "name of metric" => "%{time_value}" }` - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/multiline-index.asciidoc b/docs/versioned-plugins/filters/multiline-index.asciidoc deleted file mode 100644 index 3e821bbfa..000000000 --- a/docs/versioned-plugins/filters/multiline-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: multiline -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <<v3.0.4-plugins-filters-multiline,v3.0.4>> | 2017-08-15 -| <<v3.0.3-plugins-filters-multiline,v3.0.3>> | 2017-06-23 -|======================================================================= - -include::multiline-v3.0.4.asciidoc[] -include::multiline-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/filters/multiline-v3.0.3.asciidoc b/docs/versioned-plugins/filters/multiline-v3.0.3.asciidoc deleted file mode 100644 index 499ff61fb..000000000 --- a/docs/versioned-plugins/filters/multiline-v3.0.3.asciidoc +++ /dev/null @@ -1,194 +0,0 @@ -:plugin: multiline -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-multiline/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Multiline filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - - -This filter will collapse multiline messages from a single source into one Logstash event. - -The original goal of this filter was to allow joining of multi-line messages -from files into a single event. For example, joining Java exception and -stacktrace messages into a single event. - -NOTE: This filter will not work with multiple worker threads (`-w 2`) on the Logstash command line. - -The config looks like this: -[source,ruby] - filter { - multiline { - pattern => "pattern, a regexp" - negate => boolean - what => "previous" or "next" - } - } - -The `pattern` should be a regexp (<<plugins-filters-grok,grok>> patterns are -supported) which matches what you believe to be an indicator that the field -is part of an event consisting of multiple lines of log data. - -The `what` must be `previous` or `next` and indicates the relation -to the multi-line event. - -The `negate` can be `true` or `false` (defaults to `false`). If `true`, a -message not matching the pattern will constitute a match of the multiline -filter and the `what` will be applied. (vice-versa is also true) - -For example, Java stack traces are multiline and usually have the message -starting at the far-left, with each subsequent line indented. Do this: -[source,ruby] - filter { - multiline { - pattern => "^\s" - what => "previous" - } - } - -This says that any line starting with whitespace belongs to the previous line. - -Another example is C line continuations (backslash). Here's how to do that: -[source,ruby] - filter { - multiline { - pattern => "\\$" - what => "next" - } - } - -This says that any line ending with a backslash should be combined with the -following line.
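-A third common arrangement anchors on a timestamp instead: every line that -does not match the timestamp pattern is joined to the previous line. A -sketch, assuming your log lines begin with an ISO8601 timestamp (the pattern -is an assumption about your log format): -[source,ruby] - filter { - multiline { - # lines NOT starting with a timestamp belong to the previous event - pattern => "^%{TIMESTAMP_ISO8601} " - negate => true - what => "previous" - } - } -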
- - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Multiline Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-allow_duplicates>> |<<boolean,boolean>>|No -| <<{version}-plugins-{type}s-{plugin}-max_age>> |<<number,number>>|No -| <<{version}-plugins-{type}s-{plugin}-negate>> |<<boolean,boolean>>|No -| <<{version}-plugins-{type}s-{plugin}-pattern>> |<<string,string>>|Yes -| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-source>> |<<string,string>>|No -| <<{version}-plugins-{type}s-{plugin}-stream_identity>> |<<string,string>>|No -| <<{version}-plugins-{type}s-{plugin}-what>> |<<string,string>>, one of `["previous", "next"]`|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-allow_duplicates"] -===== `allow_duplicates` - - * Value type is <<boolean,boolean>> - * Default value is `true` - -Allow duplicate values on the source field. - -[id="{version}-plugins-{type}s-{plugin}-max_age"] -===== `max_age` - - * Value type is <<number,number>> - * Default value is `5` - -The maximum age an event can be (in seconds) before it is automatically -flushed. - -[id="{version}-plugins-{type}s-{plugin}-negate"] -===== `negate` - - * Value type is <<boolean,boolean>> - * Default value is `false` - -Negate the regexp pattern ('if not matched') - -[id="{version}-plugins-{type}s-{plugin}-pattern"] -===== `pattern` - - * This is a required setting. - * Value type is <<string,string>> - * There is no default value for this setting. - -The expression to match. The same matching engine as the -<<plugins-filters-grok,grok filter>> is used, so the expression can contain -a plain regular expression or one that also contains grok patterns. - -[id="{version}-plugins-{type}s-{plugin}-patterns_dir"] -===== `patterns_dir` - - * Value type is <<array,array>> - * Default value is `[]` - -Logstash ships by default with a bunch of patterns, so you don't -necessarily need to define this yourself unless you are adding additional -patterns. - -Pattern files are plain text with format: -[source,ruby] - NAME PATTERN - -For example: -[source,ruby] - NUMBER \d+ - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * Value type is <<string,string>> - * Default value is `"message"` - -The field name to execute the pattern match on. - -[id="{version}-plugins-{type}s-{plugin}-stream_identity"] -===== `stream_identity` - - * Value type is <<string,string>> - * Default value is `"%{host}.%{path}.%{type}"` - -The stream identity is how the multiline filter determines which stream an -event belongs to. This is generally used for differentiating, say, events -coming from multiple files in the same file input, or multiple connections -coming from a tcp input. - -The default value here is usually what you want, but there are some cases -where you want to change it. One such example is if you are using a tcp -input with only one client connecting at any time. If that client -reconnects (due to error or client restart), then logstash will identify -the new connection as a new stream and break any multiline goodness that -may have occurred between the old and new connection. To solve this use -case, you can use `%{@source_host}.%{@type}` instead.
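-A sketch of that override, combined with the whitespace example from the -description (the identity string is built from event fields, so any fields -present on your events can be used): -[source,ruby] - filter { - multiline { - pattern => "^\s" - what => "previous" - # one logical stream per source host and type, ignoring connection churn - stream_identity => "%{@source_host}.%{@type}" - } - } -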
- -[id="{version}-plugins-{type}s-{plugin}-what"] -===== `what` - - * This is a required setting. - * Value can be any of: `previous`, `next` - * There is no default value for this setting. - -If the pattern matched, does the event belong to the next or previous event? - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/multiline-v3.0.4.asciidoc b/docs/versioned-plugins/filters/multiline-v3.0.4.asciidoc deleted file mode 100644 index 6fe0f4e0d..000000000 --- a/docs/versioned-plugins/filters/multiline-v3.0.4.asciidoc +++ /dev/null @@ -1,194 +0,0 @@ -:plugin: multiline -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-multiline/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Multiline filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - - -This filter will collapse multiline messages from a single source into one Logstash event. - -The original goal of this filter was to allow joining of multi-line messages -from files into a single event. For example, joining Java exception and -stacktrace messages into a single event. - -NOTE: This filter will not work with multiple worker threads (`-w 2`) on the Logstash command line. - -The config looks like this: -[source,ruby] - filter { - multiline { - pattern => "pattern, a regexp" - negate => boolean - what => "previous" or "next" - } - } - -The `pattern` should be a regexp (<<plugins-filters-grok,grok>> patterns are -supported) which matches what you believe to be an indicator that the field -is part of an event consisting of multiple lines of log data. - -The `what` must be `previous` or `next` and indicates the relation -to the multi-line event. - -The `negate` can be `true` or `false` (defaults to `false`). If `true`, a -message not matching the pattern will constitute a match of the multiline -filter and the `what` will be applied. (vice-versa is also true) - -For example, Java stack traces are multiline and usually have the message -starting at the far-left, with each subsequent line indented. Do this: -[source,ruby] - filter { - multiline { - pattern => "^\s" - what => "previous" - } - } - -This says that any line starting with whitespace belongs to the previous line. - -Another example is C line continuations (backslash). Here's how to do that: -[source,ruby] - filter { - multiline { - pattern => "\\$" - what => "next" - } - } - -This says that any line ending with a backslash should be combined with the -following line. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Multiline Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-allow_duplicates>> |<<boolean,boolean>>|No -| <<{version}-plugins-{type}s-{plugin}-max_age>> |<<number,number>>|No -| <<{version}-plugins-{type}s-{plugin}-negate>> |<<boolean,boolean>>|No -| <<{version}-plugins-{type}s-{plugin}-pattern>> |<<string,string>>|Yes -| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-source>> |<<string,string>>|No -| <<{version}-plugins-{type}s-{plugin}-stream_identity>> |<<string,string>>|No -| <<{version}-plugins-{type}s-{plugin}-what>> |<<string,string>>, one of `["previous", "next"]`|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-allow_duplicates"] -===== `allow_duplicates` - - * Value type is <<boolean,boolean>> - * Default value is `true` - -Allow duplicate values on the source field. - -[id="{version}-plugins-{type}s-{plugin}-max_age"] -===== `max_age` - - * Value type is <<number,number>> - * Default value is `5` - -The maximum age an event can be (in seconds) before it is automatically -flushed. - -[id="{version}-plugins-{type}s-{plugin}-negate"] -===== `negate` - - * Value type is <<boolean,boolean>> - * Default value is `false` - -Negate the regexp pattern ('if not matched') - -[id="{version}-plugins-{type}s-{plugin}-pattern"] -===== `pattern` - - * This is a required setting. - * Value type is <<string,string>> - * There is no default value for this setting. - -The expression to match. The same matching engine as the -<<plugins-filters-grok,grok filter>> is used, so the expression can contain -a plain regular expression or one that also contains grok patterns. - -[id="{version}-plugins-{type}s-{plugin}-patterns_dir"] -===== `patterns_dir` - - * Value type is <<array,array>> - * Default value is `[]` - -Logstash ships by default with a bunch of patterns, so you don't -necessarily need to define this yourself unless you are adding additional -patterns. - -Pattern files are plain text with format: -[source,ruby] - NAME PATTERN - -For example: -[source,ruby] - NUMBER \d+ - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * Value type is <<string,string>> - * Default value is `"message"` - -The field name to execute the pattern match on. - -[id="{version}-plugins-{type}s-{plugin}-stream_identity"] -===== `stream_identity` - - * Value type is <<string,string>> - * Default value is `"%{host}.%{path}.%{type}"` - -The stream identity is how the multiline filter determines which stream an -event belongs to. This is generally used for differentiating, say, events -coming from multiple files in the same file input, or multiple connections -coming from a tcp input. - -The default value here is usually what you want, but there are some cases -where you want to change it. One such example is if you are using a tcp -input with only one client connecting at any time. If that client -reconnects (due to error or client restart), then logstash will identify -the new connection as a new stream and break any multiline goodness that -may have occurred between the old and new connection. To solve this use -case, you can use `%{@source_host}.%{@type}` instead. - -[id="{version}-plugins-{type}s-{plugin}-what"] -===== `what` - - * This is a required setting. - * Value can be any of: `previous`, `next` - * There is no default value for this setting. - -If the pattern matched, does the event belong to the next or previous event?
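-Putting the two required settings together, a sketch for timestamped -application logs (the timestamp pattern is an assumption about your log -format): -[source,ruby] - filter { - multiline { - pattern => "^%{TIMESTAMP_ISO8601} " - negate => true - what => "previous" - # flush a pending event after 10 seconds of inactivity - max_age => 10 - } - } - -Lines that do not begin with a timestamp are folded into the previous event, -and any pending event is flushed once it is 10 seconds old.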
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/oui-index.asciidoc b/docs/versioned-plugins/filters/oui-index.asciidoc deleted file mode 100644 index 83bebcbef..000000000 --- a/docs/versioned-plugins/filters/oui-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: oui -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <<v3.0.2-plugins-filters-oui,v3.0.2>> | 2017-08-15 -| <<v3.0.1-plugins-filters-oui,v3.0.1>> | 2017-06-23 -|======================================================================= - -include::oui-v3.0.2.asciidoc[] -include::oui-v3.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/filters/oui-v3.0.1.asciidoc b/docs/versioned-plugins/filters/oui-v3.0.1.asciidoc deleted file mode 100644 index c82cc5374..000000000 --- a/docs/versioned-plugins/filters/oui-v3.0.1.asciidoc +++ /dev/null @@ -1,70 +0,0 @@ -:plugin: oui -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-oui/blob/v3.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Oui filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Logstash filter to parse OUI data from MAC addresses. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Oui Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-source>> |<<string,string>>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<<string,string>>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * Value type is <<string,string>> - * Default value is `"message"` - -The source field to parse - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <<string,string>> - * Default value is `"oui"` - -The target field to place all the parsed data in - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/oui-v3.0.2.asciidoc b/docs/versioned-plugins/filters/oui-v3.0.2.asciidoc deleted file mode 100644 index fb8e50dbe..000000000 --- a/docs/versioned-plugins/filters/oui-v3.0.2.asciidoc +++ /dev/null @@ -1,70 +0,0 @@ -:plugin: oui -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT!
-/////////////////////////////////////////// -:version: v3.0.2 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-oui/blob/v3.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Oui filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Logstash filter to parse OUI data from MAC addresses. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Oui Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-source>> |<<string,string>>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<<string,string>>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * Value type is <<string,string>> - * Default value is `"message"` - -The source field to parse - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <<string,string>> - * Default value is `"oui"` - -The target field to place all the parsed data in - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/prune-index.asciidoc b/docs/versioned-plugins/filters/prune-index.asciidoc deleted file mode 100644 index e9fe878f6..000000000 --- a/docs/versioned-plugins/filters/prune-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: prune -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <<v3.0.3-plugins-filters-prune,v3.0.3>> | 2017-11-07 -| <<v3.0.2-plugins-filters-prune,v3.0.2>> | 2017-08-15 -| <<v3.0.1-plugins-filters-prune,v3.0.1>> | 2017-06-23 -|======================================================================= - -include::prune-v3.0.3.asciidoc[] -include::prune-v3.0.2.asciidoc[] -include::prune-v3.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/filters/prune-v3.0.1.asciidoc b/docs/versioned-plugins/filters/prune-v3.0.1.asciidoc deleted file mode 100644 index b20b96a53..000000000 --- a/docs/versioned-plugins/filters/prune-v3.0.1.asciidoc +++ /dev/null @@ -1,154 +0,0 @@ -:plugin: prune -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-prune/blob/v3.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT!
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Prune filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The prune filter is for removing fields from events based on -whitelists or blacklists of field names or their values (names and -values can also be regular expressions). - -This can be useful, for example, if you have a <<plugins-filters-grok,grok>> -or <<plugins-filters-kv,kv>> filter that creates a number of fields -with names that you don't necessarily know beforehand, -and you only want to keep a subset of them. - -Usage help: -To specify an exact field name or value, use the regular expression syntax `^some_name_or_value$`. -Example usage: Input data `{ "msg":"hello world", "msg_short":"hw" }` -[source,ruby] - filter { - prune { - whitelist_names => [ "msg" ] - } - } -Allows both `"msg"` and `"msg_short"` through. - -While: -[source,ruby] - filter { - prune { - whitelist_names => ["^msg$"] - } - } -Allows only `"msg"` through. - -Logstash stores an event's `tags` as a field which is subject to pruning. Remember to `whitelist_names => [ "^tags$" ]` -to maintain `tags` after pruning or use `blacklist_values => [ "^tag_name$" ]` to eliminate a specific `tag`. - -NOTE: This filter currently only supports operations on top-level fields, -i.e. whitelisting and blacklisting of subfields based on name or value -does not work. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Prune Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-blacklist_names>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-blacklist_values>> |<<hash,hash>>|No -| <<{version}-plugins-{type}s-{plugin}-interpolate>> |<<boolean,boolean>>|No -| <<{version}-plugins-{type}s-{plugin}-whitelist_names>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-whitelist_values>> |<<hash,hash>>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-blacklist_names"] -===== `blacklist_names` - - * Value type is <<array,array>> - * Default value is `["%{[^}]+}"]` - -Exclude fields whose names match the specified regexps; by default, exclude unresolved `%{field}` strings. -[source,ruby] - filter { - prune { - blacklist_names => [ "method", "(referrer|status)", "${some}_field" ] - } - } - -[id="{version}-plugins-{type}s-{plugin}-blacklist_values"] -===== `blacklist_values` - - * Value type is <<hash,hash>> - * Default value is `{}` - -Exclude specified fields if their values match one of the supplied regular expressions. -In case field values are arrays, each array item is matched against the regular expressions and matching array items will be excluded. -[source,ruby] - filter { - prune { - blacklist_values => [ "uripath", "/index.php", - "method", "(HEAD|OPTIONS)", - "status", "^[^2]" ] - } - } - -[id="{version}-plugins-{type}s-{plugin}-interpolate"] -===== `interpolate` - - * Value type is <<boolean,boolean>> - * Default value is `false` - -Controls whether configuration fields and values should be interpolated for -dynamic values (when resolving `%{some_field}`). -Probably adds some performance overhead. Defaults to false.
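-A sketch of `interpolate` in use, assuming events carry a `blacklist_field` -field that names the field to drop (both field names are illustrative): -[source,ruby] - filter { - prune { - # resolve %{blacklist_field} against each event before matching - interpolate => true - blacklist_names => [ "%{blacklist_field}" ] - } - } -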
- -[id="{version}-plugins-{type}s-{plugin}-whitelist_names"] -===== `whitelist_names` - - * Value type is <<array,array>> - * Default value is `[]` - -Include fields only if their names match the specified regexps. The default is an empty list, which means include everything. -[source,ruby] - filter { - prune { - whitelist_names => [ "method", "(referrer|status)", "${some}_field" ] - } - } - -[id="{version}-plugins-{type}s-{plugin}-whitelist_values"] -===== `whitelist_values` - - * Value type is <<hash,hash>> - * Default value is `{}` - -Include specified fields only if their values match one of the supplied regular expressions. -In case field values are arrays, each array item is matched against the regular expressions and only matching array items will be included. -[source,ruby] - filter { - prune { - whitelist_values => [ "uripath", "/index.php", - "method", "(GET|POST)", - "status", "^[^2]" ] - } - } - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/prune-v3.0.2.asciidoc b/docs/versioned-plugins/filters/prune-v3.0.2.asciidoc deleted file mode 100644 index 4aa7058ef..000000000 --- a/docs/versioned-plugins/filters/prune-v3.0.2.asciidoc +++ /dev/null @@ -1,154 +0,0 @@ -:plugin: prune -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.2 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-prune/blob/v3.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Prune filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The prune filter is for removing fields from events based on -whitelists or blacklists of field names or their values (names and -values can also be regular expressions). - -This can be useful, for example, if you have a <<plugins-filters-grok,grok>> -or <<plugins-filters-kv,kv>> filter that creates a number of fields -with names that you don't necessarily know beforehand, -and you only want to keep a subset of them. - -Usage help: -To specify an exact field name or value, use the regular expression syntax `^some_name_or_value$`. -Example usage: Input data `{ "msg":"hello world", "msg_short":"hw" }` -[source,ruby] - filter { - prune { - whitelist_names => [ "msg" ] - } - } -Allows both `"msg"` and `"msg_short"` through. - -While: -[source,ruby] - filter { - prune { - whitelist_names => ["^msg$"] - } - } -Allows only `"msg"` through. - -Logstash stores an event's `tags` as a field which is subject to pruning. Remember to `whitelist_names => [ "^tags$" ]` -to maintain `tags` after pruning or use `blacklist_values => [ "^tag_name$" ]` to eliminate a specific `tag`. - -NOTE: This filter currently only supports operations on top-level fields, -i.e. whitelisting and blacklisting of subfields based on name or value -does not work. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Prune Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-blacklist_names>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-blacklist_values>> |<<hash,hash>>|No -| <<{version}-plugins-{type}s-{plugin}-interpolate>> |<<boolean,boolean>>|No -| <<{version}-plugins-{type}s-{plugin}-whitelist_names>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-whitelist_values>> |<<hash,hash>>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-blacklist_names"] -===== `blacklist_names` - - * Value type is <<array,array>> - * Default value is `["%{[^}]+}"]` - -Exclude fields whose names match the specified regexps; by default, exclude unresolved `%{field}` strings. -[source,ruby] - filter { - prune { - blacklist_names => [ "method", "(referrer|status)", "${some}_field" ] - } - } - -[id="{version}-plugins-{type}s-{plugin}-blacklist_values"] -===== `blacklist_values` - - * Value type is <<hash,hash>> - * Default value is `{}` - -Exclude specified fields if their values match one of the supplied regular expressions. -In case field values are arrays, each array item is matched against the regular expressions and matching array items will be excluded. -[source,ruby] - filter { - prune { - blacklist_values => [ "uripath", "/index.php", - "method", "(HEAD|OPTIONS)", - "status", "^[^2]" ] - } - } - -[id="{version}-plugins-{type}s-{plugin}-interpolate"] -===== `interpolate` - - * Value type is <<boolean,boolean>> - * Default value is `false` - -Controls whether configuration fields and values should be interpolated for -dynamic values (when resolving `%{some_field}`). -Probably adds some performance overhead. Defaults to false. - -[id="{version}-plugins-{type}s-{plugin}-whitelist_names"] -===== `whitelist_names` - - * Value type is <<array,array>> - * Default value is `[]` - -Include fields only if their names match the specified regexps. The default is an empty list, which means include everything. -[source,ruby] - filter { - prune { - whitelist_names => [ "method", "(referrer|status)", "${some}_field" ] - } - } - -[id="{version}-plugins-{type}s-{plugin}-whitelist_values"] -===== `whitelist_values` - - * Value type is <<hash,hash>> - * Default value is `{}` - -Include specified fields only if their values match one of the supplied regular expressions. -In case field values are arrays, each array item is matched against the regular expressions and only matching array items will be included. -[source,ruby] - filter { - prune { - whitelist_values => [ "uripath", "/index.php", - "method", "(GET|POST)", - "status", "^[^2]" ] - } - } - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/prune-v3.0.3.asciidoc b/docs/versioned-plugins/filters/prune-v3.0.3.asciidoc deleted file mode 100644 index b6203fb98..000000000 --- a/docs/versioned-plugins/filters/prune-v3.0.3.asciidoc +++ /dev/null @@ -1,154 +0,0 @@ -:plugin: prune -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT!
-/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-prune/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Prune filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The prune filter is for removing fields from events based on -whitelists or blacklists of field names or their values (names and -values can also be regular expressions). - -This can be useful, for example, if you have a <<plugins-filters-grok,grok>> -or <<plugins-filters-kv,kv>> filter that creates a number of fields -with names that you don't necessarily know beforehand, -and you only want to keep a subset of them. - -Usage help: -To specify an exact field name or value, use the regular expression syntax `^some_name_or_value$`. -Example usage: Input data `{ "msg":"hello world", "msg_short":"hw" }` -[source,ruby] - filter { - prune { - whitelist_names => [ "msg" ] - } - } -Allows both `"msg"` and `"msg_short"` through. - -While: -[source,ruby] - filter { - prune { - whitelist_names => ["^msg$"] - } - } -Allows only `"msg"` through. - -Logstash stores an event's `tags` as a field which is subject to pruning. Remember to `whitelist_names => [ "^tags$" ]` -to maintain `tags` after pruning or use `blacklist_values => [ "^tag_name$" ]` to eliminate a specific `tag`. - -NOTE: This filter currently only supports operations on top-level fields, -i.e. whitelisting and blacklisting of subfields based on name or value -does not work. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Prune Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-blacklist_names>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-blacklist_values>> |<<hash,hash>>|No -| <<{version}-plugins-{type}s-{plugin}-interpolate>> |<<boolean,boolean>>|No -| <<{version}-plugins-{type}s-{plugin}-whitelist_names>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-whitelist_values>> |<<hash,hash>>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-blacklist_names"] -===== `blacklist_names` - - * Value type is <<array,array>> - * Default value is `["%{[^}]+}"]` - -Exclude fields whose names match the specified regexps; by default, exclude unresolved `%{field}` strings. -[source,ruby] - filter { - prune { - blacklist_names => [ "method", "(referrer|status)", "${some}_field" ] - } - } - -[id="{version}-plugins-{type}s-{plugin}-blacklist_values"] -===== `blacklist_values` - - * Value type is <<hash,hash>> - * Default value is `{}` - -Exclude specified fields if their values match one of the supplied regular expressions. -In case field values are arrays, each array item is matched against the regular expressions and matching array items will be excluded.
-[source,ruby] - filter { - prune { - blacklist_values => [ "uripath", "/index.php", - "method", "(HEAD|OPTIONS)", - "status", "^[^2]" ] - } - } - -[id="{version}-plugins-{type}s-{plugin}-interpolate"] -===== `interpolate` - - * Value type is <<boolean,boolean>> - * Default value is `false` - -Controls whether configuration fields and values should be interpolated for -dynamic values (when resolving `%{some_field}`). -Probably adds some performance overhead. Defaults to false. - -[id="{version}-plugins-{type}s-{plugin}-whitelist_names"] -===== `whitelist_names` - - * Value type is <<array,array>> - * Default value is `[]` - -Include fields only if their names match the specified regexps. The default is an empty list, which means include everything. -[source,ruby] - filter { - prune { - whitelist_names => [ "method", "(referrer|status)", "${some}_field" ] - } - } - -[id="{version}-plugins-{type}s-{plugin}-whitelist_values"] -===== `whitelist_values` - - * Value type is <<hash,hash>> - * Default value is `{}` - -Include specified fields only if their values match one of the supplied regular expressions. -In case field values are arrays, each array item is matched against the regular expressions and only matching array items will be included. -[source,ruby] - filter { - prune { - whitelist_values => [ "uripath", "/index.php", - "method", "(GET|POST)", - "status", "^[^2]" ] - } - } - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/punct-index.asciidoc b/docs/versioned-plugins/filters/punct-index.asciidoc deleted file mode 100644 index f58b661b6..000000000 --- a/docs/versioned-plugins/filters/punct-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: punct -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <<v2.0.6-plugins-filters-punct,v2.0.6>> | 2017-08-15 -| <<v2.0.5-plugins-filters-punct,v2.0.5>> | 2017-06-23 -|======================================================================= - -include::punct-v2.0.6.asciidoc[] -include::punct-v2.0.5.asciidoc[] - diff --git a/docs/versioned-plugins/filters/punct-v2.0.5.asciidoc b/docs/versioned-plugins/filters/punct-v2.0.5.asciidoc deleted file mode 100644 index d7e75fe4e..000000000 --- a/docs/versioned-plugins/filters/punct-v2.0.5.asciidoc +++ /dev/null @@ -1,62 +0,0 @@ -:plugin: punct -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.0.5 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-punct/blob/v2.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Punct filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Strip everything but punctuation from a field and store the remainder in -a separate field. This is often used for fingerprinting log events. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Punct Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-source>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<<string,string>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
-  * Value type is <<string,string>>
-  * Default value is `"message"`
-
-The field reference to use for punctuation stripping.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
-  * Value type is <<string,string>>
-  * Default value is `"punct"`
-
-The field in which to store the result.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/punct-v2.0.6.asciidoc b/docs/versioned-plugins/filters/punct-v2.0.6.asciidoc
deleted file mode 100644
index 6386463c3..000000000
--- a/docs/versioned-plugins/filters/punct-v2.0.6.asciidoc
+++ /dev/null
@@ -1,62 +0,0 @@
-:plugin: punct
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v2.0.6
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-punct/blob/v2.0.6/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Punct filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Strip everything but punctuation from a field and store the remainder in
-a separate field. This is often used for fingerprinting log events.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Punct Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-source>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<<string,string>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
-  * Value type is <<string,string>>
-  * Default value is `"message"`
-
-The field reference to use for punctuation stripping.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
-  * Value type is <<string,string>>
-  * Default value is `"punct"`
-
-The field in which to store the result.
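-
-As a quick usage sketch (the `target` value here is just illustrative, not a
-required name), the following configuration fingerprints each event by its
-punctuation:
-[source,ruby]
-    filter {
-      punct {
-        source => "message"
-        target => "punct_signature"
-      }
-    }
-With this configuration, an event whose `message` is `"foo=1&bar=2;"` would
-get a `punct_signature` field containing `=&=;`.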
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/range-index.asciidoc b/docs/versioned-plugins/filters/range-index.asciidoc
deleted file mode 100644
index b001173c2..000000000
--- a/docs/versioned-plugins/filters/range-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: range
-:type: filter
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <<v3.0.3-plugins-filters-range,v3.0.3>> | 2017-11-07
-| <<v3.0.2-plugins-filters-range,v3.0.2>> | 2017-08-15
-| <<v3.0.1-plugins-filters-range,v3.0.1>> | 2017-06-23
-|=======================================================================
-
-include::range-v3.0.3.asciidoc[]
-include::range-v3.0.2.asciidoc[]
-include::range-v3.0.1.asciidoc[]
-
diff --git a/docs/versioned-plugins/filters/range-v3.0.1.asciidoc b/docs/versioned-plugins/filters/range-v3.0.1.asciidoc
deleted file mode 100644
index 65b94be6a..000000000
--- a/docs/versioned-plugins/filters/range-v3.0.1.asciidoc
+++ /dev/null
@@ -1,89 +0,0 @@
-:plugin: range
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.1
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-range/blob/v3.0.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Range filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This filter is used to check that certain fields are within expected size/length ranges.
-Supported types are numbers and strings.
-Numbers are checked to be within a numeric value range.
-Strings are checked to be within a string length range.
-More than one range can be specified for the same field name; actions are applied incrementally.
-When a field value is within a specified range, an action is taken.
-Supported actions are drop event, add tag, or add field with a specified value.
-
-Example use cases are histogram-like tagging of events,
-finding anomalous values in fields, or dropping events that are too big.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Range Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-negate>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-ranges>> |<<array,array>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-negate"]
-===== `negate`
-
-  * Value type is <<boolean,boolean>>
-  * Default value is `false`
-
-Negate the range match logic; events must be outside of the specified range to match.
-
-[id="{version}-plugins-{type}s-{plugin}-ranges"]
-===== `ranges`
-
-  * Value type is <<array,array>>
-  * Default value is `[]`
-
-An array of field, min, max, action tuples.
-Example:
-[source,ruby]
-    filter {
-      range {
-        ranges => [ "message", 0, 10, "tag:short",
-                    "message", 11, 100, "tag:medium",
-                    "message", 101, 1000, "tag:long",
-                    "message", 1001, 1e1000, "drop",
-                    "duration", 0, 100, "field:latency:fast",
-                    "duration", 101, 200, "field:latency:normal",
-                    "duration", 201, 1000, "field:latency:slow",
-                    "duration", 1001, 1e1000, "field:latency:outlier",
-                    "requests", 0, 10, "tag:too_few_%{host}_requests" ]
-      }
-    }
-
-Supported actions are drop event, add tag, or add field with a specified value.
-Added tag names, field names, and field values can include `%{dynamic}` values.
-
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/range-v3.0.2.asciidoc b/docs/versioned-plugins/filters/range-v3.0.2.asciidoc
deleted file mode 100644
index da8bc0e56..000000000
--- a/docs/versioned-plugins/filters/range-v3.0.2.asciidoc
+++ /dev/null
@@ -1,89 +0,0 @@
-:plugin: range
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.2
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-range/blob/v3.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Range filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This filter is used to check that certain fields are within expected size/length ranges.
-Supported types are numbers and strings.
-Numbers are checked to be within a numeric value range.
-Strings are checked to be within a string length range.
-More than one range can be specified for the same field name; actions are applied incrementally.
-When a field value is within a specified range, an action is taken.
-Supported actions are drop event, add tag, or add field with a specified value.
-
-Example use cases are histogram-like tagging of events,
-finding anomalous values in fields, or dropping events that are too big.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Range Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-negate>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-ranges>> |<<array,array>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-negate"]
-===== `negate`
-
-  * Value type is <<boolean,boolean>>
-  * Default value is `false`
-
-Negate the range match logic; events must be outside of the specified range to match.
-
-[id="{version}-plugins-{type}s-{plugin}-ranges"]
-===== `ranges`
-
-  * Value type is <<array,array>>
-  * Default value is `[]`
-
-An array of field, min, max, action tuples.
-Example:
-[source,ruby]
-    filter {
-      range {
-        ranges => [ "message", 0, 10, "tag:short",
-                    "message", 11, 100, "tag:medium",
-                    "message", 101, 1000, "tag:long",
-                    "message", 1001, 1e1000, "drop",
-                    "duration", 0, 100, "field:latency:fast",
-                    "duration", 101, 200, "field:latency:normal",
-                    "duration", 201, 1000, "field:latency:slow",
-                    "duration", 1001, 1e1000, "field:latency:outlier",
-                    "requests", 0, 10, "tag:too_few_%{host}_requests" ]
-      }
-    }
-
-Supported actions are drop event, add tag, or add field with a specified value.
-Added tag names, field names, and field values can include `%{dynamic}` values.
-
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/range-v3.0.3.asciidoc b/docs/versioned-plugins/filters/range-v3.0.3.asciidoc
deleted file mode 100644
index 524e9e30c..000000000
--- a/docs/versioned-plugins/filters/range-v3.0.3.asciidoc
+++ /dev/null
@@ -1,89 +0,0 @@
-:plugin: range
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.3
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-range/blob/v3.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Range filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This filter is used to check that certain fields are within expected size/length ranges.
-Supported types are numbers and strings.
-Numbers are checked to be within a numeric value range.
-Strings are checked to be within a string length range.
-More than one range can be specified for the same field name; actions are applied incrementally.
-When a field value is within a specified range, an action is taken.
-Supported actions are drop event, add tag, or add field with a specified value.
-
-Example use cases are histogram-like tagging of events,
-finding anomalous values in fields, or dropping events that are too big.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Range Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-negate>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-ranges>> |<<array,array>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-negate"]
-===== `negate`
-
-  * Value type is <<boolean,boolean>>
-  * Default value is `false`
-
-Negate the range match logic; events must be outside of the specified range to match.
-
-[id="{version}-plugins-{type}s-{plugin}-ranges"]
-===== `ranges`
-
-  * Value type is <<array,array>>
-  * Default value is `[]`
-
-An array of field, min, max, action tuples.
-Example:
-[source,ruby]
-    filter {
-      range {
-        ranges => [ "message", 0, 10, "tag:short",
-                    "message", 11, 100, "tag:medium",
-                    "message", 101, 1000, "tag:long",
-                    "message", 1001, 1e1000, "drop",
-                    "duration", 0, 100, "field:latency:fast",
-                    "duration", 101, 200, "field:latency:normal",
-                    "duration", 201, 1000, "field:latency:slow",
-                    "duration", 1001, 1e1000, "field:latency:outlier",
-                    "requests", 0, 10, "tag:too_few_%{host}_requests" ]
-      }
    }
-
-Supported actions are drop event, add tag, or add field with a specified value.
-Added tag names, field names, and field values can include `%{dynamic}` values.
-
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/script-index.asciidoc b/docs/versioned-plugins/filters/script-index.asciidoc
deleted file mode 100644
index 30723eb68..000000000
--- a/docs/versioned-plugins/filters/script-index.asciidoc
+++ /dev/null
@@ -1,10 +0,0 @@
-:plugin: script
-:type: filter
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-|=======================================================================
-
-
diff --git a/docs/versioned-plugins/filters/split-index.asciidoc b/docs/versioned-plugins/filters/split-index.asciidoc
deleted file mode 100644
index c93ed6dd2..000000000
--- a/docs/versioned-plugins/filters/split-index.asciidoc
+++ /dev/null
@@ -1,20 +0,0 @@
-:plugin: split
-:type: filter
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <<v3.1.6-plugins-filters-split,v3.1.6>> | 2017-12-11
-| <<v3.1.5-plugins-filters-split,v3.1.5>> | 2017-11-07
-| <<v3.1.4-plugins-filters-split,v3.1.4>> | 2017-08-15
-| <<v3.1.3-plugins-filters-split,v3.1.3>> | 2017-07-26
-| <<v3.1.2-plugins-filters-split,v3.1.2>> | 2017-06-23
-|=======================================================================
-
-include::split-v3.1.6.asciidoc[]
-include::split-v3.1.5.asciidoc[]
-include::split-v3.1.4.asciidoc[]
-include::split-v3.1.3.asciidoc[]
-include::split-v3.1.2.asciidoc[]
-
diff --git a/docs/versioned-plugins/filters/split-v3.1.2.asciidoc b/docs/versioned-plugins/filters/split-v3.1.2.asciidoc
deleted file mode 100644
index c7ca96658..000000000
--- a/docs/versioned-plugins/filters/split-v3.1.2.asciidoc
+++ /dev/null
@@ -1,111 +0,0 @@
-:plugin: split
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.1.2
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-split/blob/v3.1.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Split filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The split filter clones an event by splitting one of its fields and
-placing each value resulting from the split into a clone of the original
-event. The field being split can either be a string or an array.
-
-An example use case of this filter is for taking output from the
-<<plugins-inputs-exec,exec input plugin>>, which emits one event for
-the whole output of a command, and splitting that output by newline -
-making each line an event.
-
-The split filter can also be used to split array fields in events into individual events.
-A very common pattern in JSON and XML is to make use of lists to group data together.
-
-For example, a JSON structure like this:
-
-[source,js]
-----------------------------------
-{ field1: ...,
- results: [
-   { result ... },
-   { result ... },
-   { result ... },
-   ...
-] }
-----------------------------------
-
-The split filter can be used on the above data to create separate events for each value of the `results` field:
-
-[source,js]
-----------------------------------
-filter {
- split {
-   field => "results"
- }
-}
-----------------------------------
-
-The end result of each split is a complete copy of the event
-with only the current split section of the given field changed.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Split Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-field>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-terminator>> |<<string,string>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-field"]
-===== `field`
-
-  * Value type is <<string,string>>
-  * Default value is `"message"`
-
-The field whose value is split by the terminator.
-Can be a multiline message or the ID of an array.
-Nested arrays are referenced like: `[object_id][array_id]`
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-The field within the new event into which the value is split.
-If not set, the target field defaults to the name of the split field.
-
-[id="{version}-plugins-{type}s-{plugin}-terminator"]
-===== `terminator`
-
-  * Value type is <<string,string>>
-  * Default value is `"\n"`
-
-The string to split on. This is usually a line terminator, but can be any
-string. If you are splitting a JSON array into multiple events, you can ignore this field.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/split-v3.1.3.asciidoc b/docs/versioned-plugins/filters/split-v3.1.3.asciidoc
deleted file mode 100644
index 57b54043c..000000000
--- a/docs/versioned-plugins/filters/split-v3.1.3.asciidoc
+++ /dev/null
@@ -1,111 +0,0 @@
-:plugin: split
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.1.3
-:release_date: 2017-07-26
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-split/blob/v3.1.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Split filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The split filter clones an event by splitting one of its fields and
-placing each value resulting from the split into a clone of the original
-event. The field being split can either be a string or an array.
- -An example use case of this filter is for taking output from the -<> which emits one event for -the whole output of a command and splitting that output by newline - -making each line an event. - -Split filter can also be used to split array fields in events into individual events. -A very common pattern in JSON & XML is to make use of lists to group data together. - -For example, a json structure like this: - -[source,js] ----------------------------------- -{ field1: ..., - results: [ - { result ... }, - { result ... }, - { result ... }, - ... -] } ----------------------------------- - -The split filter can be used on the above data to create separate events for each value of `results` field - -[source,js] ----------------------------------- -filter { - split { - field => "results" - } -} ----------------------------------- - -The end result of each split is a complete copy of the event -with only the current split section of the given field changed. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Split Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-field>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-terminator>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-field"] -===== `field` - - * Value type is <> - * Default value is `"message"` - -The field which value is split by the terminator. -Can be a multiline message or the ID of an array. -Nested arrays are referenced like: "[object_id][array_id]" - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -The field within the new event which the value is split into. -If not set, the target field defaults to split field name. - -[id="{version}-plugins-{type}s-{plugin}-terminator"] -===== `terminator` - - * Value type is <> - * Default value is `"\n"` - -The string to split on. This is usually a line terminator, but can be any -string. If you are splitting a JSON array into multiple events, you can ignore this field. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/split-v3.1.4.asciidoc b/docs/versioned-plugins/filters/split-v3.1.4.asciidoc deleted file mode 100644 index b64e1215b..000000000 --- a/docs/versioned-plugins/filters/split-v3.1.4.asciidoc +++ /dev/null @@ -1,111 +0,0 @@ -:plugin: split -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.4 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-split/blob/v3.1.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Split filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The split filter clones an event by splitting one of its fields and -placing each value resulting from the split into a clone of the original -event. The field being split can either be a string or an array. - -An example use case of this filter is for taking output from the -<> which emits one event for -the whole output of a command and splitting that output by newline - -making each line an event. - -Split filter can also be used to split array fields in events into individual events. -A very common pattern in JSON & XML is to make use of lists to group data together. - -For example, a json structure like this: - -[source,js] ----------------------------------- -{ field1: ..., - results: [ - { result ... }, - { result ... }, - { result ... }, - ... -] } ----------------------------------- - -The split filter can be used on the above data to create separate events for each value of `results` field - -[source,js] ----------------------------------- -filter { - split { - field => "results" - } -} ----------------------------------- - -The end result of each split is a complete copy of the event -with only the current split section of the given field changed. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Split Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-field>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-terminator>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-field"] -===== `field` - - * Value type is <> - * Default value is `"message"` - -The field which value is split by the terminator. -Can be a multiline message or the ID of an array. -Nested arrays are referenced like: "[object_id][array_id]" - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -The field within the new event which the value is split into. -If not set, the target field defaults to split field name. - -[id="{version}-plugins-{type}s-{plugin}-terminator"] -===== `terminator` - - * Value type is <> - * Default value is `"\n"` - -The string to split on. This is usually a line terminator, but can be any -string. If you are splitting a JSON array into multiple events, you can ignore this field. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/split-v3.1.5.asciidoc b/docs/versioned-plugins/filters/split-v3.1.5.asciidoc deleted file mode 100644 index e3ad88491..000000000 --- a/docs/versioned-plugins/filters/split-v3.1.5.asciidoc +++ /dev/null @@ -1,111 +0,0 @@ -:plugin: split -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v3.1.5 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-split/blob/v3.1.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Split filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The split filter clones an event by splitting one of its fields and -placing each value resulting from the split into a clone of the original -event. The field being split can either be a string or an array. - -An example use case of this filter is for taking output from the -<> which emits one event for -the whole output of a command and splitting that output by newline - -making each line an event. - -Split filter can also be used to split array fields in events into individual events. -A very common pattern in JSON & XML is to make use of lists to group data together. - -For example, a json structure like this: - -[source,js] ----------------------------------- -{ field1: ..., - results: [ - { result ... }, - { result ... }, - { result ... }, - ... -] } ----------------------------------- - -The split filter can be used on the above data to create separate events for each value of `results` field - -[source,js] ----------------------------------- -filter { - split { - field => "results" - } -} ----------------------------------- - -The end result of each split is a complete copy of the event -with only the current split section of the given field changed. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Split Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-field>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-terminator>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-field"] -===== `field` - - * Value type is <> - * Default value is `"message"` - -The field which value is split by the terminator. -Can be a multiline message or the ID of an array. -Nested arrays are referenced like: "[object_id][array_id]" - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -The field within the new event which the value is split into. -If not set, the target field defaults to split field name. - -[id="{version}-plugins-{type}s-{plugin}-terminator"] -===== `terminator` - - * Value type is <> - * Default value is `"\n"` - -The string to split on. This is usually a line terminator, but can be any -string. If you are splitting a JSON array into multiple events, you can ignore this field. 
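-
-For example (a sketch; `recipients` is a hypothetical field name), splitting a
-comma-separated string into one event per entry:
-[source,ruby]
-    filter {
-      split {
-        field      => "recipients"
-        terminator => ","
-      }
-    }
-An event with `"recipients" => "a@example.com,b@example.com"` would become two
-events whose `recipients` field is `a@example.com` and `b@example.com`
-respectively.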
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/split-v3.1.6.asciidoc b/docs/versioned-plugins/filters/split-v3.1.6.asciidoc deleted file mode 100644 index 3cb704737..000000000 --- a/docs/versioned-plugins/filters/split-v3.1.6.asciidoc +++ /dev/null @@ -1,111 +0,0 @@ -:plugin: split -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.6 -:release_date: 2017-12-11 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-split/blob/v3.1.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Split filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The split filter clones an event by splitting one of its fields and -placing each value resulting from the split into a clone of the original -event. The field being split can either be a string or an array. - -An example use case of this filter is for taking output from the -<> which emits one event for -the whole output of a command and splitting that output by newline - -making each line an event. - -Split filter can also be used to split array fields in events into individual events. -A very common pattern in JSON & XML is to make use of lists to group data together. - -For example, a json structure like this: - -[source,js] ----------------------------------- -{ field1: ..., - results: [ - { result ... }, - { result ... }, - { result ... }, - ... -] } ----------------------------------- - -The split filter can be used on the above data to create separate events for each value of `results` field - -[source,js] ----------------------------------- -filter { - split { - field => "results" - } -} ----------------------------------- - -The end result of each split is a complete copy of the event -with only the current split section of the given field changed. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Split Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-field>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-terminator>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-field"] -===== `field` - - * Value type is <> - * Default value is `"message"` - -The field which value is split by the terminator. -Can be a multiline message or the ID of an array. -Nested arrays are referenced like: "[object_id][array_id]" - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -The field within the new event which the value is split into. -If not set, the target field defaults to split field name. 
-
-[id="{version}-plugins-{type}s-{plugin}-terminator"]
-===== `terminator`
-
-  * Value type is <<string,string>>
-  * Default value is `"\n"`
-
-The string to split on. This is usually a line terminator, but can be any
-string. If you are splitting a JSON array into multiple events, you can ignore this field.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/syslog_pri-index.asciidoc b/docs/versioned-plugins/filters/syslog_pri-index.asciidoc
deleted file mode 100644
index cdb40ada2..000000000
--- a/docs/versioned-plugins/filters/syslog_pri-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: syslog_pri
-:type: filter
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <<v3.0.5-plugins-filters-syslog_pri,v3.0.5>> | 2017-11-07
-| <<v3.0.4-plugins-filters-syslog_pri,v3.0.4>> | 2017-08-15
-| <<v3.0.3-plugins-filters-syslog_pri,v3.0.3>> | 2017-06-23
-|=======================================================================
-
-include::syslog_pri-v3.0.5.asciidoc[]
-include::syslog_pri-v3.0.4.asciidoc[]
-include::syslog_pri-v3.0.3.asciidoc[]
-
diff --git a/docs/versioned-plugins/filters/syslog_pri-v3.0.3.asciidoc b/docs/versioned-plugins/filters/syslog_pri-v3.0.3.asciidoc
deleted file mode 100644
index b71b1e7b7..000000000
--- a/docs/versioned-plugins/filters/syslog_pri-v3.0.3.asciidoc
+++ /dev/null
@@ -1,85 +0,0 @@
-:plugin: syslog_pri
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.3
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-syslog_pri/blob/v3.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Syslog_pri filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-A filter plugin for Logstash that parses the `PRI` field from the front
-of a Syslog (RFC3164) message. If no priority is set, it
-defaults to 13 (per the RFC).
-
-This filter is based on the original `syslog.rb` code shipped
-with Logstash.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Syslog_pri Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-facility_labels>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-severity_labels>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-syslog_pri_field_name>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-use_labels>> |<<boolean,boolean>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-facility_labels"]
-===== `facility_labels`
-
-  * Value type is <<array,array>>
-  * Default value is `["kernel", "user-level", "mail", "daemon", "security/authorization", "syslogd", "line printer", "network news", "uucp", "clock", "security/authorization", "ftp", "ntp", "log audit", "log alert", "clock", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7"]`
-
-Labels for facility levels. These come from RFC3164.
-
-[id="{version}-plugins-{type}s-{plugin}-severity_labels"]
-===== `severity_labels`
-
-  * Value type is <<array,array>>
-  * Default value is `["emergency", "alert", "critical", "error", "warning", "notice", "informational", "debug"]`
-
-Labels for severity levels. These come from RFC3164.
-
-[id="{version}-plugins-{type}s-{plugin}-syslog_pri_field_name"]
-===== `syslog_pri_field_name`
-
-  * Value type is <<string,string>>
-  * Default value is `"syslog_pri"`
-
-Name of the field that contains the extracted PRI part of the syslog message.
-
-[id="{version}-plugins-{type}s-{plugin}-use_labels"]
-===== `use_labels`
-
-  * Value type is <<boolean,boolean>>
-  * Default value is `true`
-
-Add human-readable labels after parsing the severity and facility from the PRI.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/syslog_pri-v3.0.4.asciidoc b/docs/versioned-plugins/filters/syslog_pri-v3.0.4.asciidoc
deleted file mode 100644
index 5a41e66a2..000000000
--- a/docs/versioned-plugins/filters/syslog_pri-v3.0.4.asciidoc
+++ /dev/null
@@ -1,85 +0,0 @@
-:plugin: syslog_pri
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.4
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-syslog_pri/blob/v3.0.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Syslog_pri filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-A filter plugin for Logstash that parses the `PRI` field from the front
-of a Syslog (RFC3164) message. If no priority is set, it
-defaults to 13 (per the RFC).
-
-This filter is based on the original `syslog.rb` code shipped
-with Logstash.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Syslog_pri Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-facility_labels>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-severity_labels>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-syslog_pri_field_name>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-use_labels>> |<<boolean,boolean>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
- -  - -[id="{version}-plugins-{type}s-{plugin}-facility_labels"] -===== `facility_labels` - - * Value type is <> - * Default value is `["kernel", "user-level", "mail", "daemon", "security/authorization", "syslogd", "line printer", "network news", "uucp", "clock", "security/authorization", "ftp", "ntp", "log audit", "log alert", "clock", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7"]` - -Labels for facility levels. This comes from RFC3164. - -[id="{version}-plugins-{type}s-{plugin}-severity_labels"] -===== `severity_labels` - - * Value type is <> - * Default value is `["emergency", "alert", "critical", "error", "warning", "notice", "informational", "debug"]` - -Labels for severity levels. This comes from RFC3164. - -[id="{version}-plugins-{type}s-{plugin}-syslog_pri_field_name"] -===== `syslog_pri_field_name` - - * Value type is <> - * Default value is `"syslog_pri"` - -Name of field which passes in the extracted PRI part of the syslog message - -[id="{version}-plugins-{type}s-{plugin}-use_labels"] -===== `use_labels` - - * Value type is <> - * Default value is `true` - -set the status to experimental/beta/stable -Add human-readable names after parsing severity and facility from PRI - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/syslog_pri-v3.0.5.asciidoc b/docs/versioned-plugins/filters/syslog_pri-v3.0.5.asciidoc deleted file mode 100644 index 9a07a466c..000000000 --- a/docs/versioned-plugins/filters/syslog_pri-v3.0.5.asciidoc +++ /dev/null @@ -1,85 +0,0 @@ -:plugin: syslog_pri -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-syslog_pri/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Syslog_pri filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Filter plugin for logstash to parse the `PRI` field from the front -of a Syslog (RFC3164) message. If no priority is set, it will -default to 13 (per RFC). - -This filter is based on the original `syslog.rb` code shipped -with logstash. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Syslog_pri Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-facility_labels>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-severity_labels>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-syslog_pri_field_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-use_labels>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. 
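-
-As a brief usage sketch (assuming the PRI value has already been extracted
-into the default `syslog_pri` field, for example by a grok pattern):
-[source,ruby]
-    filter {
-      syslog_pri { }
-    }
-For an event with `syslog_pri => "34"` (facility 4, severity 2), this would
-add human-readable labels such as "security/authorization" and "critical"
-from the default label arrays, since `use_labels` defaults to `true`.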
- -  - -[id="{version}-plugins-{type}s-{plugin}-facility_labels"] -===== `facility_labels` - - * Value type is <> - * Default value is `["kernel", "user-level", "mail", "daemon", "security/authorization", "syslogd", "line printer", "network news", "uucp", "clock", "security/authorization", "ftp", "ntp", "log audit", "log alert", "clock", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7"]` - -Labels for facility levels. This comes from RFC3164. - -[id="{version}-plugins-{type}s-{plugin}-severity_labels"] -===== `severity_labels` - - * Value type is <> - * Default value is `["emergency", "alert", "critical", "error", "warning", "notice", "informational", "debug"]` - -Labels for severity levels. This comes from RFC3164. - -[id="{version}-plugins-{type}s-{plugin}-syslog_pri_field_name"] -===== `syslog_pri_field_name` - - * Value type is <> - * Default value is `"syslog_pri"` - -Name of field which passes in the extracted PRI part of the syslog message - -[id="{version}-plugins-{type}s-{plugin}-use_labels"] -===== `use_labels` - - * Value type is <> - * Default value is `true` - -set the status to experimental/beta/stable -Add human-readable names after parsing severity and facility from PRI - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/throttle-index.asciidoc b/docs/versioned-plugins/filters/throttle-index.asciidoc deleted file mode 100644 index 4c91c2d02..000000000 --- a/docs/versioned-plugins/filters/throttle-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: throttle -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::throttle-v4.0.4.asciidoc[] -include::throttle-v4.0.3.asciidoc[] -include::throttle-v4.0.2.asciidoc[] - diff --git a/docs/versioned-plugins/filters/throttle-v4.0.2.asciidoc b/docs/versioned-plugins/filters/throttle-v4.0.2.asciidoc deleted file mode 100644 index bb982f483..000000000 --- a/docs/versioned-plugins/filters/throttle-v4.0.2.asciidoc +++ /dev/null @@ -1,252 +0,0 @@ -:plugin: throttle -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.0.2 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-throttle/blob/v4.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Throttle filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The throttle filter is for throttling the number of events. The filter is -configured with a lower bound, the "before_count", and upper bound, the "after_count", -and a period of time. All events passing through the filter will be counted based on -their key and the event timestamp. As long as the count is less than the "before_count" -or greater than the "after_count", the event will be "throttled" which means the filter -will be considered successful and any tags or fields will be added (or removed). 
- -The plugin is thread-safe and properly tracks past events. - -For example, if you wanted to throttle events so you only receive an event after 2 -occurrences and you get no more than 3 in 10 minutes, you would use the configuration: -[source,ruby] - period => 600 - max_age => 1200 - before_count => 3 - after_count => 5 - -Which would result in: -========================== - event 1 - throttled (successful filter, period start) - event 2 - throttled (successful filter) - event 3 - not throttled - event 4 - not throttled - event 5 - not throttled - event 6 - throttled (successful filter) - event 7 - throttled (successful filter) - event x - throttled (successful filter) - period end - event 1 - throttled (successful filter, period start) - event 2 - throttled (successful filter) - event 3 - not throttled - event 4 - not throttled - event 5 - not throttled - event 6 - throttled (successful filter) - ... -========================== -Another example is if you wanted to throttle events so you only -receive 1 event per hour, you would use the configuration: -[source,ruby] - period => 3600 - max_age => 7200 - before_count => -1 - after_count => 1 - -Which would result in: -========================== - event 1 - not throttled (period start) - event 2 - throttled (successful filter) - event 3 - throttled (successful filter) - event 4 - throttled (successful filter) - event x - throttled (successful filter) - period end - event 1 - not throttled (period start) - event 2 - throttled (successful filter) - event 3 - throttled (successful filter) - event 4 - throttled (successful filter) - ... -========================== -A common use case would be to use the throttle filter to throttle events before 3 and -after 5 while using multiple fields for the key and then use the drop filter to remove -throttled events. This configuration might appear as: -[source,ruby] - filter { - throttle { - before_count => 3 - after_count => 5 - period => 3600 - max_age => 7200 - key => "%{host}%{message}" - add_tag => "throttled" - } - if "throttled" in [tags] { - drop { } - } - } - -Another case would be to store all events, but only email non-throttled events -so the op's inbox isn't flooded with emails in the event of a system error. -This configuration might appear as: -[source,ruby] - filter { - throttle { - before_count => 3 - after_count => 5 - period => 3600 - max_age => 7200 - key => "%{message}" - add_tag => "throttled" - } - } - output { - if "throttled" not in [tags] { - email { - from => "logstash@mycompany.com" - subject => "Production System Alert" - to => "ops@mycompany.com" - via => "sendmail" - body => "Alert on %{host} from path %{path}:\n\n%{message}" - options => { "location" => "/usr/sbin/sendmail" } - } - } - elasticsearch_http { - host => "localhost" - port => "19200" - } - } - -When an event is received, the event key is stored in a key_cache. The key references -a timeslot_cache. The event is allocated to a timeslot (created dynamically) based on -the timestamp of the event. The timeslot counter is incremented. When the next event is -received (same key), within the same "period", it is allocated to the same timeslot. -The timeslot counter is incremented once again. - -The timeslot expires if the maximum age has been exceeded. The age is calculated -based on the latest event timestamp and the max_age configuration option. - - ---[::.. 
DESIGN ..::]---
-
-+- [key_cache] -+    +-- [timeslot_cache] --+
-|               |    | @created: 1439839636 |
-|               |    | @latest:  1439839836 |
-| [a.b.c]  =>   |    +----------------------+
-|               |    | [1439839636] => 1    |
-|               |    | [1439839736] => 3    |
-|               |    | [1439839836] => 2    |
-|               |    +----------------------+
-|               |
-|               |    +-- [timeslot_cache] --+
-|               |    | @created: eeeeeeeeee |
-|               |    | @latest:  llllllllll |
-| [x.y.z]  =>   |    +----------------------+
-|               |    | [0000000060] => x    |
-|               |    | [0000000120] => y    |
-|               |    | [..........] => N    |
-+---------------+    +----------------------+
-
-Frank de Jong (@frapex)
-Mike Pilone (@mikepilone)
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Throttle Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-after_count>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-before_count>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-key>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-max_age>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-max_counters>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-period>> |<<string,string>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-after_count"]
-===== `after_count`
-
-  * Value type is <<number,number>>
-  * Default value is `-1`
-
-Events greater than this count will be throttled. Setting this value to -1, the
-default, will cause no events to be throttled based on the upper bound.
-
-[id="{version}-plugins-{type}s-{plugin}-before_count"]
-===== `before_count`
-
-  * Value type is <<number,number>>
-  * Default value is `-1`
-
-Events less than this count will be throttled. Setting this value to -1, the
-default, will cause no events to be throttled based on the lower bound.
-
-[id="{version}-plugins-{type}s-{plugin}-key"]
-===== `key`
-
-  * This is a required setting.
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-The key used to identify events. Events with the same key are grouped together.
-Field substitutions are allowed, so you can combine multiple fields.
-
-[id="{version}-plugins-{type}s-{plugin}-max_age"]
-===== `max_age`
-
-  * Value type is <<number,number>>
-  * Default value is `3600`
-
-The maximum age of a timeslot. Higher values allow better tracking of an asynchronous
-flow of events, but require more memory. As a rule of thumb, you should set this value
-to at least twice the period, or to the period plus the maximum time offset
-between unordered events with the same key. Values below the specified period give
-unexpected results if unordered events are processed simultaneously.
-
-[id="{version}-plugins-{type}s-{plugin}-max_counters"]
-===== `max_counters`
-
-  * Value type is <<number,number>>
-  * Default value is `100000`
-
-The maximum number of counters to store before decreasing the maximum age of a timeslot.
-Setting this value to -1 removes the upper bound, placing no constraint on the
-number of counters. This configuration value should only be used as a memory
-control mechanism and can cause early counter expiration if the value is reached.
-It is recommended to leave the default value and ensure that your key is selected
-such that it limits the number of counters required (i.e.
don't use UUID as the key). - -[id="{version}-plugins-{type}s-{plugin}-period"] -===== `period` - - * Value type is <> - * Default value is `"60"` - -The period in seconds after the first occurrence of an event until a new timeslot -is created. This period is tracked per unique key and per timeslot. -Field substitutions are allowed in this value. This allows you to specify that -certain kinds of events throttle for a specific period of time. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/throttle-v4.0.3.asciidoc b/docs/versioned-plugins/filters/throttle-v4.0.3.asciidoc deleted file mode 100644 index 3fae882d4..000000000 --- a/docs/versioned-plugins/filters/throttle-v4.0.3.asciidoc +++ /dev/null @@ -1,252 +0,0 @@ -:plugin: throttle -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.0.3 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-throttle/blob/v4.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Throttle filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The throttle filter is for throttling the number of events. The filter is -configured with a lower bound, the "before_count", and upper bound, the "after_count", -and a period of time. All events passing through the filter will be counted based on -their key and the event timestamp. As long as the count is less than the "before_count" -or greater than the "after_count", the event will be "throttled" which means the filter -will be considered successful and any tags or fields will be added (or removed). - -The plugin is thread-safe and properly tracks past events. - -For example, if you wanted to throttle events so you only receive an event after 2 -occurrences and you get no more than 3 in 10 minutes, you would use the configuration: -[source,ruby] - period => 600 - max_age => 1200 - before_count => 3 - after_count => 5 - -Which would result in: -========================== - event 1 - throttled (successful filter, period start) - event 2 - throttled (successful filter) - event 3 - not throttled - event 4 - not throttled - event 5 - not throttled - event 6 - throttled (successful filter) - event 7 - throttled (successful filter) - event x - throttled (successful filter) - period end - event 1 - throttled (successful filter, period start) - event 2 - throttled (successful filter) - event 3 - not throttled - event 4 - not throttled - event 5 - not throttled - event 6 - throttled (successful filter) - ... 
-==========================
-Another example is if you wanted to throttle events so you only
-receive 1 event per hour, you would use the configuration:
-[source,ruby]
-    period => 3600
-    max_age => 7200
-    before_count => -1
-    after_count => 1
-
-Which would result in:
-==========================
-    event 1 - not throttled (period start)
-    event 2 - throttled (successful filter)
-    event 3 - throttled (successful filter)
-    event 4 - throttled (successful filter)
-    event x - throttled (successful filter)
-    period end
-    event 1 - not throttled (period start)
-    event 2 - throttled (successful filter)
-    event 3 - throttled (successful filter)
-    event 4 - throttled (successful filter)
-    ...
-==========================
-A common use case would be to use the throttle filter to throttle events before 3 and
-after 5 while using multiple fields for the key and then use the drop filter to remove
-throttled events. This configuration might appear as:
-[source,ruby]
-    filter {
-      throttle {
-        before_count => 3
-        after_count => 5
-        period => 3600
-        max_age => 7200
-        key => "%{host}%{message}"
-        add_tag => "throttled"
-      }
-      if "throttled" in [tags] {
-        drop { }
-      }
-    }
-
-Another case would be to store all events, but only email non-throttled events
-so the op's inbox isn't flooded with emails in the event of a system error.
-This configuration might appear as:
-[source,ruby]
-    filter {
-      throttle {
-        before_count => 3
-        after_count => 5
-        period => 3600
-        max_age => 7200
-        key => "%{message}"
-        add_tag => "throttled"
-      }
-    }
-    output {
-      if "throttled" not in [tags] {
-        email {
-          from => "logstash@mycompany.com"
-          subject => "Production System Alert"
-          to => "ops@mycompany.com"
-          via => "sendmail"
-          body => "Alert on %{host} from path %{path}:\n\n%{message}"
-          options => { "location" => "/usr/sbin/sendmail" }
-        }
-      }
-      elasticsearch_http {
-        host => "localhost"
-        port => "19200"
-      }
-    }
-
-When an event is received, the event key is stored in a key_cache. The key references
-a timeslot_cache. The event is allocated to a timeslot (created dynamically) based on
-the timestamp of the event. The timeslot counter is incremented. When the next event is
-received (same key), within the same "period", it is allocated to the same timeslot.
-The timeslot counter is incremented once again.
-
-The timeslot expires if the maximum age has been exceeded. The age is calculated
-based on the latest event timestamp and the max_age configuration option.
-
----[::.. DESIGN ..::]---
-
-+- [key_cache] -+     +-- [timeslot_cache] --+
-|               |     | @created: 1439839636 |
-|               |     | @latest:  1439839836 |
-| [a.b.c] =>    |     +----------------------+
-|               |     | [1439839636] => 1    |
-|               |     | [1439839736] => 3    |
-|               |     | [1439839836] => 2    |
-|               |     +----------------------+
-|               |
-|               |     +-- [timeslot_cache] --+
-|               |     | @created: eeeeeeeeee |
-|               |     | @latest:  llllllllll |
-| [x.y.z] =>    |     +----------------------+
-|               |     | [0000000060] => x    |
-|               |     | [0000000120] => y    |
-|               |     | [..........] => N    |
-+---------------+     +----------------------+
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Throttle Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-after_count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-before_count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-max_age>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_counters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-period>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-after_count"] -===== `after_count` - - * Value type is <> - * Default value is `-1` - -Events greater than this count will be throttled. Setting this value to -1, the -default, will cause no events to be throttled based on the upper bound. - -[id="{version}-plugins-{type}s-{plugin}-before_count"] -===== `before_count` - - * Value type is <> - * Default value is `-1` - -Events less than this count will be throttled. Setting this value to -1, the -default, will cause no events to be throttled based on the lower bound. - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The key used to identify events. Events with the same key are grouped together. -Field substitutions are allowed, so you can combine multiple fields. - -[id="{version}-plugins-{type}s-{plugin}-max_age"] -===== `max_age` - - * Value type is <> - * Default value is `3600` - -The maximum age of a timeslot. Higher values allow better tracking of an asynchronous -flow of events, but require more memory. As a rule of thumb you should set this value -to at least twice the period. Or set this value to period + maximum time offset -between unordered events with the same key. Values below the specified period give -unexpected results if unordered events are processed simultaneously. - -[id="{version}-plugins-{type}s-{plugin}-max_counters"] -===== `max_counters` - - * Value type is <> - * Default value is `100000` - -The maximum number of counters to store before decreasing the maximum age of a timeslot. -Setting this value to -1 will prevent an upper bound with no constraint on the -number of counters. This configuration value should only be used as a memory -control mechanism and can cause early counter expiration if the value is reached. -It is recommended to leave the default value and ensure that your key is selected -such that it limits the number of counters required (i.e. don't use UUID as the key). - -[id="{version}-plugins-{type}s-{plugin}-period"] -===== `period` - - * Value type is <> - * Default value is `"60"` - -The period in seconds after the first occurrence of an event until a new timeslot -is created. This period is tracked per unique key and per timeslot. -Field substitutions are allowed in this value. This allows you to specify that -certain kinds of events throttle for a specific period of time. 
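-
-For instance, here is a minimal sketch of a per-event period driven by field
-substitution; the `throttle_period` field name and the surrounding values are
-hypothetical, not defaults of this plugin:
-[source,ruby]
-    filter {
-      throttle {
-        key          => "%{host}"
-        period       => "%{throttle_period}"
-        max_age      => 7200
-        before_count => -1
-        after_count  => 1
-        add_tag      => "throttled"
-      }
-    }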
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/throttle-v4.0.4.asciidoc b/docs/versioned-plugins/filters/throttle-v4.0.4.asciidoc deleted file mode 100644 index 381eaea87..000000000 --- a/docs/versioned-plugins/filters/throttle-v4.0.4.asciidoc +++ /dev/null @@ -1,252 +0,0 @@ -:plugin: throttle -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.0.4 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-throttle/blob/v4.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Throttle filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The throttle filter is for throttling the number of events. The filter is -configured with a lower bound, the "before_count", and upper bound, the "after_count", -and a period of time. All events passing through the filter will be counted based on -their key and the event timestamp. As long as the count is less than the "before_count" -or greater than the "after_count", the event will be "throttled" which means the filter -will be considered successful and any tags or fields will be added (or removed). - -The plugin is thread-safe and properly tracks past events. - -For example, if you wanted to throttle events so you only receive an event after 2 -occurrences and you get no more than 3 in 10 minutes, you would use the configuration: -[source,ruby] - period => 600 - max_age => 1200 - before_count => 3 - after_count => 5 - -Which would result in: -========================== - event 1 - throttled (successful filter, period start) - event 2 - throttled (successful filter) - event 3 - not throttled - event 4 - not throttled - event 5 - not throttled - event 6 - throttled (successful filter) - event 7 - throttled (successful filter) - event x - throttled (successful filter) - period end - event 1 - throttled (successful filter, period start) - event 2 - throttled (successful filter) - event 3 - not throttled - event 4 - not throttled - event 5 - not throttled - event 6 - throttled (successful filter) - ... -========================== -Another example is if you wanted to throttle events so you only -receive 1 event per hour, you would use the configuration: -[source,ruby] - period => 3600 - max_age => 7200 - before_count => -1 - after_count => 1 - -Which would result in: -========================== - event 1 - not throttled (period start) - event 2 - throttled (successful filter) - event 3 - throttled (successful filter) - event 4 - throttled (successful filter) - event x - throttled (successful filter) - period end - event 1 - not throttled (period start) - event 2 - throttled (successful filter) - event 3 - throttled (successful filter) - event 4 - throttled (successful filter) - ... -========================== -A common use case would be to use the throttle filter to throttle events before 3 and -after 5 while using multiple fields for the key and then use the drop filter to remove -throttled events. 
This configuration might appear as:
-[source,ruby]
-    filter {
-      throttle {
-        before_count => 3
-        after_count => 5
-        period => 3600
-        max_age => 7200
-        key => "%{host}%{message}"
-        add_tag => "throttled"
-      }
-      if "throttled" in [tags] {
-        drop { }
-      }
-    }
-
-Another case would be to store all events, but only email non-throttled events
-so the op's inbox isn't flooded with emails in the event of a system error.
-This configuration might appear as:
-[source,ruby]
-    filter {
-      throttle {
-        before_count => 3
-        after_count => 5
-        period => 3600
-        max_age => 7200
-        key => "%{message}"
-        add_tag => "throttled"
-      }
-    }
-    output {
-      if "throttled" not in [tags] {
-        email {
-          from => "logstash@mycompany.com"
-          subject => "Production System Alert"
-          to => "ops@mycompany.com"
-          via => "sendmail"
-          body => "Alert on %{host} from path %{path}:\n\n%{message}"
-          options => { "location" => "/usr/sbin/sendmail" }
-        }
-      }
-      elasticsearch_http {
-        host => "localhost"
-        port => "19200"
-      }
-    }
-
-When an event is received, the event key is stored in a key_cache. The key references
-a timeslot_cache. The event is allocated to a timeslot (created dynamically) based on
-the timestamp of the event. The timeslot counter is incremented. When the next event is
-received (same key), within the same "period", it is allocated to the same timeslot.
-The timeslot counter is incremented once again.
-
-The timeslot expires if the maximum age has been exceeded. The age is calculated
-based on the latest event timestamp and the max_age configuration option.
-
----[::.. DESIGN ..::]---
-
-+- [key_cache] -+     +-- [timeslot_cache] --+
-|               |     | @created: 1439839636 |
-|               |     | @latest:  1439839836 |
-| [a.b.c] =>    |     +----------------------+
-|               |     | [1439839636] => 1    |
-|               |     | [1439839736] => 3    |
-|               |     | [1439839836] => 2    |
-|               |     +----------------------+
-|               |
-|               |     +-- [timeslot_cache] --+
-|               |     | @created: eeeeeeeeee |
-|               |     | @latest:  llllllllll |
-| [x.y.z] =>    |     +----------------------+
-|               |     | [0000000060] => x    |
-|               |     | [0000000120] => y    |
-|               |     | [..........] => N    |
-+---------------+     +----------------------+
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Throttle Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-after_count>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-before_count>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-max_age>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-max_counters>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-period>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-after_count"]
-===== `after_count`
-
- * Value type is <>
- * Default value is `-1`
-
-Events greater than this count will be throttled. Setting this value to -1, the
-default, will cause no events to be throttled based on the upper bound.
-
-[id="{version}-plugins-{type}s-{plugin}-before_count"]
-===== `before_count`
-
- * Value type is <>
- * Default value is `-1`
-
-Events less than this count will be throttled.
Setting this value to -1, the -default, will cause no events to be throttled based on the lower bound. - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The key used to identify events. Events with the same key are grouped together. -Field substitutions are allowed, so you can combine multiple fields. - -[id="{version}-plugins-{type}s-{plugin}-max_age"] -===== `max_age` - - * Value type is <> - * Default value is `3600` - -The maximum age of a timeslot. Higher values allow better tracking of an asynchronous -flow of events, but require more memory. As a rule of thumb you should set this value -to at least twice the period. Or set this value to period + maximum time offset -between unordered events with the same key. Values below the specified period give -unexpected results if unordered events are processed simultaneously. - -[id="{version}-plugins-{type}s-{plugin}-max_counters"] -===== `max_counters` - - * Value type is <> - * Default value is `100000` - -The maximum number of counters to store before decreasing the maximum age of a timeslot. -Setting this value to -1 will prevent an upper bound with no constraint on the -number of counters. This configuration value should only be used as a memory -control mechanism and can cause early counter expiration if the value is reached. -It is recommended to leave the default value and ensure that your key is selected -such that it limits the number of counters required (i.e. don't use UUID as the key). - -[id="{version}-plugins-{type}s-{plugin}-period"] -===== `period` - - * Value type is <> - * Default value is `"60"` - -The period in seconds after the first occurrence of an event until a new timeslot -is created. This period is tracked per unique key and per timeslot. -Field substitutions are allowed in this value. This allows you to specify that -certain kinds of events throttle for a specific period of time. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/tld-index.asciidoc b/docs/versioned-plugins/filters/tld-index.asciidoc deleted file mode 100644 index 52f0574d3..000000000 --- a/docs/versioned-plugins/filters/tld-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: tld -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::tld-v3.0.3.asciidoc[] -include::tld-v3.0.2.asciidoc[] -include::tld-v3.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/filters/tld-v3.0.1.asciidoc b/docs/versioned-plugins/filters/tld-v3.0.1.asciidoc deleted file mode 100644 index 6235caaf5..000000000 --- a/docs/versioned-plugins/filters/tld-v3.0.1.asciidoc +++ /dev/null @@ -1,73 +0,0 @@ -:plugin: tld -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-tld/blob/v3.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Tld filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This filter parses the top-level domain (TLD) from the value of the
-`source` field and places the parsed result in the `target` field.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Tld Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
- * Value type is <>
- * Default value is `"message"`
-
-The source field to parse.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * Default value is `"tld"`
-
-The target field to place all the data.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/tld-v3.0.2.asciidoc b/docs/versioned-plugins/filters/tld-v3.0.2.asciidoc
deleted file mode 100644
index 98747f1e4..000000000
--- a/docs/versioned-plugins/filters/tld-v3.0.2.asciidoc
+++ /dev/null
@@ -1,73 +0,0 @@
-:plugin: tld
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.2
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-tld/blob/v3.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Tld filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This filter parses the top-level domain (TLD) from the value of the
-`source` field and places the parsed result in the `target` field.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Tld Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
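-
-As an illustration, a minimal sketch of this filter; the `domain` and `tld_info`
-field names are hypothetical:
-[source,ruby]
-    filter {
-      tld {
-        source => "domain"
-        target => "tld_info"
-      }
-    }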
- -  - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * Value type is <> - * Default value is `"message"` - -Setting the config_name here is required. This is how you -configure this filter from your Logstash config. - -filter { - example { - message => "My message..." - } -} - -The source field to parse - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * Default value is `"tld"` - -The target field to place all the data - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/tld-v3.0.3.asciidoc b/docs/versioned-plugins/filters/tld-v3.0.3.asciidoc deleted file mode 100644 index a114a1b3c..000000000 --- a/docs/versioned-plugins/filters/tld-v3.0.3.asciidoc +++ /dev/null @@ -1,73 +0,0 @@ -:plugin: tld -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-tld/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Tld filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This example filter will replace the contents of the default -message field with whatever you specify in the configuration. - -It is only intended to be used as an example. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Tld Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-source>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * Value type is <> - * Default value is `"message"` - -Setting the config_name here is required. This is how you -configure this filter from your Logstash config. - -filter { - example { - message => "My message..." 
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * Default value is `"tld"`
-
-The target field to place all the data.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/translate-index.asciidoc b/docs/versioned-plugins/filters/translate-index.asciidoc
deleted file mode 100644
index dd85f98c7..000000000
--- a/docs/versioned-plugins/filters/translate-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: translate
-:type: filter
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-15
-| <> | 2017-06-23
-|=======================================================================
-
-include::translate-v3.0.4.asciidoc[]
-include::translate-v3.0.3.asciidoc[]
-include::translate-v3.0.2.asciidoc[]
-
diff --git a/docs/versioned-plugins/filters/translate-v3.0.2.asciidoc b/docs/versioned-plugins/filters/translate-v3.0.2.asciidoc
deleted file mode 100644
index abf6c8101..000000000
--- a/docs/versioned-plugins/filters/translate-v3.0.2.asciidoc
+++ /dev/null
@@ -1,211 +0,0 @@
-:plugin: translate
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.2
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-translate/blob/v3.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Translate filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-A general search and replace tool that uses a configured hash
-and/or a file to determine replacement values. Currently supported are
-YAML, JSON, and CSV files.
-
-The dictionary entries can be specified in one of two ways: First,
-the `dictionary` configuration item may contain a hash representing
-the mapping. Second, an external file (readable by logstash) may be specified
-in the `dictionary_path` configuration item. These two methods may not be used
-together; doing so will produce an error.
-
-Operationally, if the event field specified in the `field` configuration
-matches the EXACT contents of a dictionary entry key (or matches a regex if
-the `regex` configuration item has been enabled), the field's value will be substituted
-with the matched key's value from the dictionary.
-
-By default, the translate filter will replace the contents of the
-matching event field (in-place). However, by using the `destination`
-configuration item, you may also specify a target event field to
-populate with the new translated value.
-
-Alternatively, for simple string search and replacements for just a few values
-you might consider using the gsub function of the mutate filter.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Translate Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-dictionary>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-dictionary_path>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-exact>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-fallback>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-field>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-override>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-refresh_interval>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-regex>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-destination"]
-===== `destination`
-
- * Value type is <>
- * Default value is `"translation"`
-
-The destination field you wish to populate with the translated code. The default
-is a field named `translation`. Set this to the same value as the source field if you want
-to do a substitution; in this case the filter will always succeed. This will clobber
-the old value of the source field!
-
-[id="{version}-plugins-{type}s-{plugin}-dictionary"]
-===== `dictionary`
-
- * Value type is <>
- * Default value is `{}`
-
-The dictionary to use for translation, when specified in the logstash filter
-configuration item (i.e. do not use the `@dictionary_path` file).
-
-Example:
-[source,ruby]
-    filter {
-      translate {
-        dictionary => [ "100", "Continue",
-                        "101", "Switching Protocols",
-                        "merci", "thank you",
-                        "old version", "new version" ]
-      }
-    }
-
-NOTE: It is an error to specify both `dictionary` and `dictionary_path`.
-
-[id="{version}-plugins-{type}s-{plugin}-dictionary_path"]
-===== `dictionary_path`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The full path of the external dictionary file. The format of the table
-should be standard YAML, JSON, or CSV. Make sure you specify any integer-based keys
-in quotes. For example, the YAML file should look something like this:
-[source,ruby]
-    "100": Continue
-    "101": Switching Protocols
-    merci: gracias
-    old version: new version
-
-NOTE: It is an error to specify both `dictionary` and `dictionary_path`.
-
-The currently supported formats are YAML, JSON, and CSV. Format selection is
-based on the file extension: `json` for JSON, `yaml` or `yml` for YAML, and
-`csv` for CSV. The JSON format only supports simple key/value, unnested
-objects. The CSV format expects exactly two columns, with the first serving
-as the original text, and the second column as the replacement.
-
-[id="{version}-plugins-{type}s-{plugin}-exact"]
-===== `exact`
-
- * Value type is <>
- * Default value is `true`
-
-When `exact => true`, the translate filter will populate the destination field
-with the exact contents of the dictionary value. When `exact => false`, the
-filter will populate the destination field with the result of any existing
-destination field's data, with the translated value substituted in-place.
-
-For example, consider this simple translation.yml, configured to check the `data` field:
-[source,ruby]
-    foo: bar
-
-If logstash receives an event with the `data` field set to `foo`, and `exact => true`,
-the destination field will be populated with the string `bar`.
-If `exact => false`, and logstash receives the same event, the destination field
-will also be set to `bar`. However, if logstash receives an event with the `data` field
-set to `foofing`, the destination field will be set to `barfing`.
-
-Set both `exact => true` AND `regex => true` if you would like to match using dictionary
-keys as regular expressions. A large dictionary could be expensive to match in this case.
-
-[id="{version}-plugins-{type}s-{plugin}-fallback"]
-===== `fallback`
-
- * Value type is <>
- * There is no default value for this setting.
-
-In case no translation occurs in the event (no matches), this will add a default
-translation string, which will always populate the destination field if the match failed.
-
-For example, if we have configured `fallback => "no match"`, using this dictionary:
-[source,ruby]
-    foo: bar
-
-Then, if logstash received an event with the configured field set to `foo`, the destination
-field would be set to `bar`. However, if logstash received an event with the field set to `nope`,
-then the destination field would still be populated, but with the value of `no match`.
-This configuration can be dynamic and include parts of the event using the `%{field}` syntax.
-
-[id="{version}-plugins-{type}s-{plugin}-field"]
-===== `field`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The name of the logstash event field containing the value to be compared for a
-match by the translate filter (e.g. `message`, `host`, `response_code`).
-
-If this field is an array, only the first value will be used.
-
-[id="{version}-plugins-{type}s-{plugin}-override"]
-===== `override`
-
- * Value type is <>
- * Default value is `false`
-
-If the destination (or target) field already exists, this configuration item specifies
-whether the filter should skip translation (default) or overwrite the target field
-value with the new translation value.
-
-[id="{version}-plugins-{type}s-{plugin}-refresh_interval"]
-===== `refresh_interval`
-
- * Value type is <>
- * Default value is `300`
-
-When using a dictionary file, this setting indicates how frequently
-(in seconds) logstash will check the dictionary file for updates.
-
-[id="{version}-plugins-{type}s-{plugin}-regex"]
-===== `regex`
-
- * Value type is <>
- * Default value is `false`
-
-If you'd like to treat dictionary keys as regular expressions, set `regex => true`.
-Note: this is activated only when `exact => true`.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/translate-v3.0.3.asciidoc b/docs/versioned-plugins/filters/translate-v3.0.3.asciidoc
deleted file mode 100644
index 51a4d43b5..000000000
--- a/docs/versioned-plugins/filters/translate-v3.0.3.asciidoc
+++ /dev/null
@@ -1,211 +0,0 @@
-:plugin: translate
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.3
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-translate/blob/v3.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Translate filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-A general search and replace tool that uses a configured hash
-and/or a file to determine replacement values. Currently supported are
-YAML, JSON, and CSV files.
-
-The dictionary entries can be specified in one of two ways: First,
-the `dictionary` configuration item may contain a hash representing
-the mapping. Second, an external file (readable by logstash) may be specified
-in the `dictionary_path` configuration item. These two methods may not be used
-together; doing so will produce an error.
-
-Operationally, if the event field specified in the `field` configuration
-matches the EXACT contents of a dictionary entry key (or matches a regex if
-the `regex` configuration item has been enabled), the field's value will be substituted
-with the matched key's value from the dictionary.
-
-By default, the translate filter will replace the contents of the
-matching event field (in-place). However, by using the `destination`
-configuration item, you may also specify a target event field to
-populate with the new translated value.
-
-Alternatively, for simple string search and replacements for just a few values
-you might consider using the gsub function of the mutate filter.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Translate Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-dictionary>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-dictionary_path>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-exact>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-fallback>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-field>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-override>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-refresh_interval>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-regex>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-destination"]
-===== `destination`
-
- * Value type is <>
- * Default value is `"translation"`
-
-The destination field you wish to populate with the translated code. The default
-is a field named `translation`. Set this to the same value as the source field if you want
-to do a substitution; in this case the filter will always succeed. This will clobber
-the old value of the source field!
-
-[id="{version}-plugins-{type}s-{plugin}-dictionary"]
-===== `dictionary`
-
- * Value type is <>
- * Default value is `{}`
-
-The dictionary to use for translation, when specified in the logstash filter
-configuration item (i.e. do not use the `@dictionary_path` file).
-
-Example:
-[source,ruby]
-    filter {
-      translate {
-        dictionary => [ "100", "Continue",
-                        "101", "Switching Protocols",
-                        "merci", "thank you",
-                        "old version", "new version" ]
-      }
-    }
-
-NOTE: It is an error to specify both `dictionary` and `dictionary_path`.
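-
-For context, a sketch of the full filter block this dictionary form would live in;
-the `response_code` and `http_status_name` field names are hypothetical:
-[source,ruby]
-    filter {
-      translate {
-        field       => "response_code"
-        destination => "http_status_name"
-        dictionary  => [ "100", "Continue",
-                         "101", "Switching Protocols" ]
-      }
-    }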
-
-[id="{version}-plugins-{type}s-{plugin}-dictionary_path"]
-===== `dictionary_path`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The full path of the external dictionary file. The format of the table
-should be standard YAML, JSON, or CSV. Make sure you specify any integer-based keys
-in quotes. For example, the YAML file should look something like this:
-[source,ruby]
-    "100": Continue
-    "101": Switching Protocols
-    merci: gracias
-    old version: new version
-
-NOTE: It is an error to specify both `dictionary` and `dictionary_path`.
-
-The currently supported formats are YAML, JSON, and CSV. Format selection is
-based on the file extension: `json` for JSON, `yaml` or `yml` for YAML, and
-`csv` for CSV. The JSON format only supports simple key/value, unnested
-objects. The CSV format expects exactly two columns, with the first serving
-as the original text, and the second column as the replacement.
-
-[id="{version}-plugins-{type}s-{plugin}-exact"]
-===== `exact`
-
- * Value type is <>
- * Default value is `true`
-
-When `exact => true`, the translate filter will populate the destination field
-with the exact contents of the dictionary value. When `exact => false`, the
-filter will populate the destination field with the result of any existing
-destination field's data, with the translated value substituted in-place.
-
-For example, consider this simple translation.yml, configured to check the `data` field:
-[source,ruby]
-    foo: bar
-
-If logstash receives an event with the `data` field set to `foo`, and `exact => true`,
-the destination field will be populated with the string `bar`.
-If `exact => false`, and logstash receives the same event, the destination field
-will also be set to `bar`. However, if logstash receives an event with the `data` field
-set to `foofing`, the destination field will be set to `barfing`.
-
-Set both `exact => true` AND `regex => true` if you would like to match using dictionary
-keys as regular expressions. A large dictionary could be expensive to match in this case.
-
-[id="{version}-plugins-{type}s-{plugin}-fallback"]
-===== `fallback`
-
- * Value type is <>
- * There is no default value for this setting.
-
-In case no translation occurs in the event (no matches), this will add a default
-translation string, which will always populate the destination field if the match failed.
-
-For example, if we have configured `fallback => "no match"`, using this dictionary:
-[source,ruby]
-    foo: bar
-
-Then, if logstash received an event with the configured field set to `foo`, the destination
-field would be set to `bar`. However, if logstash received an event with the field set to `nope`,
-then the destination field would still be populated, but with the value of `no match`.
-This configuration can be dynamic and include parts of the event using the `%{field}` syntax.
-
-[id="{version}-plugins-{type}s-{plugin}-field"]
-===== `field`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The name of the logstash event field containing the value to be compared for a
-match by the translate filter (e.g. `message`, `host`, `response_code`).
-
-If this field is an array, only the first value will be used.
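-
-A short sketch combining `field` with `fallback`; the field names and dictionary
-contents are hypothetical:
-[source,ruby]
-    filter {
-      translate {
-        field       => "country_code"
-        destination => "country_name"
-        dictionary  => [ "nl", "Netherlands",
-                         "us", "United States" ]
-        fallback    => "unknown (%{country_code})"
-      }
    }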
-
-[id="{version}-plugins-{type}s-{plugin}-override"]
-===== `override`
-
- * Value type is <>
- * Default value is `false`
-
-If the destination (or target) field already exists, this configuration item specifies
-whether the filter should skip translation (default) or overwrite the target field
-value with the new translation value.
-
-[id="{version}-plugins-{type}s-{plugin}-refresh_interval"]
-===== `refresh_interval`
-
- * Value type is <>
- * Default value is `300`
-
-When using a dictionary file, this setting indicates how frequently
-(in seconds) logstash will check the dictionary file for updates.
-
-[id="{version}-plugins-{type}s-{plugin}-regex"]
-===== `regex`
-
- * Value type is <>
- * Default value is `false`
-
-If you'd like to treat dictionary keys as regular expressions, set `regex => true`.
-Note: this is activated only when `exact => true`.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/translate-v3.0.4.asciidoc b/docs/versioned-plugins/filters/translate-v3.0.4.asciidoc
deleted file mode 100644
index d44551ccf..000000000
--- a/docs/versioned-plugins/filters/translate-v3.0.4.asciidoc
+++ /dev/null
@@ -1,211 +0,0 @@
-:plugin: translate
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.4
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-translate/blob/v3.0.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Translate filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-A general search and replace tool that uses a configured hash
-and/or a file to determine replacement values. Currently supported are
-YAML, JSON, and CSV files.
-
-The dictionary entries can be specified in one of two ways: First,
-the `dictionary` configuration item may contain a hash representing
-the mapping. Second, an external file (readable by logstash) may be specified
-in the `dictionary_path` configuration item. These two methods may not be used
-together; doing so will produce an error.
-
-Operationally, if the event field specified in the `field` configuration
-matches the EXACT contents of a dictionary entry key (or matches a regex if
-the `regex` configuration item has been enabled), the field's value will be substituted
-with the matched key's value from the dictionary.
-
-By default, the translate filter will replace the contents of the
-matching event field (in-place). However, by using the `destination`
-configuration item, you may also specify a target event field to
-populate with the new translated value.
-
-Alternatively, for simple string search and replacements for just a few values
-you might consider using the gsub function of the mutate filter.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Translate Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-dictionary>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-dictionary_path>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-exact>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-fallback>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-field>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-override>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-refresh_interval>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-regex>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-destination"]
-===== `destination`
-
- * Value type is <>
- * Default value is `"translation"`
-
-The destination field you wish to populate with the translated code. The default
-is a field named `translation`. Set this to the same value as the source field if you want
-to do a substitution; in this case the filter will always succeed. This will clobber
-the old value of the source field!
-
-[id="{version}-plugins-{type}s-{plugin}-dictionary"]
-===== `dictionary`
-
- * Value type is <>
- * Default value is `{}`
-
-The dictionary to use for translation, when specified in the logstash filter
-configuration item (i.e. do not use the `@dictionary_path` file).
-
-Example:
-[source,ruby]
-    filter {
-      translate {
-        dictionary => [ "100", "Continue",
-                        "101", "Switching Protocols",
-                        "merci", "thank you",
-                        "old version", "new version" ]
-      }
-    }
-
-NOTE: It is an error to specify both `dictionary` and `dictionary_path`.
-
-[id="{version}-plugins-{type}s-{plugin}-dictionary_path"]
-===== `dictionary_path`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The full path of the external dictionary file. The format of the table
-should be standard YAML, JSON, or CSV. Make sure you specify any integer-based keys
-in quotes. For example, the YAML file should look something like this:
-[source,ruby]
-    "100": Continue
-    "101": Switching Protocols
-    merci: gracias
-    old version: new version
-
-NOTE: It is an error to specify both `dictionary` and `dictionary_path`.
-
-The currently supported formats are YAML, JSON, and CSV. Format selection is
-based on the file extension: `json` for JSON, `yaml` or `yml` for YAML, and
-`csv` for CSV. The JSON format only supports simple key/value, unnested
-objects. The CSV format expects exactly two columns, with the first serving
-as the original text, and the second column as the replacement.
-
-[id="{version}-plugins-{type}s-{plugin}-exact"]
-===== `exact`
-
- * Value type is <>
- * Default value is `true`
-
-When `exact => true`, the translate filter will populate the destination field
-with the exact contents of the dictionary value. When `exact => false`, the
-filter will populate the destination field with the result of any existing
-destination field's data, with the translated value substituted in-place.
-
-For example, consider this simple translation.yml, configured to check the `data` field:
-[source,ruby]
-    foo: bar
-
-If logstash receives an event with the `data` field set to `foo`, and `exact => true`,
-the destination field will be populated with the string `bar`.
-If `exact => false`, and logstash receives the same event, the destination field
-will also be set to `bar`. However, if logstash receives an event with the `data` field
-set to `foofing`, the destination field will be set to `barfing`.
-
-Set both `exact => true` AND `regex => true` if you would like to match using dictionary
-keys as regular expressions. A large dictionary could be expensive to match in this case.
-
-[id="{version}-plugins-{type}s-{plugin}-fallback"]
-===== `fallback`
-
- * Value type is <>
- * There is no default value for this setting.
-
-In case no translation occurs in the event (no matches), this will add a default
-translation string, which will always populate the destination field if the match failed.
-
-For example, if we have configured `fallback => "no match"`, using this dictionary:
-[source,ruby]
-    foo: bar
-
-Then, if logstash received an event with the configured field set to `foo`, the destination
-field would be set to `bar`. However, if logstash received an event with the field set to `nope`,
-then the destination field would still be populated, but with the value of `no match`.
-This configuration can be dynamic and include parts of the event using the `%{field}` syntax.
-
-[id="{version}-plugins-{type}s-{plugin}-field"]
-===== `field`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The name of the logstash event field containing the value to be compared for a
-match by the translate filter (e.g. `message`, `host`, `response_code`).
-
-If this field is an array, only the first value will be used.
-
-[id="{version}-plugins-{type}s-{plugin}-override"]
-===== `override`
-
- * Value type is <>
- * Default value is `false`
-
-If the destination (or target) field already exists, this configuration item specifies
-whether the filter should skip translation (default) or overwrite the target field
-value with the new translation value.
-
-[id="{version}-plugins-{type}s-{plugin}-refresh_interval"]
-===== `refresh_interval`
-
- * Value type is <>
- * Default value is `300`
-
-When using a dictionary file, this setting indicates how frequently
-(in seconds) logstash will check the dictionary file for updates.
-
-[id="{version}-plugins-{type}s-{plugin}-regex"]
-===== `regex`
-
- * Value type is <>
- * Default value is `false`
-
-If you'd like to treat dictionary keys as regular expressions, set `regex => true`.
-Note: this is activated only when `exact => true`.
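-
-A sketch of regex-keyed matching under these settings; the patterns and field
-names are hypothetical:
-[source,ruby]
-    filter {
-      translate {
-        field       => "user_agent"
-        destination => "browser"
-        exact       => true
-        regex       => true
-        dictionary  => [ "^.*Firefox.*$", "firefox",
-                         "^.*Chrome.*$",  "chrome" ]
-      }
-    }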
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/truncate-index.asciidoc b/docs/versioned-plugins/filters/truncate-index.asciidoc deleted file mode 100644 index 40cfb3ddd..000000000 --- a/docs/versioned-plugins/filters/truncate-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: truncate -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::truncate-v1.0.4.asciidoc[] -include::truncate-v1.0.3.asciidoc[] -include::truncate-v1.0.2.asciidoc[] - diff --git a/docs/versioned-plugins/filters/truncate-v1.0.2.asciidoc b/docs/versioned-plugins/filters/truncate-v1.0.2.asciidoc deleted file mode 100644 index d6be312a3..000000000 --- a/docs/versioned-plugins/filters/truncate-v1.0.2.asciidoc +++ /dev/null @@ -1,84 +0,0 @@ -:plugin: truncate -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v1.0.2 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-truncate/blob/v1.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Truncate filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Allows you to truncate fields longer than a given length. - -This truncates on bytes values, not character count. In practice, this -should mean that the truncated length is somewhere between `length_bytes` and -`length_bytes - 6` (UTF-8 supports up to 6-byte characters). - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Truncate Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-length_bytes>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-fields"] -===== `fields` - - * Value type is <> - * There is no default value for this setting. - -A list of fieldrefs to truncate if they are too long. - -If not specified, the default behavior will be to attempt truncation on all -strings in the event. This default behavior could be computationally -expensive, so if you know exactly which fields you wish to truncate, it is -advised that you be specific and configure the fields you want truncated. - -Special behaviors for non-string fields: - -* Numbers: No action -* Array: this plugin will attempt truncation on all elements of that array. -* Hash: truncate will try all values of the hash (recursively, if this hash -contains other hashes). 
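-
-A minimal sketch that truncates two specific fields; the field names and length
-are hypothetical:
-[source,ruby]
-    filter {
-      truncate {
-        fields       => ["message", "stack_trace"]
-        length_bytes => 1024
-      }
-    }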
- -[id="{version}-plugins-{type}s-{plugin}-length_bytes"] -===== `length_bytes` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Fields over this length will be truncated to this length. - -Truncation happens from the end of the text (the start will be kept). - -As an example, if you set `length_bytes => 10` and a field contains "hello -world, how are you?", then this field will be truncated and have this value: -"hello worl" - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/truncate-v1.0.3.asciidoc b/docs/versioned-plugins/filters/truncate-v1.0.3.asciidoc deleted file mode 100644 index ff4796cda..000000000 --- a/docs/versioned-plugins/filters/truncate-v1.0.3.asciidoc +++ /dev/null @@ -1,84 +0,0 @@ -:plugin: truncate -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v1.0.3 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-truncate/blob/v1.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Truncate filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Allows you to truncate fields longer than a given length. - -This truncates on bytes values, not character count. In practice, this -should mean that the truncated length is somewhere between `length_bytes` and -`length_bytes - 6` (UTF-8 supports up to 6-byte characters). - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Truncate Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-length_bytes>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-fields"] -===== `fields` - - * Value type is <> - * There is no default value for this setting. - -A list of fieldrefs to truncate if they are too long. - -If not specified, the default behavior will be to attempt truncation on all -strings in the event. This default behavior could be computationally -expensive, so if you know exactly which fields you wish to truncate, it is -advised that you be specific and configure the fields you want truncated. - -Special behaviors for non-string fields: - -* Numbers: No action -* Array: this plugin will attempt truncation on all elements of that array. -* Hash: truncate will try all values of the hash (recursively, if this hash -contains other hashes). - -[id="{version}-plugins-{type}s-{plugin}-length_bytes"] -===== `length_bytes` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Fields over this length will be truncated to this length. 
- -Truncation happens from the end of the text (the start will be kept). - -As an example, if you set `length_bytes => 10` and a field contains "hello -world, how are you?", then this field will be truncated and have this value: -"hello worl" - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/truncate-v1.0.4.asciidoc b/docs/versioned-plugins/filters/truncate-v1.0.4.asciidoc deleted file mode 100644 index d13593c76..000000000 --- a/docs/versioned-plugins/filters/truncate-v1.0.4.asciidoc +++ /dev/null @@ -1,84 +0,0 @@ -:plugin: truncate -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v1.0.4 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-truncate/blob/v1.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Truncate filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Allows you to truncate fields longer than a given length. - -This truncates on bytes values, not character count. In practice, this -should mean that the truncated length is somewhere between `length_bytes` and -`length_bytes - 6` (UTF-8 supports up to 6-byte characters). - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Truncate Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-length_bytes>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-fields"] -===== `fields` - - * Value type is <> - * There is no default value for this setting. - -A list of fieldrefs to truncate if they are too long. - -If not specified, the default behavior will be to attempt truncation on all -strings in the event. This default behavior could be computationally -expensive, so if you know exactly which fields you wish to truncate, it is -advised that you be specific and configure the fields you want truncated. - -Special behaviors for non-string fields: - -* Numbers: No action -* Array: this plugin will attempt truncation on all elements of that array. -* Hash: truncate will try all values of the hash (recursively, if this hash -contains other hashes). - -[id="{version}-plugins-{type}s-{plugin}-length_bytes"] -===== `length_bytes` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Fields over this length will be truncated to this length. - -Truncation happens from the end of the text (the start will be kept). 
- -As an example, if you set `length_bytes => 10` and a field contains "hello -world, how are you?", then this field will be truncated and have this value: -"hello worl" - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/unique-index.asciidoc b/docs/versioned-plugins/filters/unique-index.asciidoc deleted file mode 100644 index e36df6726..000000000 --- a/docs/versioned-plugins/filters/unique-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: unique -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-12-13 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::unique-v3.0.0.asciidoc[] -include::unique-v2.0.6.asciidoc[] -include::unique-v2.0.5.asciidoc[] - diff --git a/docs/versioned-plugins/filters/unique-v2.0.5.asciidoc b/docs/versioned-plugins/filters/unique-v2.0.5.asciidoc deleted file mode 100644 index 9585a48dc..000000000 --- a/docs/versioned-plugins/filters/unique-v2.0.5.asciidoc +++ /dev/null @@ -1,53 +0,0 @@ -:plugin: unique -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.0.5 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-unique/blob/v2.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Unique filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Unique Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-fields"] -===== `fields` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The fields on which to run the unique filter. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/unique-v2.0.6.asciidoc b/docs/versioned-plugins/filters/unique-v2.0.6.asciidoc deleted file mode 100644 index 87b6252b0..000000000 --- a/docs/versioned-plugins/filters/unique-v2.0.6.asciidoc +++ /dev/null @@ -1,53 +0,0 @@ -:plugin: unique -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v2.0.6 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-unique/blob/v2.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Unique filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Unique Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-fields"] -===== `fields` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The fields on which to run the unique filter. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/unique-v3.0.0.asciidoc b/docs/versioned-plugins/filters/unique-v3.0.0.asciidoc deleted file mode 100644 index 23802949d..000000000 --- a/docs/versioned-plugins/filters/unique-v3.0.0.asciidoc +++ /dev/null @@ -1,53 +0,0 @@ -:plugin: unique -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.0 -:release_date: 2017-12-13 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-unique/blob/v3.0.0/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Unique filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Unique Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-fields"] -===== `fields` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The fields on which to run the unique filter. 
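-
-Since the description above is empty, the following is only an unofficial
-sketch: assuming the filter de-duplicates values in the listed fields, and
-that `tags` is an array field in your events, a configuration might look like:
-
-[source,ruby]
-    filter {
-      unique {
-        fields => ["tags"]
-      }
-    }
-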
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/urldecode-index.asciidoc b/docs/versioned-plugins/filters/urldecode-index.asciidoc deleted file mode 100644 index 5c842a410..000000000 --- a/docs/versioned-plugins/filters/urldecode-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: urldecode -:type: filter - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::urldecode-v3.0.6.asciidoc[] -include::urldecode-v3.0.5.asciidoc[] -include::urldecode-v3.0.4.asciidoc[] - diff --git a/docs/versioned-plugins/filters/urldecode-v3.0.4.asciidoc b/docs/versioned-plugins/filters/urldecode-v3.0.4.asciidoc deleted file mode 100644 index d45825ba1..000000000 --- a/docs/versioned-plugins/filters/urldecode-v3.0.4.asciidoc +++ /dev/null @@ -1,83 +0,0 @@ -:plugin: urldecode -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-urldecode/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Urldecode filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The urldecode filter is for decoding fields that are urlencoded. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Urldecode Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-all_fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -| <<{version}-plugins-{type}s-{plugin}-field>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. 
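-
-As a minimal sketch (the field name `referrer` is only illustrative), a
-typical configuration decodes a single field:
-
-[source,ruby]
-    filter {
-      urldecode {
-        field => "referrer"
-      }
-    }
-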
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-all_fields"]
-===== `all_fields`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-Urldecode all fields
-
-[id="{version}-plugins-{type}s-{plugin}-charset"]
-===== `charset`
-
- * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
- * Default value is `"UTF-8"`
-
-The character encoding used in this filter. Examples include `UTF-8`
-and `cp1252`.
-
-This setting is useful if your url-decoded strings are in `Latin-1` (aka `cp1252`)
-or in another character set other than `UTF-8`.
-
-[id="{version}-plugins-{type}s-{plugin}-field"]
-===== `field`
-
- * Value type is <<string,string>>
- * Default value is `"message"`
-
-The field whose value will be urldecoded
-
-[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
-===== `tag_on_failure`
-
- * Value type is <<array,array>>
- * Default value is `["_urldecodefailure"]`
-
-Append values to the `tags` field when an exception is thrown
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/urldecode-v3.0.5.asciidoc b/docs/versioned-plugins/filters/urldecode-v3.0.5.asciidoc
deleted file mode 100644
index 3827e8217..000000000
--- a/docs/versioned-plugins/filters/urldecode-v3.0.5.asciidoc
+++ /dev/null
@@ -1,83 +0,0 @@
-:plugin: urldecode
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-urldecode/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Urldecode filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The urldecode filter is for decoding fields that are urlencoded. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Urldecode Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-all_fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -| <<{version}-plugins-{type}s-{plugin}-field>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. 
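-
-A sketch combining `all_fields` with a custom failure tag (the tag name
-`_urldecode_error` is only illustrative):
-
-[source,ruby]
-    filter {
-      urldecode {
-        all_fields => true
-        tag_on_failure => ["_urldecode_error"]
-      }
-    }
-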
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-all_fields"]
-===== `all_fields`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-Urldecode all fields
-
-[id="{version}-plugins-{type}s-{plugin}-charset"]
-===== `charset`
-
- * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
- * Default value is `"UTF-8"`
-
-The character encoding used in this filter. Examples include `UTF-8`
-and `cp1252`.
-
-This setting is useful if your url-decoded strings are in `Latin-1` (aka `cp1252`)
-or in another character set other than `UTF-8`.
-
-[id="{version}-plugins-{type}s-{plugin}-field"]
-===== `field`
-
- * Value type is <<string,string>>
- * Default value is `"message"`
-
-The field whose value will be urldecoded
-
-[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
-===== `tag_on_failure`
-
- * Value type is <<array,array>>
- * Default value is `["_urldecodefailure"]`
-
-Append values to the `tags` field when an exception is thrown
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/urldecode-v3.0.6.asciidoc b/docs/versioned-plugins/filters/urldecode-v3.0.6.asciidoc
deleted file mode 100644
index 4085edc58..000000000
--- a/docs/versioned-plugins/filters/urldecode-v3.0.6.asciidoc
+++ /dev/null
@@ -1,83 +0,0 @@
-:plugin: urldecode
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-/////////////////////////////////////////// -:version: v3.0.6 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-urldecode/blob/v3.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Urldecode filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The urldecode filter is for decoding fields that are urlencoded. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Urldecode Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-all_fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No -| <<{version}-plugins-{type}s-{plugin}-field>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. 
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-all_fields"]
-===== `all_fields`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-Urldecode all fields
-
-[id="{version}-plugins-{type}s-{plugin}-charset"]
-===== `charset`
-
- * Value can be any of: `ASCII-8BIT`, `UTF-8`, `US-ASCII`, `Big5`, `Big5-HKSCS`, `Big5-UAO`, `CP949`, `Emacs-Mule`, `EUC-JP`, `EUC-KR`, `EUC-TW`, `GB2312`, `GB18030`, `GBK`, `ISO-8859-1`, `ISO-8859-2`, `ISO-8859-3`, `ISO-8859-4`, `ISO-8859-5`, `ISO-8859-6`, `ISO-8859-7`, `ISO-8859-8`, `ISO-8859-9`, `ISO-8859-10`, `ISO-8859-11`, `ISO-8859-13`, `ISO-8859-14`, `ISO-8859-15`, `ISO-8859-16`, `KOI8-R`, `KOI8-U`, `Shift_JIS`, `UTF-16BE`, `UTF-16LE`, `UTF-32BE`, `UTF-32LE`, `Windows-31J`, `Windows-1250`, `Windows-1251`, `Windows-1252`, `IBM437`, `IBM737`, `IBM775`, `CP850`, `IBM852`, `CP852`, `IBM855`, `CP855`, `IBM857`, `IBM860`, `IBM861`, `IBM862`, `IBM863`, `IBM864`, `IBM865`, `IBM866`, `IBM869`, `Windows-1258`, `GB1988`, `macCentEuro`, `macCroatian`, `macCyrillic`, `macGreek`, `macIceland`, `macRoman`, `macRomania`, `macThai`, `macTurkish`, `macUkraine`, `CP950`, `CP951`, `IBM037`, `stateless-ISO-2022-JP`, `eucJP-ms`, `CP51932`, `EUC-JIS-2004`, `GB12345`, `ISO-2022-JP`, `ISO-2022-JP-2`, `CP50220`, `CP50221`, `Windows-1256`, `Windows-1253`, `Windows-1255`, `Windows-1254`, `TIS-620`, `Windows-874`, `Windows-1257`, `MacJapanese`, `UTF-7`, `UTF8-MAC`, `UTF-16`, `UTF-32`, `UTF8-DoCoMo`, `SJIS-DoCoMo`, `UTF8-KDDI`, `SJIS-KDDI`, `ISO-2022-JP-KDDI`, `stateless-ISO-2022-JP-KDDI`, `UTF8-SoftBank`, `SJIS-SoftBank`, `BINARY`, `CP437`, `CP737`, `CP775`, `IBM850`, `CP857`, `CP860`, `CP861`, `CP862`, `CP863`, `CP864`, `CP865`, `CP866`, `CP869`, `CP1258`, `Big5-HKSCS:2008`, `ebcdic-cp-us`, `eucJP`, `euc-jp-ms`, `EUC-JISX0213`, `eucKR`, `eucTW`, `EUC-CN`, `eucCN`, `CP936`, `ISO2022-JP`, `ISO2022-JP2`, `ISO8859-1`, `ISO8859-2`, `ISO8859-3`, `ISO8859-4`, `ISO8859-5`, `ISO8859-6`, `CP1256`, `ISO8859-7`, `CP1253`, `ISO8859-8`, `CP1255`, `ISO8859-9`, `CP1254`, `ISO8859-10`, `ISO8859-11`, `CP874`, `ISO8859-13`, `CP1257`, `ISO8859-14`, `ISO8859-15`, `ISO8859-16`, `CP878`, `MacJapan`, `ASCII`, `ANSI_X3.4-1968`, `646`, `CP65000`, `CP65001`, `UTF-8-MAC`, `UTF-8-HFS`, `UCS-2BE`, `UCS-4BE`, `UCS-4LE`, `CP932`, `csWindows31J`, `SJIS`, `PCK`, `CP1250`, `CP1251`, `CP1252`, `external`, `locale`
- * Default value is `"UTF-8"`
-
-The character encoding used in this filter. Examples include `UTF-8`
-and `cp1252`.
-
-This setting is useful if your url-decoded strings are in `Latin-1` (aka `cp1252`)
-or in another character set other than `UTF-8`.
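-
-For example, a sketch for decoding values that were encoded as `Latin-1`
-(the field name is illustrative):
-
-[source,ruby]
-    filter {
-      urldecode {
-        field => "referrer"
-        charset => "CP1252"
-      }
-    }
-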
-
-[id="{version}-plugins-{type}s-{plugin}-field"]
-===== `field`
-
- * Value type is <<string,string>>
- * Default value is `"message"`
-
-The field whose value will be urldecoded
-
-[id="{version}-plugins-{type}s-{plugin}-tag_on_failure"]
-===== `tag_on_failure`
-
- * Value type is <<array,array>>
- * Default value is `["_urldecodefailure"]`
-
-Append values to the `tags` field when an exception is thrown
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/useragent-index.asciidoc b/docs/versioned-plugins/filters/useragent-index.asciidoc
deleted file mode 100644
index 0f51eeb6f..000000000
--- a/docs/versioned-plugins/filters/useragent-index.asciidoc
+++ /dev/null
@@ -1,22 +0,0 @@
-:plugin: useragent
-:type: filter
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-15
-| <> | 2017-07-10
-| <> | 2017-07-06
-| <> | 2017-05-15
-| <> | 2017-05-10
-|=======================================================================
-
-include::useragent-v3.2.2.asciidoc[]
-include::useragent-v3.2.1.asciidoc[]
-include::useragent-v3.2.0.asciidoc[]
-include::useragent-v3.1.3.asciidoc[]
-include::useragent-v3.1.1.asciidoc[]
-include::useragent-v3.1.0.asciidoc[]
-
diff --git a/docs/versioned-plugins/filters/useragent-v3.1.0.asciidoc b/docs/versioned-plugins/filters/useragent-v3.1.0.asciidoc
deleted file mode 100644
index dd3e6f450..000000000
--- a/docs/versioned-plugins/filters/useragent-v3.1.0.asciidoc
+++ /dev/null
@@ -1,117 +0,0 @@
-:plugin: useragent
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.1.0
-:release_date: 2017-05-10
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-useragent/blob/v3.1.0/CHANGELOG.md
-:include_path: ../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="plugins-{type}-{plugin}"]
-
-=== Useragent
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Parses user agent strings into structured data based on BrowserScope data.
-
-The UserAgent filter adds information about the user agent, such as family,
-operating system, version, and device.
-
-Logstash releases ship with the regexes.yaml database made available from
-ua-parser with an Apache 2.0 license. For more details on ua-parser, see
-https://github.com/ua-parser/uap-core.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Useragent Filter Configuration Options
-
-This plugin supports the following configuration options plus the <> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-lru_cache_size>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-prefix>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-regexes>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-source>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-target>> |<<string,string>>|No
-|=======================================================================
-
-Also see <> for a list of options supported by all
-filter plugins.
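-
-A minimal sketch, assuming the raw user agent string lives in a field named
-`agent`:
-
-[source,ruby]
-    filter {
-      useragent {
-        source => "agent"
-      }
-    }
-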
- -  - -[id="{version}-plugins-{type}s-{plugin}-lru_cache_size"] -===== `lru_cache_size` - - * Value type is <> - * Default value is `1000` - -UA parsing is surprisingly expensive. This filter uses an LRU cache to take advantage of the fact that -user agents are often found adjacent to one another in log files and rarely have a random distribution. -The higher you set this the more likely an item is to be in the cache and the faster this filter will run. -However, if you set this too high you can use more memory than desired. - -Experiment with different values for this option to find the best performance for your dataset. - -This MUST be set to a value > 0. There is really no reason to not want this behavior, the overhead is minimal -and the speed gains are large. - -It is important to note that this config value is global. That is to say all instances of the user agent filter -share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit -to having multiple caches for different instances at different points in the pipeline, that would just increase the -number of cache misses and waste memory. - -[id="{version}-plugins-{type}s-{plugin}-prefix"] -===== `prefix` - - * Value type is <> - * Default value is `""` - -A string to prepend to all of the extracted keys - -[id="{version}-plugins-{type}s-{plugin}-regexes"] -===== `regexes` - - * Value type is <> - * There is no default value for this setting. - -`regexes.yaml` file to use - -If not specified, this will default to the `regexes.yaml` that ships -with logstash. - -You can find the latest version of this here: - - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The field containing the user agent string. If this field is an -array, only the first value will be used. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -The name of the field to assign user agent data into. - -If not specified user agent data will be stored in the root of the event. - - - -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/useragent-v3.1.1.asciidoc b/docs/versioned-plugins/filters/useragent-v3.1.1.asciidoc deleted file mode 100644 index 55fa6aa1c..000000000 --- a/docs/versioned-plugins/filters/useragent-v3.1.1.asciidoc +++ /dev/null @@ -1,117 +0,0 @@ -:plugin: useragent -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.1 -:release_date: 2017-05-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-useragent/blob/v3.1.1/CHANGELOG.md -:include_path: ../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Useragent - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Parse user agent strings into structured data based on BrowserScope data - -UserAgent filter, adds information about user agent like family, operating -system, version, and device - -Logstash releases ship with the regexes.yaml database made available from -ua-parser with an Apache 2.0 license. For more details on ua-parser, see -. 
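-
-As an unofficial sketch, the parsed data can be kept out of the event root
-by pointing `target` at a dedicated field (field names are illustrative):
-
-[source,ruby]
-    filter {
-      useragent {
-        source => "agent"
-        target => "user_agent"
-      }
-    }
-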
- -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Useragent Filter Configuration Options - -This plugin supports the following configuration options plus the <> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-lru_cache_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-regexes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-lru_cache_size"] -===== `lru_cache_size` - - * Value type is <> - * Default value is `1000` - -UA parsing is surprisingly expensive. This filter uses an LRU cache to take advantage of the fact that -user agents are often found adjacent to one another in log files and rarely have a random distribution. -The higher you set this the more likely an item is to be in the cache and the faster this filter will run. -However, if you set this too high you can use more memory than desired. - -Experiment with different values for this option to find the best performance for your dataset. - -This MUST be set to a value > 0. There is really no reason to not want this behavior, the overhead is minimal -and the speed gains are large. - -It is important to note that this config value is global. That is to say all instances of the user agent filter -share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit -to having multiple caches for different instances at different points in the pipeline, that would just increase the -number of cache misses and waste memory. - -[id="{version}-plugins-{type}s-{plugin}-prefix"] -===== `prefix` - - * Value type is <> - * Default value is `""` - -A string to prepend to all of the extracted keys - -[id="{version}-plugins-{type}s-{plugin}-regexes"] -===== `regexes` - - * Value type is <> - * There is no default value for this setting. - -`regexes.yaml` file to use - -If not specified, this will default to the `regexes.yaml` that ships -with logstash. - -You can find the latest version of this here: - - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The field containing the user agent string. If this field is an -array, only the first value will be used. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -The name of the field to assign user agent data into. - -If not specified user agent data will be stored in the root of the event. - - - -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/useragent-v3.1.3.asciidoc b/docs/versioned-plugins/filters/useragent-v3.1.3.asciidoc deleted file mode 100644 index 85c7340ad..000000000 --- a/docs/versioned-plugins/filters/useragent-v3.1.3.asciidoc +++ /dev/null @@ -1,118 +0,0 @@ -:plugin: useragent -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v3.1.3 -:release_date: 2017-07-06 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-useragent/blob/v3.1.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Useragent filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Parse user agent strings into structured data based on BrowserScope data - -UserAgent filter, adds information about user agent like family, operating -system, version, and device - -Logstash releases ship with the regexes.yaml database made available from -ua-parser with an Apache 2.0 license. For more details on ua-parser, see -. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Useragent Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-lru_cache_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-regexes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-lru_cache_size"] -===== `lru_cache_size` - - * Value type is <> - * Default value is `1000` - -UA parsing is surprisingly expensive. This filter uses an LRU cache to take advantage of the fact that -user agents are often found adjacent to one another in log files and rarely have a random distribution. -The higher you set this the more likely an item is to be in the cache and the faster this filter will run. -However, if you set this too high you can use more memory than desired. - -Experiment with different values for this option to find the best performance for your dataset. - -This MUST be set to a value > 0. There is really no reason to not want this behavior, the overhead is minimal -and the speed gains are large. - -It is important to note that this config value is global. That is to say all instances of the user agent filter -share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit -to having multiple caches for different instances at different points in the pipeline, that would just increase the -number of cache misses and waste memory. - -[id="{version}-plugins-{type}s-{plugin}-prefix"] -===== `prefix` - - * Value type is <> - * Default value is `""` - -A string to prepend to all of the extracted keys - -[id="{version}-plugins-{type}s-{plugin}-regexes"] -===== `regexes` - - * Value type is <> - * There is no default value for this setting. - -`regexes.yaml` file to use - -If not specified, this will default to the `regexes.yaml` that ships -with logstash. - -You can find the latest version of this here: - - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. 
- -The field containing the user agent string. If this field is an -array, only the first value will be used. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -The name of the field to assign user agent data into. - -If not specified user agent data will be stored in the root of the event. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/useragent-v3.2.0.asciidoc b/docs/versioned-plugins/filters/useragent-v3.2.0.asciidoc deleted file mode 100644 index ad2b8f5f3..000000000 --- a/docs/versioned-plugins/filters/useragent-v3.2.0.asciidoc +++ /dev/null @@ -1,118 +0,0 @@ -:plugin: useragent -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.2.0 -:release_date: 2017-07-10 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-useragent/blob/v3.2.0/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Useragent filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Parse user agent strings into structured data based on BrowserScope data - -UserAgent filter, adds information about user agent like family, operating -system, version, and device - -Logstash releases ship with the regexes.yaml database made available from -ua-parser with an Apache 2.0 license. For more details on ua-parser, see -. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Useragent Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-lru_cache_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-regexes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-lru_cache_size"] -===== `lru_cache_size` - - * Value type is <> - * Default value is `1000` - -UA parsing is surprisingly expensive. This filter uses an LRU cache to take advantage of the fact that -user agents are often found adjacent to one another in log files and rarely have a random distribution. -The higher you set this the more likely an item is to be in the cache and the faster this filter will run. -However, if you set this too high you can use more memory than desired. - -Experiment with different values for this option to find the best performance for your dataset. - -This MUST be set to a value > 0. There is really no reason to not want this behavior, the overhead is minimal -and the speed gains are large. - -It is important to note that this config value is global. 
That is to say all instances of the user agent filter -share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit -to having multiple caches for different instances at different points in the pipeline, that would just increase the -number of cache misses and waste memory. - -[id="{version}-plugins-{type}s-{plugin}-prefix"] -===== `prefix` - - * Value type is <> - * Default value is `""` - -A string to prepend to all of the extracted keys - -[id="{version}-plugins-{type}s-{plugin}-regexes"] -===== `regexes` - - * Value type is <> - * There is no default value for this setting. - -`regexes.yaml` file to use - -If not specified, this will default to the `regexes.yaml` that ships -with logstash. - -You can find the latest version of this here: - - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The field containing the user agent string. If this field is an -array, only the first value will be used. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -The name of the field to assign user agent data into. - -If not specified user agent data will be stored in the root of the event. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/useragent-v3.2.1.asciidoc b/docs/versioned-plugins/filters/useragent-v3.2.1.asciidoc deleted file mode 100644 index 27ab74ef8..000000000 --- a/docs/versioned-plugins/filters/useragent-v3.2.1.asciidoc +++ /dev/null @@ -1,118 +0,0 @@ -:plugin: useragent -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.2.1 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-useragent/blob/v3.2.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Useragent filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Parse user agent strings into structured data based on BrowserScope data - -UserAgent filter, adds information about user agent like family, operating -system, version, and device - -Logstash releases ship with the regexes.yaml database made available from -ua-parser with an Apache 2.0 license. For more details on ua-parser, see -. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Useragent Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-lru_cache_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-regexes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-lru_cache_size"] -===== `lru_cache_size` - - * Value type is <> - * Default value is `1000` - -UA parsing is surprisingly expensive. This filter uses an LRU cache to take advantage of the fact that -user agents are often found adjacent to one another in log files and rarely have a random distribution. -The higher you set this the more likely an item is to be in the cache and the faster this filter will run. -However, if you set this too high you can use more memory than desired. - -Experiment with different values for this option to find the best performance for your dataset. - -This MUST be set to a value > 0. There is really no reason to not want this behavior, the overhead is minimal -and the speed gains are large. - -It is important to note that this config value is global. That is to say all instances of the user agent filter -share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit -to having multiple caches for different instances at different points in the pipeline, that would just increase the -number of cache misses and waste memory. - -[id="{version}-plugins-{type}s-{plugin}-prefix"] -===== `prefix` - - * Value type is <> - * Default value is `""` - -A string to prepend to all of the extracted keys - -[id="{version}-plugins-{type}s-{plugin}-regexes"] -===== `regexes` - - * Value type is <> - * There is no default value for this setting. - -`regexes.yaml` file to use - -If not specified, this will default to the `regexes.yaml` that ships -with logstash. - -You can find the latest version of this here: - - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The field containing the user agent string. If this field is an -array, only the first value will be used. - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -The name of the field to assign user agent data into. - -If not specified user agent data will be stored in the root of the event. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/filters/useragent-v3.2.2.asciidoc b/docs/versioned-plugins/filters/useragent-v3.2.2.asciidoc deleted file mode 100644 index d8f42111b..000000000 --- a/docs/versioned-plugins/filters/useragent-v3.2.2.asciidoc +++ /dev/null @@ -1,118 +0,0 @@ -:plugin: useragent -:type: filter - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v3.2.2 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-filter-useragent/blob/v3.2.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Useragent filter plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Parse user agent strings into structured data based on BrowserScope data - -UserAgent filter, adds information about user agent like family, operating -system, version, and device - -Logstash releases ship with the regexes.yaml database made available from -ua-parser with an Apache 2.0 license. For more details on ua-parser, see -. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Useragent Filter Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-lru_cache_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-regexes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -filter plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-lru_cache_size"] -===== `lru_cache_size` - - * Value type is <> - * Default value is `1000` - -UA parsing is surprisingly expensive. This filter uses an LRU cache to take advantage of the fact that -user agents are often found adjacent to one another in log files and rarely have a random distribution. -The higher you set this the more likely an item is to be in the cache and the faster this filter will run. -However, if you set this too high you can use more memory than desired. - -Experiment with different values for this option to find the best performance for your dataset. - -This MUST be set to a value > 0. There is really no reason to not want this behavior, the overhead is minimal -and the speed gains are large. - -It is important to note that this config value is global. That is to say all instances of the user agent filter -share the same cache. The last declared cache size will 'win'. The reason for this is that there would be no benefit -to having multiple caches for different instances at different points in the pipeline, that would just increase the -number of cache misses and waste memory. - -[id="{version}-plugins-{type}s-{plugin}-prefix"] -===== `prefix` - - * Value type is <> - * Default value is `""` - -A string to prepend to all of the extracted keys - -[id="{version}-plugins-{type}s-{plugin}-regexes"] -===== `regexes` - - * Value type is <> - * There is no default value for this setting. - -`regexes.yaml` file to use - -If not specified, this will default to the `regexes.yaml` that ships -with logstash. - -You can find the latest version of this here: - - -[id="{version}-plugins-{type}s-{plugin}-source"] -===== `source` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. 
-
-The field containing the user agent string. If this field is an
-array, only the first value will be used.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The name of the field to assign user agent data into.
-
-If not specified, user agent data will be stored in the root of the event.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/uuid-index.asciidoc b/docs/versioned-plugins/filters/uuid-index.asciidoc
deleted file mode 100644
index 0895fd830..000000000
--- a/docs/versioned-plugins/filters/uuid-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: uuid
-:type: filter
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-15
-| <> | 2017-06-23
-|=======================================================================
-
-include::uuid-v3.0.5.asciidoc[]
-include::uuid-v3.0.4.asciidoc[]
-include::uuid-v3.0.3.asciidoc[]
-
diff --git a/docs/versioned-plugins/filters/uuid-v3.0.3.asciidoc b/docs/versioned-plugins/filters/uuid-v3.0.3.asciidoc
deleted file mode 100644
index 6f3565034..000000000
--- a/docs/versioned-plugins/filters/uuid-v3.0.3.asciidoc
+++ /dev/null
@@ -1,95 +0,0 @@
-:plugin: uuid
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.3
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-uuid/blob/v3.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="plugins-{type}-{plugin}"]
-
-=== Uuid filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The uuid filter allows you to generate a
-https://en.wikipedia.org/wiki/Universally_unique_identifier[UUID]
-and add it as a field to each processed event.
-
-This is useful if you need to generate a string that's unique for every
-event, even if the same input is processed multiple times. If you want
-to generate strings that are identical each time an event with a given
-content is processed (i.e. a hash) you should use the
-<<plugins-filters-fingerprint,fingerprint filter>> instead.
-
-The generated UUIDs follow the version 4 definition in
-https://tools.ietf.org/html/rfc4122[RFC 4122] and will be
-represented in standard hexadecimal string format, e.g.
-"e08806fe-02af-406c-bbde-8a5ae4475e57".
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Uuid Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-overwrite>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<<string,string>>|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-overwrite"]
-===== `overwrite`
-
- * Value type is <>
- * Default value is `false`
-
-Determines whether the current value of the field (if any) should be overwritten
-by the generated UUID. Defaults to `false` (i.e. if the field is
-present, with ANY value, it won't be overwritten).
-
-Example:
-[source,ruby]
-    filter {
-      uuid {
-        target => "uuid"
-        overwrite => true
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Select the name of the field where the generated UUID should be
-stored.
-
-Example:
-[source,ruby]
-    filter {
-      uuid {
-        target => "uuid"
-      }
-    }
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/uuid-v3.0.4.asciidoc b/docs/versioned-plugins/filters/uuid-v3.0.4.asciidoc
deleted file mode 100644
index 7f737a590..000000000
--- a/docs/versioned-plugins/filters/uuid-v3.0.4.asciidoc
+++ /dev/null
@@ -1,95 +0,0 @@
-:plugin: uuid
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.4
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-uuid/blob/v3.0.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Uuid filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The uuid filter allows you to generate a
-https://en.wikipedia.org/wiki/Universally_unique_identifier[UUID]
-and add it as a field to each processed event.
-
-This is useful if you need to generate a string that's unique for every
-event, even if the same input is processed multiple times. If you want
-to generate strings that are identical each time an event with a given
-content is processed (i.e. a hash) you should use the
-<> instead.
-
-The generated UUIDs follow the version 4 definition in
-https://tools.ietf.org/html/rfc4122[RFC 4122] and will be
-represented in standard hexadecimal string format, e.g.
-"e08806fe-02af-406c-bbde-8a5ae4475e57".
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Uuid Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-overwrite>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-overwrite"]
-===== `overwrite`
-
- * Value type is <>
- * Default value is `false`
-
-Determines whether the current value of the field (if any) should be overwritten
-by the generated UUID. Defaults to `false` (i.e. if the field is
-present, with ANY value, it won't be overwritten).
-
-Example:
-[source,ruby]
-    filter {
-      uuid {
-        target => "uuid"
-        overwrite => true
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Select the name of the field where the generated UUID should be
-stored.
-
-Example:
-[source,ruby]
-    filter {
-      uuid {
-        target => "uuid"
-      }
-    }
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/uuid-v3.0.5.asciidoc b/docs/versioned-plugins/filters/uuid-v3.0.5.asciidoc
deleted file mode 100644
index 586a1aba4..000000000
--- a/docs/versioned-plugins/filters/uuid-v3.0.5.asciidoc
+++ /dev/null
@@ -1,95 +0,0 @@
-:plugin: uuid
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.5
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-uuid/blob/v3.0.5/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Uuid filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The uuid filter allows you to generate a
-https://en.wikipedia.org/wiki/Universally_unique_identifier[UUID]
-and add it as a field to each processed event.
-
-This is useful if you need to generate a string that's unique for every
-event, even if the same input is processed multiple times. If you want
-to generate strings that are identical each time an event with a given
-content is processed (i.e. a hash) you should use the
-<> instead.
-
-The generated UUIDs follow the version 4 definition in
-https://tools.ietf.org/html/rfc4122[RFC 4122] and will be
-represented in standard hexadecimal string format, e.g.
-"e08806fe-02af-406c-bbde-8a5ae4475e57".
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Uuid Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-overwrite>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-overwrite"]
-===== `overwrite`
-
- * Value type is <>
- * Default value is `false`
-
-Determines whether the current value of the field (if any) should be overwritten
-by the generated UUID. Defaults to `false` (i.e. if the field is
-present, with ANY value, it won't be overwritten).
-
-Example:
-[source,ruby]
-    filter {
-      uuid {
-        target => "uuid"
-        overwrite => true
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Select the name of the field where the generated UUID should be
-stored.
-
-Example:
-[source,ruby]
-    filter {
-      uuid {
-        target => "uuid"
-      }
-    }
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/xml-index.asciidoc b/docs/versioned-plugins/filters/xml-index.asciidoc
deleted file mode 100644
index 3e29bb025..000000000
--- a/docs/versioned-plugins/filters/xml-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: xml
-:type: filter
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-15
-| <> | 2017-06-23
-|=======================================================================
-
-include::xml-v4.0.5.asciidoc[]
-include::xml-v4.0.4.asciidoc[]
-include::xml-v4.0.3.asciidoc[]
-
diff --git a/docs/versioned-plugins/filters/xml-v4.0.3.asciidoc b/docs/versioned-plugins/filters/xml-v4.0.3.asciidoc
deleted file mode 100644
index 4964fb159..000000000
--- a/docs/versioned-plugins/filters/xml-v4.0.3.asciidoc
+++ /dev/null
@@ -1,187 +0,0 @@
-:plugin: xml
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.0.3
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-xml/blob/v4.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Xml filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-XML filter. Takes a field that contains XML and expands it into
-an actual data structure.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Xml Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-force_array>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-force_content>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-namespaces>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-remove_namespaces>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-store_xml>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-suppress_empty>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-xpath>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-force_array"]
-===== `force_array`
-
- * Value type is <>
- * Default value is `true`
-
-By default the filter will force single elements to be arrays. Setting this to
-false will prevent storing single elements in arrays.
-
-[id="{version}-plugins-{type}s-{plugin}-force_content"]
-===== `force_content`
-
- * Value type is <>
- * Default value is `false`
-
-By default the filter will expand attributes differently from content inside
-of tags. This option allows you to force text content and attributes to
-always parse to a hash value.
-
-[id="{version}-plugins-{type}s-{plugin}-namespaces"]
-===== `namespaces`
-
- * Value type is <>
- * Default value is `{}`
-
-By default only namespace declarations on the root element are considered.
-This setting allows you to configure all namespace declarations needed to parse the XML document.
-
-Example:
-
-[source,ruby]
-filter {
-  xml {
-    namespaces => {
-      "xsl" => "http://www.w3.org/1999/XSL/Transform"
-      "xhtml" => "http://www.w3.org/1999/xhtml"
-    }
-  }
-}
-
-
-[id="{version}-plugins-{type}s-{plugin}-remove_namespaces"]
-===== `remove_namespaces`
-
- * Value type is <>
- * Default value is `false`
-
-Remove all namespaces from all nodes in the document.
-Of course, if the document had nodes with the same names but different namespaces, they will now be ambiguous.
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The configuration for parsing XML into a hash is:
-[source,ruby]
-    source => source_field
-
-For example, if you have the whole XML document in your `message` field:
-[source,ruby]
-    filter {
-      xml {
-        source => "message"
-      }
-    }
-
-The above would parse the XML from the `message` field.
-
-[id="{version}-plugins-{type}s-{plugin}-store_xml"]
-===== `store_xml`
-
- * Value type is <>
- * Default value is `true`
-
-By default the filter will store the whole parsed XML in the destination
-field as described above. Setting this to false will prevent that.
-
-[id="{version}-plugins-{type}s-{plugin}-suppress_empty"]
-===== `suppress_empty`
-
- * Value type is <>
- * Default value is `true`
-
-By default, nothing is output if the element is empty.
-If set to `false`, empty elements will result in empty hash objects.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Define the target field for placing the data.
-
-For example, if you want the data to be put in the `doc` field:
-[source,ruby]
-    filter {
-      xml {
-        target => "doc"
-      }
-    }
-
-XML in the value of the source field will be expanded into a
-data structure in the `target` field.
-Note: if the `target` field already exists, it will be overridden.
-Required if `store_xml` is true (which is the default).
-
-[id="{version}-plugins-{type}s-{plugin}-xpath"]
-===== `xpath`
-
- * Value type is <>
- * Default value is `{}`
-
-xpath will additionally select string values (non-strings will be
-converted to strings with Ruby's `to_s` function) from parsed XML
-(using each source field defined using the method above) and place
-those values in the destination fields. Configuration:
-[source,ruby]
-xpath => [ "xpath-syntax", "destination-field" ]
-
-Values returned by XPath parsing from `xpath-syntax` will be put in the
-destination field. Multiple values returned will be pushed onto the
-destination field as an array. As such, multiple matches across
-multiple source fields will produce duplicate entries in the field.
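-
-As a concrete illustration, here is a minimal sketch (the field names and
-XPath expressions are purely hypothetical) that extracts a couple of values
-from an XML document held in `message`, while `store_xml => false` skips
-storing the full parsed document:
-
-[source,ruby]
-    filter {
-      xml {
-        source    => "message"
-        store_xml => false
-        xpath     => [
-          "/order/@id", "order_id",
-          "/order/item/name/text()", "item_names"
-        ]
-      }
-    }
-
-With this configuration, `order_id` and `item_names` end up as array-valued
-fields on the event, following the array semantics described above.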
-
-More on XPath: http://www.w3schools.com/xml/xml_xpath.asp
-
-The XPath functions are particularly powerful:
-http://www.w3schools.com/xsl/xsl_functions.asp
-
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/xml-v4.0.4.asciidoc b/docs/versioned-plugins/filters/xml-v4.0.4.asciidoc
deleted file mode 100644
index 085ffde90..000000000
--- a/docs/versioned-plugins/filters/xml-v4.0.4.asciidoc
+++ /dev/null
@@ -1,187 +0,0 @@
-:plugin: xml
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.0.4
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-xml/blob/v4.0.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Xml filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-XML filter. Takes a field that contains XML and expands it into
-an actual data structure.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Xml Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-force_array>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-force_content>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-namespaces>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-remove_namespaces>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-store_xml>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-suppress_empty>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-xpath>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-force_array"]
-===== `force_array`
-
- * Value type is <>
- * Default value is `true`
-
-By default the filter will force single elements to be arrays. Setting this to
-false will prevent storing single elements in arrays.
-
-[id="{version}-plugins-{type}s-{plugin}-force_content"]
-===== `force_content`
-
- * Value type is <>
- * Default value is `false`
-
-By default the filter will expand attributes differently from content inside
-of tags. This option allows you to force text content and attributes to always
-parse to a hash value.
-
-[id="{version}-plugins-{type}s-{plugin}-namespaces"]
-===== `namespaces`
-
- * Value type is <>
- * Default value is `{}`
-
-By default only namespace declarations on the root element are considered.
-This setting allows you to configure all namespace declarations needed to parse the XML document.
-
-Example:
-
-[source,ruby]
-filter {
-  xml {
-    namespaces => {
-      "xsl" => "http://www.w3.org/1999/XSL/Transform"
-      "xhtml" => "http://www.w3.org/1999/xhtml"
-    }
-  }
-}
-
-
-[id="{version}-plugins-{type}s-{plugin}-remove_namespaces"]
-===== `remove_namespaces`
-
- * Value type is <>
- * Default value is `false`
-
-Remove all namespaces from all nodes in the document.
-Of course, if the document had nodes with the same names but different namespaces, they will now be ambiguous.
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The configuration for parsing XML into a hash is:
-[source,ruby]
-    source => source_field
-
-For example, if you have the whole XML document in your `message` field:
-[source,ruby]
-    filter {
-      xml {
-        source => "message"
-      }
-    }
-
-The above would parse the XML from the `message` field.
-
-[id="{version}-plugins-{type}s-{plugin}-store_xml"]
-===== `store_xml`
-
- * Value type is <>
- * Default value is `true`
-
-By default the filter will store the whole parsed XML in the destination
-field as described above. Setting this to false will prevent that.
-
-[id="{version}-plugins-{type}s-{plugin}-suppress_empty"]
-===== `suppress_empty`
-
- * Value type is <>
- * Default value is `true`
-
-By default, nothing is output if the element is empty.
-If set to `false`, empty elements will result in empty hash objects.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Define the target field for placing the data.
-
-For example, if you want the data to be put in the `doc` field:
-[source,ruby]
-    filter {
-      xml {
-        target => "doc"
-      }
-    }
-
-XML in the value of the source field will be expanded into a
-data structure in the `target` field.
-Note: if the `target` field already exists, it will be overridden.
-Required if `store_xml` is true (which is the default).
-
-[id="{version}-plugins-{type}s-{plugin}-xpath"]
-===== `xpath`
-
- * Value type is <>
- * Default value is `{}`
-
-xpath will additionally select string values (non-strings will be
-converted to strings with Ruby's `to_s` function) from parsed XML
-(using each source field defined using the method above) and place
-those values in the destination fields. Configuration:
-[source,ruby]
-xpath => [ "xpath-syntax", "destination-field" ]
-
-Values returned by XPath parsing from `xpath-syntax` will be put in the
-destination field. Multiple values returned will be pushed onto the
-destination field as an array. As such, multiple matches across
-multiple source fields will produce duplicate entries in the field.
-
-More on XPath: http://www.w3schools.com/xml/xml_xpath.asp
-
-The XPath functions are particularly powerful:
-http://www.w3schools.com/xsl/xsl_functions.asp
-
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/xml-v4.0.5.asciidoc b/docs/versioned-plugins/filters/xml-v4.0.5.asciidoc
deleted file mode 100644
index d3908bd2d..000000000
--- a/docs/versioned-plugins/filters/xml-v4.0.5.asciidoc
+++ /dev/null
@@ -1,187 +0,0 @@
-:plugin: xml
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.0.5
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-xml/blob/v4.0.5/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Xml filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-XML filter. Takes a field that contains XML and expands it into
-an actual data structure.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Xml Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-force_array>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-force_content>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-namespaces>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-remove_namespaces>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-store_xml>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-suppress_empty>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-xpath>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-force_array"]
-===== `force_array`
-
- * Value type is <>
- * Default value is `true`
-
-By default the filter will force single elements to be arrays. Setting this to
-false will prevent storing single elements in arrays.
-
-[id="{version}-plugins-{type}s-{plugin}-force_content"]
-===== `force_content`
-
- * Value type is <>
- * Default value is `false`
-
-By default the filter will expand attributes differently from content inside
-of tags. This option allows you to force text content and attributes to always
-parse to a hash value.
-
-[id="{version}-plugins-{type}s-{plugin}-namespaces"]
-===== `namespaces`
-
- * Value type is <>
- * Default value is `{}`
-
-By default only namespace declarations on the root element are considered.
-This setting allows you to configure all namespace declarations needed to parse the XML document.
-
-Example:
-
-[source,ruby]
-filter {
-  xml {
-    namespaces => {
-      "xsl" => "http://www.w3.org/1999/XSL/Transform"
-      "xhtml" => "http://www.w3.org/1999/xhtml"
-    }
-  }
-}
-
-
-[id="{version}-plugins-{type}s-{plugin}-remove_namespaces"]
-===== `remove_namespaces`
-
- * Value type is <>
- * Default value is `false`
-
-Remove all namespaces from all nodes in the document.
-Of course, if the document had nodes with the same names but different namespaces, they will now be ambiguous.
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The configuration for parsing XML into a hash is:
-[source,ruby]
-    source => source_field
-
-For example, if you have the whole XML document in your `message` field:
-[source,ruby]
-    filter {
-      xml {
-        source => "message"
-      }
-    }
-
-The above would parse the XML from the `message` field.
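-
-Building on that, here is a minimal sketch (the `doc` target name is
-illustrative) that parses `message` into a dedicated field and keeps single
-XML elements as scalars rather than one-element arrays:
-
-[source,ruby]
-    filter {
-      xml {
-        source      => "message"
-        target      => "doc"
-        force_array => false
-      }
-    }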
-
-[id="{version}-plugins-{type}s-{plugin}-store_xml"]
-===== `store_xml`
-
- * Value type is <>
- * Default value is `true`
-
-By default the filter will store the whole parsed XML in the destination
-field as described above. Setting this to false will prevent that.
-
-[id="{version}-plugins-{type}s-{plugin}-suppress_empty"]
-===== `suppress_empty`
-
- * Value type is <>
- * Default value is `true`
-
-By default, nothing is output if the element is empty.
-If set to `false`, empty elements will result in empty hash objects.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Define the target field for placing the data.
-
-For example, if you want the data to be put in the `doc` field:
-[source,ruby]
-    filter {
-      xml {
-        target => "doc"
-      }
-    }
-
-XML in the value of the source field will be expanded into a
-data structure in the `target` field.
-Note: if the `target` field already exists, it will be overridden.
-Required if `store_xml` is true (which is the default).
-
-[id="{version}-plugins-{type}s-{plugin}-xpath"]
-===== `xpath`
-
- * Value type is <>
- * Default value is `{}`
-
-xpath will additionally select string values (non-strings will be
-converted to strings with Ruby's `to_s` function) from parsed XML
-(using each source field defined using the method above) and place
-those values in the destination fields. Configuration:
-[source,ruby]
-xpath => [ "xpath-syntax", "destination-field" ]
-
-Values returned by XPath parsing from `xpath-syntax` will be put in the
-destination field. Multiple values returned will be pushed onto the
-destination field as an array. As such, multiple matches across
-multiple source fields will produce duplicate entries in the field.
-
-More on XPath: http://www.w3schools.com/xml/xml_xpath.asp
-
-The XPath functions are particularly powerful:
-http://www.w3schools.com/xsl/xsl_functions.asp
-
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/filters/yaml-index.asciidoc b/docs/versioned-plugins/filters/yaml-index.asciidoc
deleted file mode 100644
index 90b1f60a1..000000000
--- a/docs/versioned-plugins/filters/yaml-index.asciidoc
+++ /dev/null
@@ -1,12 +0,0 @@
-:plugin: yaml
-:type: filter
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-06-23
-|=======================================================================
-
-include::yaml-v0.1.1.asciidoc[]
-
diff --git a/docs/versioned-plugins/filters/yaml-v0.1.1.asciidoc b/docs/versioned-plugins/filters/yaml-v0.1.1.asciidoc
deleted file mode 100644
index 668533145..000000000
--- a/docs/versioned-plugins/filters/yaml-v0.1.1.asciidoc
+++ /dev/null
@@ -1,103 +0,0 @@
-:plugin: yaml
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v0.1.1
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-yaml/blob/v0.1.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Yaml filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This is a YAML parsing filter. It takes an existing field which contains
-YAML and expands it into an actual data structure within the Logstash event.
-
-By default it will place the parsed YAML in the root (top level) of the Logstash event, but this
-filter can be configured to place the YAML into any arbitrary event field, using the
-`target` configuration.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Yaml Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-source>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-exclude_tags"]
-===== `exclude_tags` (DEPRECATED)
-
- * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
- * Value type is <>
- * Default value is `[]`
-
-Only handle events without any of these tags.
-Optional.
-
-[id="{version}-plugins-{type}s-{plugin}-source"]
-===== `source`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The configuration for the YAML filter:
-[source,ruby]
-    source => source_field
-
-For example, if you have YAML data in the @message field:
-[source,ruby]
-    filter {
-      yaml {
-        source => "message"
-      }
-    }
-
-The above would parse the YAML from the @message field.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Define the target field for placing the parsed data. If this setting is
-omitted, the YAML data will be stored at the root (top level) of the event.
-
-For example, if you want the data to be put in the `doc` field:
-[source,ruby]
-    filter {
-      yaml {
-        target => "doc"
-      }
-    }
-
-YAML in the value of the `source` field will be expanded into a
-data structure in the `target` field.
-
-NOTE: if the `target` field already exists, it will be overwritten!
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/zeromq-index.asciidoc b/docs/versioned-plugins/filters/zeromq-index.asciidoc
deleted file mode 100644
index f02877fa0..000000000
--- a/docs/versioned-plugins/filters/zeromq-index.asciidoc
+++ /dev/null
@@ -1,14 +0,0 @@
-:plugin: zeromq
-:type: filter
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-08-15
-| <> | 2017-06-23
-|=======================================================================
-
-include::zeromq-v3.0.2.asciidoc[]
-include::zeromq-v3.0.1.asciidoc[]
-
diff --git a/docs/versioned-plugins/filters/zeromq-v3.0.1.asciidoc b/docs/versioned-plugins/filters/zeromq-v3.0.1.asciidoc
deleted file mode 100644
index c8ade864b..000000000
--- a/docs/versioned-plugins/filters/zeromq-v3.0.1.asciidoc
+++ /dev/null
@@ -1,148 +0,0 @@
-:plugin: zeromq
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.1
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-zeromq/blob/v3.0.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Zeromq filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-ZeroMQ filter. This is the best way to send an event externally for filtering.
-It works much like an exec filter would, by sending the event "offsite"
-for processing and waiting for a response.
-
-The protocol here is:
- * REQ sent with JSON-serialized logstash event
- * REP read expected to be the full JSON 'filtered' event
- * if the reply read is an empty string, it will cancel the event
-
-Note that this is a limited subset of the zeromq functionality in
-inputs and outputs. The only topology that makes sense here is
-REQ/REP.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Zeromq Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-add_tag_on_timeout>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-address>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-field>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No
-| <<{version}-plugins-{type}s-{plugin}-retries>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-sentinel>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-sockopt>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-add_tag_on_timeout"]
-===== `add_tag_on_timeout`
-
- * Value type is <>
- * Default value is `"zeromqtimeout"`
-
-Tag to add if the zeromq timeout expires before getting back an answer.
-If set to `""`, no tag will be added.
-
-[id="{version}-plugins-{type}s-{plugin}-address"]
-===== `address`
-
- * Value type is <>
- * Default value is `"tcp://127.0.0.1:2121"`
-
-0mq socket address to connect or bind.
-Please note that inproc:// will not work with Logstash,
-as we use a context per thread.
-By default, filters connect.
-
-[id="{version}-plugins-{type}s-{plugin}-field"]
-===== `field`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The field to send off-site for processing.
-If this is unset, the whole event will be sent.
-
-[id="{version}-plugins-{type}s-{plugin}-mode"]
-===== `mode`
-
- * Value can be any of: `server`, `client`
- * Default value is `"client"`
-
-0mq mode:
-server mode binds/listens,
-client mode connects.
-
-[id="{version}-plugins-{type}s-{plugin}-retries"]
-===== `retries`
-
- * Value type is <>
- * Default value is `3`
-
-Number of retries, used for both sending and receiving messages.
-For sending, retries should return instantly.
-For receiving, the total blocking time is up to retries X timeout,
-which by default is 3 X 500 = 1500ms.
-
-[id="{version}-plugins-{type}s-{plugin}-sentinel"]
-===== `sentinel`
-
- * Value type is <>
- * Default value is `""`
-
-A sentinel value to signal the filter to cancel the event.
-If the peer returns the sentinel value, the event will be cancelled.
-
-[id="{version}-plugins-{type}s-{plugin}-sockopt"]
-===== `sockopt`
-
- * Value type is <>
- * There is no default value for this setting.
-
-0mq socket options.
-This exposes zmq_setsockopt for advanced tuning;
-see http://api.zeromq.org/2-1:zmq-setsockopt for details.
-
-This is where you would set values like:
-ZMQ::HWM - high water mark
-ZMQ::IDENTITY - named queues
-ZMQ::SWAP_SIZE - space for disk overflow
-ZMQ::SUBSCRIBE - topic filters for pubsub
-
-Example: sockopt => ["ZMQ::HWM", 50, "ZMQ::IDENTITY", "my_named_queue"]
-
-[id="{version}-plugins-{type}s-{plugin}-timeout"]
-===== `timeout`
-
- * Value type is <>
- * Default value is `500`
-
-Timeout in milliseconds to wait for a reply.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/filters/zeromq-v3.0.2.asciidoc b/docs/versioned-plugins/filters/zeromq-v3.0.2.asciidoc
deleted file mode 100644
index dc184ff9c..000000000
--- a/docs/versioned-plugins/filters/zeromq-v3.0.2.asciidoc
+++ /dev/null
@@ -1,148 +0,0 @@
-:plugin: zeromq
-:type: filter
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.2
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-filter-zeromq/blob/v3.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Zeromq filter plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-ZeroMQ filter. This is the best way to send an event externally for filtering.
-It works much like an exec filter would, by sending the event "offsite"
-for processing and waiting for a response.
-
-The protocol here is:
- * REQ sent with JSON-serialized logstash event
- * REP read expected to be the full JSON 'filtered' event
- * if the reply read is an empty string, it will cancel the event
-
-Note that this is a limited subset of the zeromq functionality in
-inputs and outputs. The only topology that makes sense here is
-REQ/REP.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Zeromq Filter Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-add_tag_on_timeout>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-address>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-field>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No
-| <<{version}-plugins-{type}s-{plugin}-retries>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-sentinel>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-sockopt>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-filter plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-add_tag_on_timeout"]
-===== `add_tag_on_timeout`
-
- * Value type is <>
- * Default value is `"zeromqtimeout"`
-
-Tag to add if the zeromq timeout expires before getting back an answer.
-If set to `""`, no tag will be added.
-
-[id="{version}-plugins-{type}s-{plugin}-address"]
-===== `address`
-
- * Value type is <>
- * Default value is `"tcp://127.0.0.1:2121"`
-
-0mq socket address to connect or bind.
-Please note that inproc:// will not work with Logstash,
-as we use a context per thread.
-By default, filters connect.
-
-[id="{version}-plugins-{type}s-{plugin}-field"]
-===== `field`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The field to send off-site for processing.
-If this is unset, the whole event will be sent.
-
-[id="{version}-plugins-{type}s-{plugin}-mode"]
-===== `mode`
-
- * Value can be any of: `server`, `client`
- * Default value is `"client"`
-
-0mq mode:
-server mode binds/listens,
-client mode connects.
-
-[id="{version}-plugins-{type}s-{plugin}-retries"]
-===== `retries`
-
- * Value type is <>
- * Default value is `3`
-
-Number of retries, used for both sending and receiving messages.
-For sending, retries should return instantly.
-For receiving, the total blocking time is up to retries X timeout,
-which by default is 3 X 500 = 1500ms.
-
-[id="{version}-plugins-{type}s-{plugin}-sentinel"]
-===== `sentinel`
-
- * Value type is <>
- * Default value is `""`
-
-A sentinel value to signal the filter to cancel the event.
-If the peer returns the sentinel value, the event will be cancelled.
-
-[id="{version}-plugins-{type}s-{plugin}-sockopt"]
-===== `sockopt`
-
- * Value type is <>
- * There is no default value for this setting.
-
-0mq socket options.
-This exposes zmq_setsockopt for advanced tuning;
-see http://api.zeromq.org/2-1:zmq-setsockopt for details.
-
-This is where you would set values like:
-ZMQ::HWM - high water mark
-ZMQ::IDENTITY - named queues
-ZMQ::SWAP_SIZE - space for disk overflow
-ZMQ::SUBSCRIBE - topic filters for pubsub
-
-Example: sockopt => ["ZMQ::HWM", 50, "ZMQ::IDENTITY", "my_named_queue"]
-
-[id="{version}-plugins-{type}s-{plugin}-timeout"]
-===== `timeout`
-
- * Value type is <>
- * Default value is `500`
-
-Timeout in milliseconds to wait for a reply.
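-
-Putting the settings together, here is a minimal sketch of a complete filter
-block (the timeout and retry values are illustrative, not recommendations):
-
-[source,ruby]
-    filter {
-      zeromq {
-        mode    => "client"
-        address => "tcp://127.0.0.1:2121"
-        timeout => 1000
-        retries => 5
-        add_tag_on_timeout => "zeromqtimeout"
-      }
-    }
-
-With these settings, a reply can block for up to 5 X 1000 = 5000ms before the
-event is tagged and passed on, following the retry semantics described above.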
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs-index.asciidoc b/docs/versioned-plugins/inputs-index.asciidoc index 2cdec3326..2d56074ae 100644 --- a/docs/versioned-plugins/inputs-index.asciidoc +++ b/docs/versioned-plugins/inputs-index.asciidoc @@ -4,71 +4,7 @@ include::include/plugin-intro.asciidoc[] include::inputs/beats-index.asciidoc[] -include::inputs/cloudwatch-index.asciidoc[] -include::inputs/cloudwatch_logs-index.asciidoc[] -include::inputs/couchdb_changes-index.asciidoc[] -include::inputs/dead_letter_queue-index.asciidoc[] -include::inputs/drupal_dblog-index.asciidoc[] -include::inputs/dynamodb-index.asciidoc[] -include::inputs/elasticsearch-index.asciidoc[] -include::inputs/eventlog-index.asciidoc[] -include::inputs/example-index.asciidoc[] -include::inputs/exec-index.asciidoc[] -include::inputs/file-index.asciidoc[] -include::inputs/fluentd-index.asciidoc[] -include::inputs/ganglia-index.asciidoc[] -include::inputs/gelf-index.asciidoc[] -include::inputs/gemfire-index.asciidoc[] -include::inputs/generator-index.asciidoc[] -include::inputs/github-index.asciidoc[] -include::inputs/google_pubsub-index.asciidoc[] -include::inputs/googleanalytics-index.asciidoc[] -include::inputs/graphite-index.asciidoc[] -include::inputs/heartbeat-index.asciidoc[] -include::inputs/heroku-index.asciidoc[] include::inputs/http-index.asciidoc[] -include::inputs/http_poller-index.asciidoc[] -include::inputs/imap-index.asciidoc[] -include::inputs/irc-index.asciidoc[] -include::inputs/jdbc-index.asciidoc[] -include::inputs/jms-index.asciidoc[] -include::inputs/jmx-index.asciidoc[] -include::inputs/jmx-pipe-index.asciidoc[] -include::inputs/journald-index.asciidoc[] -include::inputs/kafka-index.asciidoc[] -include::inputs/kinesis-index.asciidoc[] -include::inputs/log4j-index.asciidoc[] -include::inputs/log4j2-index.asciidoc[] -include::inputs/lumberjack-index.asciidoc[] -include::inputs/meetup-index.asciidoc[] -include::inputs/mongodb-index.asciidoc[] -include::inputs/neo4j-index.asciidoc[] -include::inputs/netflow-index.asciidoc[] -include::inputs/perfmon-index.asciidoc[] -include::inputs/pipe-index.asciidoc[] -include::inputs/puppet_facter-index.asciidoc[] -include::inputs/rabbitmq-index.asciidoc[] -include::inputs/rackspace-index.asciidoc[] -include::inputs/redis-index.asciidoc[] -include::inputs/relp-index.asciidoc[] -include::inputs/rss-index.asciidoc[] include::inputs/s3-index.asciidoc[] -include::inputs/s3sqs-index.asciidoc[] -include::inputs/salesforce-index.asciidoc[] -include::inputs/snmptrap-index.asciidoc[] -include::inputs/sqlite-index.asciidoc[] -include::inputs/sqs-index.asciidoc[] -include::inputs/stdin-index.asciidoc[] -include::inputs/stomp-index.asciidoc[] -include::inputs/syslog-index.asciidoc[] include::inputs/tcp-index.asciidoc[] -include::inputs/twitter-index.asciidoc[] -include::inputs/udp-index.asciidoc[] -include::inputs/unix-index.asciidoc[] -include::inputs/varnishlog-index.asciidoc[] -include::inputs/websocket-index.asciidoc[] -include::inputs/wmi-index.asciidoc[] -include::inputs/xmpp-index.asciidoc[] -include::inputs/zenoss-index.asciidoc[] -include::inputs/zeromq-index.asciidoc[] diff --git a/docs/versioned-plugins/inputs/cloudwatch-index.asciidoc b/docs/versioned-plugins/inputs/cloudwatch-index.asciidoc deleted file mode 100644 index 6bb5d8072..000000000 --- a/docs/versioned-plugins/inputs/cloudwatch-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ 
-:plugin: cloudwatch
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-15
-| <> | 2017-06-23
-|=======================================================================
-
-include::cloudwatch-v2.0.3.asciidoc[]
-include::cloudwatch-v2.0.2.asciidoc[]
-include::cloudwatch-v2.0.1.asciidoc[]
-
diff --git a/docs/versioned-plugins/inputs/cloudwatch-v2.0.1.asciidoc b/docs/versioned-plugins/inputs/cloudwatch-v2.0.1.asciidoc
deleted file mode 100644
index 26633ef13..000000000
--- a/docs/versioned-plugins/inputs/cloudwatch-v2.0.1.asciidoc
+++ /dev/null
@@ -1,266 +0,0 @@
-:plugin: cloudwatch
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v2.0.1
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-input-cloudwatch/blob/v2.0.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Cloudwatch input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Pull events from the Amazon Web Services CloudWatch API.
-
-To use this plugin, you *must* have an AWS account and an IAM policy like the one below.
-
-Typically, you should set up an IAM policy, create a user, and apply the IAM policy to the user.
-A sample policy for EC2 metrics is as follows:
-[source,json]
-    {
-      "Version": "2012-10-17",
-      "Statement": [
-        {
-          "Sid": "Stmt1444715676000",
-          "Effect": "Allow",
-          "Action": [
-            "cloudwatch:GetMetricStatistics",
-            "cloudwatch:ListMetrics"
-          ],
-          "Resource": "*"
-        },
-        {
-          "Sid": "Stmt1444716576170",
-          "Effect": "Allow",
-          "Action": [
-            "ec2:DescribeInstances"
-          ],
-          "Resource": "*"
-        }
-      ]
-    }
-
-See http://aws.amazon.com/iam/ for more details on setting up AWS identities.
-
-==== Configuration Examples
-[source,ruby]
-    input {
-      cloudwatch {
-        namespace => "AWS/EC2"
-        metrics => [ "CPUUtilization" ]
-        filters => { "tag:Group" => "API-Production" }
-        region => "us-east-1"
-      }
-    }
-
-    input {
-      cloudwatch {
-        namespace => "AWS/EBS"
-        metrics => ["VolumeQueueLength"]
-        filters => { "tag:Monitoring" => "Yes" }
-        region => "us-east-1"
-      }
-    }
-
-    input {
-      cloudwatch {
-        namespace => "AWS/RDS"
-        metrics => ["CPUUtilization", "CPUCreditUsage"]
-        filters => { "EngineName" => "mysql" } # Only supports EngineName, DatabaseClass and DBInstanceIdentifier
-        region => "us-east-1"
-      }
-    }
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Cloudwatch Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-combined>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-filters>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-namespace>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-period>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No
-| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-statistics>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-use_ssl>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-access_key_id"]
-===== `access_key_id`
-
- * Value type is <>
- * There is no default value for this setting.
-
-This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
-
-1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config
-2. External credentials file specified by `aws_credentials_file`
-3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
-4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
-5. IAM Instance Profile (available when running inside EC2)
-
-[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"]
-===== `aws_credentials_file`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to a YAML file containing a hash of AWS credentials.
-This file will only be loaded if `access_key_id` and
-`secret_access_key` aren't set. The contents of the
-file should look like this:
-
-[source,ruby]
-----------------------------------
-    :access_key_id: "12345"
-    :secret_access_key: "54321"
-----------------------------------
-
-
-[id="{version}-plugins-{type}s-{plugin}-combined"]
-===== `combined`
-
- * Value type is <>
- * Default value is `false`
-
-Use this for namespaces that need to combine the dimensions, like S3 and SNS.
-
-[id="{version}-plugins-{type}s-{plugin}-filters"]
-===== `filters`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Specify the filters to apply when fetching resources:
-
-This needs to follow the AWS convention of specifying filters.
-Instances: { 'instance-id' => 'i-12344321' }
-Tags: { "tag:Environment" => "Production" }
-Volumes: { 'attachment.status' => 'attached' }
-Each namespace supports certain dimensions. Please consult the documentation
-to ensure you're using valid filters.
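-
-For example, to fetch the default EC2 metrics only for instances carrying a
-specific tag (a minimal sketch; the tag name and value are illustrative):
-
-[source,ruby]
-    input {
-      cloudwatch {
-        namespace => "AWS/EC2"
-        filters   => { "tag:Environment" => "Production" }
-        region    => "us-east-1"
-      }
-    }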
-
-[id="{version}-plugins-{type}s-{plugin}-interval"]
-===== `interval`
-
- * Value type is <>
- * Default value is `900`
-
-Set how frequently CloudWatch should be queried.
-
-The default, `900`, means check every 15 minutes. Setting this value too low
-(generally less than 300) results in no metrics being returned from CloudWatch.
-
-[id="{version}-plugins-{type}s-{plugin}-metrics"]
-===== `metrics`
-
- * Value type is <>
- * Default value is `["CPUUtilization", "DiskReadOps", "DiskWriteOps", "NetworkIn", "NetworkOut"]`
-
-Specify the metrics to fetch for the namespace. The defaults are AWS/EC2 specific. See http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/aws-namespaces.html
-for the available metrics for other namespaces.
-
-[id="{version}-plugins-{type}s-{plugin}-namespace"]
-===== `namespace`
-
- * Value type is <>
- * Default value is `"AWS/EC2"`
-
-The service namespace of the metrics to fetch.
-
-The default is for the EC2 service. See http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/aws-namespaces.html
-for valid values.
-
-[id="{version}-plugins-{type}s-{plugin}-period"]
-===== `period`
-
- * Value type is <>
- * Default value is `300`
-
-Set the granularity of the returned datapoints.
-
-Must be at least 60 seconds and in multiples of 60.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy_uri"]
-===== `proxy_uri`
-
- * Value type is <>
- * There is no default value for this setting.
-
-URI of the proxy server, if required.
-
-[id="{version}-plugins-{type}s-{plugin}-region"]
-===== `region`
-
- * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
- * Default value is `"us-east-1"`
-
-The AWS Region.
-
-[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
-===== `secret_access_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The AWS Secret Access Key.
-
-[id="{version}-plugins-{type}s-{plugin}-session_token"]
-===== `session_token`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The AWS session token for temporary credentials.
-
-[id="{version}-plugins-{type}s-{plugin}-statistics"]
-===== `statistics`
-
- * Value type is <>
- * Default value is `["SampleCount", "Average", "Minimum", "Maximum", "Sum"]`
-
-Specify the statistics to fetch for each namespace.
-
-[id="{version}-plugins-{type}s-{plugin}-use_ssl"]
-===== `use_ssl`
-
- * Value type is <>
- * Default value is `true`
-
-Should we require (true) or disable (false) using SSL for communicating with the AWS API?
-The AWS SDK for Ruby defaults to SSL, so we preserve that.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/cloudwatch-v2.0.2.asciidoc b/docs/versioned-plugins/inputs/cloudwatch-v2.0.2.asciidoc
deleted file mode 100644
index bc8c01c8e..000000000
--- a/docs/versioned-plugins/inputs/cloudwatch-v2.0.2.asciidoc
+++ /dev/null
@@ -1,266 +0,0 @@
-:plugin: cloudwatch
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v2.0.2
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-input-cloudwatch/blob/v2.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Cloudwatch input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Pull events from the Amazon Web Services CloudWatch API.
-
-To use this plugin, you *must* have an AWS account and an IAM policy like the one below.
-
-Typically, you should set up an IAM policy, create a user, and apply the IAM policy to the user.
-A sample policy for EC2 metrics is as follows:
-[source,json]
-    {
-      "Version": "2012-10-17",
-      "Statement": [
-        {
-          "Sid": "Stmt1444715676000",
-          "Effect": "Allow",
-          "Action": [
-            "cloudwatch:GetMetricStatistics",
-            "cloudwatch:ListMetrics"
-          ],
-          "Resource": "*"
-        },
-        {
-          "Sid": "Stmt1444716576170",
-          "Effect": "Allow",
-          "Action": [
-            "ec2:DescribeInstances"
-          ],
-          "Resource": "*"
-        }
-      ]
-    }
-
-See http://aws.amazon.com/iam/ for more details on setting up AWS identities.
-
-==== Configuration Examples
-[source,ruby]
-    input {
-      cloudwatch {
-        namespace => "AWS/EC2"
-        metrics => [ "CPUUtilization" ]
-        filters => { "tag:Group" => "API-Production" }
-        region => "us-east-1"
-      }
-    }
-
-    input {
-      cloudwatch {
-        namespace => "AWS/EBS"
-        metrics => ["VolumeQueueLength"]
-        filters => { "tag:Monitoring" => "Yes" }
-        region => "us-east-1"
-      }
-    }
-
-    input {
-      cloudwatch {
-        namespace => "AWS/RDS"
-        metrics => ["CPUUtilization", "CPUCreditUsage"]
-        filters => { "EngineName" => "mysql" } # Only supports EngineName, DatabaseClass and DBInstanceIdentifier
-        region => "us-east-1"
-      }
-    }
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Cloudwatch Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-combined>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-filters>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-namespace>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-period>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No
-| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-statistics>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-use_ssl>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
- -  - -[id="{version}-plugins-{type}s-{plugin}-access_key_id"] -===== `access_key_id` - - * Value type is <> - * There is no default value for this setting. - -This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: - -1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config -2. External credentials file specified by `aws_credentials_file` -3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` -4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` -5. IAM Instance Profile (available when running inside EC2) - -[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] -===== `aws_credentials_file` - - * Value type is <> - * There is no default value for this setting. - -Path to YAML file containing a hash of AWS credentials. -This file will only be loaded if `access_key_id` and -`secret_access_key` aren't set. The contents of the -file should look like this: - -[source,ruby] ----------------------------------- - :access_key_id: "12345" - :secret_access_key: "54321" ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-combined"] -===== `combined` - - * Value type is <> - * Default value is `false` - -Use this for namespaces that need to combine the dimensions like S3 and SNS. - -[id="{version}-plugins-{type}s-{plugin}-filters"] -===== `filters` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Specify the filters to apply when fetching resources: - -This needs to follow the AWS convention of specifiying filters. -Instances: { 'instance-id' => 'i-12344321' } -Tags: { "tag:Environment" => "Production" } -Volumes: { 'attachment.status' => 'attached' } -Each namespace uniquely support certian dimensions. Please consult the documentation -to ensure you're using valid filters. - -[id="{version}-plugins-{type}s-{plugin}-interval"] -===== `interval` - - * Value type is <> - * Default value is `900` - -Set how frequently CloudWatch should be queried - -The default, `900`, means check every 15 minutes. Setting this value too low -(generally less than 300) results in no metrics being returned from CloudWatch. - -[id="{version}-plugins-{type}s-{plugin}-metrics"] -===== `metrics` - - * Value type is <> - * Default value is `["CPUUtilization", "DiskReadOps", "DiskWriteOps", "NetworkIn", "NetworkOut"]` - -Specify the metrics to fetch for the namespace. The defaults are AWS/EC2 specific. See http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/aws-namespaces.html -for the available metrics for other namespaces. - -[id="{version}-plugins-{type}s-{plugin}-namespace"] -===== `namespace` - - * Value type is <> - * Default value is `"AWS/EC2"` - -If undefined, LogStash will complain, even if codec is unused. -The service namespace of the metrics to fetch. - -The default is for the EC2 service. See http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/aws-namespaces.html -for valid values. - -[id="{version}-plugins-{type}s-{plugin}-period"] -===== `period` - - * Value type is <> - * Default value is `300` - -Set the granularity of the returned datapoints. - -Must be at least 60 seconds and in multiples of 60. - -[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] -===== `proxy_uri` - - * Value type is <> - * There is no default value for this setting. 
- -URI to proxy server if required - -[id="{version}-plugins-{type}s-{plugin}-region"] -===== `region` - - * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1` - * Default value is `"us-east-1"` - -The AWS Region - -[id="{version}-plugins-{type}s-{plugin}-secret_access_key"] -===== `secret_access_key` - - * Value type is <> - * There is no default value for this setting. - -The AWS Secret Access Key - -[id="{version}-plugins-{type}s-{plugin}-session_token"] -===== `session_token` - - * Value type is <> - * There is no default value for this setting. - -The AWS Session token for temporary credential - -[id="{version}-plugins-{type}s-{plugin}-statistics"] -===== `statistics` - - * Value type is <> - * Default value is `["SampleCount", "Average", "Minimum", "Maximum", "Sum"]` - -Specify the statistics to fetch for each namespace - -[id="{version}-plugins-{type}s-{plugin}-use_ssl"] -===== `use_ssl` - - * Value type is <> - * Default value is `true` - -Make sure we require the V1 classes when including this module. -require 'aws-sdk' will load v2 classes. -Should we require (true) or disable (false) using SSL for communicating with the AWS API -The AWS SDK for Ruby defaults to SSL so we preserve that - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/cloudwatch-v2.0.3.asciidoc b/docs/versioned-plugins/inputs/cloudwatch-v2.0.3.asciidoc deleted file mode 100644 index 9ab8d443b..000000000 --- a/docs/versioned-plugins/inputs/cloudwatch-v2.0.3.asciidoc +++ /dev/null @@ -1,266 +0,0 @@ -:plugin: cloudwatch -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.0.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-cloudwatch/blob/v2.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Cloudwatch input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Pull events from the Amazon Web Services CloudWatch API. - -To use this plugin, you *must* have an AWS account, and the following policy - -Typically, you should setup an IAM policy, create a user and apply the IAM policy to the user. -A sample policy for EC2 metrics is as follows: -[source,json] - { - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "Stmt1444715676000", - "Effect": "Allow", - "Action": [ - "cloudwatch:GetMetricStatistics", - "cloudwatch:ListMetrics" - ], - "Resource": "*" - }, - { - "Sid": "Stmt1444716576170", - "Effect": "Allow", - "Action": [ - "ec2:DescribeInstances" - ], - "Resource": "*" - } - ] - } - -See http://aws.amazon.com/iam/ for more details on setting up AWS identities. 
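-
-For example, one way to grant these permissions, shown as a sketch assuming the
-policy above is saved as `cloudwatch-policy.json` (an illustrative filename), is to
-attach it to a dedicated IAM user with the AWS CLI:
-
-[source,sh]
-    aws iam create-user --user-name logstash-cloudwatch
-    aws iam put-user-policy --user-name logstash-cloudwatch \
-      --policy-name cloudwatch-read --policy-document file://cloudwatch-policy.json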
- -# Configuration Example -[source, ruby] - input { - cloudwatch { - namespace => "AWS/EC2" - metrics => [ "CPUUtilization" ] - filters => { "tag:Group" => "API-Production" } - region => "us-east-1" - } - } - - input { - cloudwatch { - namespace => "AWS/EBS" - metrics => ["VolumeQueueLength"] - filters => { "tag:Monitoring" => "Yes" } - region => "us-east-1" - } - } - - input { - cloudwatch { - namespace => "AWS/RDS" - metrics => ["CPUUtilization", "CPUCreditUsage"] - filters => { "EngineName" => "mysql" } # Only supports EngineName, DatabaseClass and DBInstanceIdentifier - region => "us-east-1" - } - } - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Cloudwatch Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-combined>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-filters>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-namespace>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-period>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No -| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-statistics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-use_ssl>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-access_key_id"] -===== `access_key_id` - - * Value type is <> - * There is no default value for this setting. - -This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: - -1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config -2. External credentials file specified by `aws_credentials_file` -3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` -4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` -5. IAM Instance Profile (available when running inside EC2) - -[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] -===== `aws_credentials_file` - - * Value type is <> - * There is no default value for this setting. - -Path to YAML file containing a hash of AWS credentials. -This file will only be loaded if `access_key_id` and -`secret_access_key` aren't set. 
The contents of the -file should look like this: - -[source,ruby] ----------------------------------- - :access_key_id: "12345" - :secret_access_key: "54321" ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-combined"] -===== `combined` - - * Value type is <> - * Default value is `false` - -Use this for namespaces that need to combine the dimensions like S3 and SNS. - -[id="{version}-plugins-{type}s-{plugin}-filters"] -===== `filters` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Specify the filters to apply when fetching resources: - -This needs to follow the AWS convention of specifiying filters. -Instances: { 'instance-id' => 'i-12344321' } -Tags: { "tag:Environment" => "Production" } -Volumes: { 'attachment.status' => 'attached' } -Each namespace uniquely support certian dimensions. Please consult the documentation -to ensure you're using valid filters. - -[id="{version}-plugins-{type}s-{plugin}-interval"] -===== `interval` - - * Value type is <> - * Default value is `900` - -Set how frequently CloudWatch should be queried - -The default, `900`, means check every 15 minutes. Setting this value too low -(generally less than 300) results in no metrics being returned from CloudWatch. - -[id="{version}-plugins-{type}s-{plugin}-metrics"] -===== `metrics` - - * Value type is <> - * Default value is `["CPUUtilization", "DiskReadOps", "DiskWriteOps", "NetworkIn", "NetworkOut"]` - -Specify the metrics to fetch for the namespace. The defaults are AWS/EC2 specific. See http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/aws-namespaces.html -for the available metrics for other namespaces. - -[id="{version}-plugins-{type}s-{plugin}-namespace"] -===== `namespace` - - * Value type is <> - * Default value is `"AWS/EC2"` - -If undefined, LogStash will complain, even if codec is unused. -The service namespace of the metrics to fetch. - -The default is for the EC2 service. See http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/aws-namespaces.html -for valid values. - -[id="{version}-plugins-{type}s-{plugin}-period"] -===== `period` - - * Value type is <> - * Default value is `300` - -Set the granularity of the returned datapoints. - -Must be at least 60 seconds and in multiples of 60. - -[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] -===== `proxy_uri` - - * Value type is <> - * There is no default value for this setting. - -URI to proxy server if required - -[id="{version}-plugins-{type}s-{plugin}-region"] -===== `region` - - * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1` - * Default value is `"us-east-1"` - -The AWS Region - -[id="{version}-plugins-{type}s-{plugin}-secret_access_key"] -===== `secret_access_key` - - * Value type is <> - * There is no default value for this setting. - -The AWS Secret Access Key - -[id="{version}-plugins-{type}s-{plugin}-session_token"] -===== `session_token` - - * Value type is <> - * There is no default value for this setting. 
-
-The AWS session token for temporary credentials.
-
-[id="{version}-plugins-{type}s-{plugin}-statistics"]
-===== `statistics`
-
- * Value type is <>
- * Default value is `["SampleCount", "Average", "Minimum", "Maximum", "Sum"]`
-
-Specify the statistics to fetch for each namespace.
-
-[id="{version}-plugins-{type}s-{plugin}-use_ssl"]
-===== `use_ssl`
-
- * Value type is <>
- * Default value is `true`
-
-Should we require (true) or disable (false) using SSL for communicating with the AWS API.
-The AWS SDK for Ruby defaults to SSL, so we preserve that default.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/cloudwatch_logs-index.asciidoc b/docs/versioned-plugins/inputs/cloudwatch_logs-index.asciidoc
deleted file mode 100644
index 9aca25cc2..000000000
--- a/docs/versioned-plugins/inputs/cloudwatch_logs-index.asciidoc
+++ /dev/null
@@ -1,10 +0,0 @@
-:plugin: cloudwatch_logs
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-|=======================================================================
-
-
diff --git a/docs/versioned-plugins/inputs/couchdb_changes-index.asciidoc b/docs/versioned-plugins/inputs/couchdb_changes-index.asciidoc
deleted file mode 100644
index 1749eebe7..000000000
--- a/docs/versioned-plugins/inputs/couchdb_changes-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: couchdb_changes
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-15
-| <> | 2017-06-23
-|=======================================================================
-
-include::couchdb_changes-v3.1.4.asciidoc[]
-include::couchdb_changes-v3.1.3.asciidoc[]
-include::couchdb_changes-v3.1.2.asciidoc[]
-
diff --git a/docs/versioned-plugins/inputs/couchdb_changes-v3.1.2.asciidoc b/docs/versioned-plugins/inputs/couchdb_changes-v3.1.2.asciidoc
deleted file mode 100644
index 0801defb4..000000000
--- a/docs/versioned-plugins/inputs/couchdb_changes-v3.1.2.asciidoc
+++ /dev/null
@@ -1,220 +0,0 @@
-:plugin: couchdb_changes
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.1.2
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-input-couchdb_changes/blob/v3.1.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Couchdb_changes input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This CouchDB input allows you to automatically stream events from the
-CouchDB http://guide.couchdb.org/draft/notifications.html[_changes] URI.
-Moreover, any "future" changes will automatically be streamed as well, making it easy to synchronize
-your CouchDB data with any target destination.
-
-===== Upsert and delete
-You can use event metadata to allow for document deletion.
-All non-delete operations are treated as upserts.
-
-===== Starting at a Specific Sequence
-The CouchDB input stores the last sequence number value in the location defined by `sequence_path`.
-You can use this fact to start or resume the stream at a particular sequence.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Couchdb_changes Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-always_reconnect>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-db>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ignore_attachments>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-initial_sequence>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-keep_id>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-keep_revision>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-reconnect_delay>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-secure>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-sequence_path>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-username>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-always_reconnect"]
-===== `always_reconnect`
-
- * Value type is <>
- * Default value is `true`
-
-Reconnect flag. When true, always try to reconnect after a failure.
-
-[id="{version}-plugins-{type}s-{plugin}-ca_file"]
-===== `ca_file`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to a CA certificate file, used to validate certificates.
-
-[id="{version}-plugins-{type}s-{plugin}-db"]
-===== `db`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The CouchDB db to connect to.
-Required parameter.
-
-[id="{version}-plugins-{type}s-{plugin}-heartbeat"]
-===== `heartbeat`
-
- * Value type is <>
- * Default value is `1000`
-
-Logstash connects to CouchDB's _changes feed with `feed=continuous`.
-The heartbeat is how often (in milliseconds) Logstash will ping
-CouchDB to ensure the connection is maintained. Changing this
-setting is not recommended unless you know what you are doing.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * Value type is <>
- * Default value is `"localhost"`
-
-IP or hostname of your CouchDB instance.
-
-[id="{version}-plugins-{type}s-{plugin}-ignore_attachments"]
-===== `ignore_attachments`
-
- * Value type is <>
- * Default value is `true`
-
-Future feature! Until implemented, changing this from the default
-will not do anything.
-
-Ignore attachments associated with CouchDB documents.
-
-[id="{version}-plugins-{type}s-{plugin}-initial_sequence"]
-===== `initial_sequence`
-
- * Value type is <>
- * There is no default value for this setting.
- -If unspecified, Logstash will attempt to read the last sequence number -from the `sequence_path` file. If that is empty or non-existent, it will -begin with 0 (the beginning). - -If you specify this value, it is anticipated that you will -only be doing so for an initial read under special circumstances -and that you will unset this value afterwards. - -[id="{version}-plugins-{type}s-{plugin}-keep_id"] -===== `keep_id` - - * Value type is <> - * Default value is `false` - -Preserve the CouchDB document id "_id" value in the -output. - -[id="{version}-plugins-{type}s-{plugin}-keep_revision"] -===== `keep_revision` - - * Value type is <> - * Default value is `false` - -Preserve the CouchDB document revision "_rev" value in the -output. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * Default value is `nil` - -Password, if authentication is needed to connect to -CouchDB - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `5984` - -Port of your CouchDB instance. - -[id="{version}-plugins-{type}s-{plugin}-reconnect_delay"] -===== `reconnect_delay` - - * Value type is <> - * Default value is `10` - -Reconnect delay: time between reconnect attempts, in seconds. - -[id="{version}-plugins-{type}s-{plugin}-secure"] -===== `secure` - - * Value type is <> - * Default value is `false` - -Connect to CouchDB's _changes feed securely (via https) -Default: false (via http) - -[id="{version}-plugins-{type}s-{plugin}-sequence_path"] -===== `sequence_path` - - * Value type is <> - * There is no default value for this setting. - -File path where the last sequence number in the _changes -stream is stored. If unset it will write to `$HOME/.couchdb_seq` - -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * There is no default value for this setting. - -Timeout: Number of milliseconds to wait for new data before -terminating the connection. If a timeout is set it will disable -the heartbeat configuration option. - -[id="{version}-plugins-{type}s-{plugin}-username"] -===== `username` - - * Value type is <> - * Default value is `nil` - -Username, if authentication is needed to connect to -CouchDB - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/couchdb_changes-v3.1.3.asciidoc b/docs/versioned-plugins/inputs/couchdb_changes-v3.1.3.asciidoc deleted file mode 100644 index 9099e8d8b..000000000 --- a/docs/versioned-plugins/inputs/couchdb_changes-v3.1.3.asciidoc +++ /dev/null @@ -1,220 +0,0 @@ -:plugin: couchdb_changes -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.3 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-input-couchdb_changes/blob/v3.1.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Couchdb_changes input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This CouchDB input allows you to automatically stream events from the -CouchDB http://guide.couchdb.org/draft/notifications.html[_changes] URI. 
-Moreover, any "future" changes will automatically be streamed as well making it easy to synchronize -your CouchDB data with any target destination - -### Upsert and delete -You can use event metadata to allow for document deletion. -All non-delete operations are treated as upserts - -### Starting at a Specific Sequence -The CouchDB input stores the last sequence number value in location defined by `sequence_path`. -You can use this fact to start or resume the stream at a particular sequence. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Couchdb_changes Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-always_reconnect>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-db>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ignore_attachments>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-initial_sequence>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keep_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keep_revision>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-secure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sequence_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-username>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-always_reconnect"] -===== `always_reconnect` - - * Value type is <> - * Default value is `true` - -Reconnect flag. When true, always try to reconnect after a failure - -[id="{version}-plugins-{type}s-{plugin}-ca_file"] -===== `ca_file` - - * Value type is <> - * There is no default value for this setting. - -Path to a CA certificate file, used to validate certificates - -[id="{version}-plugins-{type}s-{plugin}-db"] -===== `db` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The CouchDB db to connect to. -Required parameter. - -[id="{version}-plugins-{type}s-{plugin}-heartbeat"] -===== `heartbeat` - - * Value type is <> - * Default value is `1000` - -Logstash connects to CouchDB's _changes with feed=continuous -The heartbeat is how often (in milliseconds) Logstash will ping -CouchDB to ensure the connection is maintained. Changing this -setting is not recommended unless you know what you are doing. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"localhost"` - -IP or hostname of your CouchDB instance - -[id="{version}-plugins-{type}s-{plugin}-ignore_attachments"] -===== `ignore_attachments` - - * Value type is <> - * Default value is `true` - -Future feature! Until implemented, changing this from the default -will not do anything. - -Ignore attachments associated with CouchDB documents. 
- -[id="{version}-plugins-{type}s-{plugin}-initial_sequence"] -===== `initial_sequence` - - * Value type is <> - * There is no default value for this setting. - -If unspecified, Logstash will attempt to read the last sequence number -from the `sequence_path` file. If that is empty or non-existent, it will -begin with 0 (the beginning). - -If you specify this value, it is anticipated that you will -only be doing so for an initial read under special circumstances -and that you will unset this value afterwards. - -[id="{version}-plugins-{type}s-{plugin}-keep_id"] -===== `keep_id` - - * Value type is <> - * Default value is `false` - -Preserve the CouchDB document id "_id" value in the -output. - -[id="{version}-plugins-{type}s-{plugin}-keep_revision"] -===== `keep_revision` - - * Value type is <> - * Default value is `false` - -Preserve the CouchDB document revision "_rev" value in the -output. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * Default value is `nil` - -Password, if authentication is needed to connect to -CouchDB - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `5984` - -Port of your CouchDB instance. - -[id="{version}-plugins-{type}s-{plugin}-reconnect_delay"] -===== `reconnect_delay` - - * Value type is <> - * Default value is `10` - -Reconnect delay: time between reconnect attempts, in seconds. - -[id="{version}-plugins-{type}s-{plugin}-secure"] -===== `secure` - - * Value type is <> - * Default value is `false` - -Connect to CouchDB's _changes feed securely (via https) -Default: false (via http) - -[id="{version}-plugins-{type}s-{plugin}-sequence_path"] -===== `sequence_path` - - * Value type is <> - * There is no default value for this setting. - -File path where the last sequence number in the _changes -stream is stored. If unset it will write to `$HOME/.couchdb_seq` - -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * There is no default value for this setting. - -Timeout: Number of milliseconds to wait for new data before -terminating the connection. If a timeout is set it will disable -the heartbeat configuration option. - -[id="{version}-plugins-{type}s-{plugin}-username"] -===== `username` - - * Value type is <> - * Default value is `nil` - -Username, if authentication is needed to connect to -CouchDB - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/couchdb_changes-v3.1.4.asciidoc b/docs/versioned-plugins/inputs/couchdb_changes-v3.1.4.asciidoc deleted file mode 100644 index 451c2cd96..000000000 --- a/docs/versioned-plugins/inputs/couchdb_changes-v3.1.4.asciidoc +++ /dev/null @@ -1,220 +0,0 @@ -:plugin: couchdb_changes -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.4 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-couchdb_changes/blob/v3.1.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Couchdb_changes input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This CouchDB input allows you to automatically stream events from the -CouchDB http://guide.couchdb.org/draft/notifications.html[_changes] URI. -Moreover, any "future" changes will automatically be streamed as well making it easy to synchronize -your CouchDB data with any target destination - -### Upsert and delete -You can use event metadata to allow for document deletion. -All non-delete operations are treated as upserts - -### Starting at a Specific Sequence -The CouchDB input stores the last sequence number value in location defined by `sequence_path`. -You can use this fact to start or resume the stream at a particular sequence. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Couchdb_changes Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-always_reconnect>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-db>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ignore_attachments>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-initial_sequence>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keep_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keep_revision>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-secure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sequence_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-username>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-always_reconnect"] -===== `always_reconnect` - - * Value type is <> - * Default value is `true` - -Reconnect flag. When true, always try to reconnect after a failure - -[id="{version}-plugins-{type}s-{plugin}-ca_file"] -===== `ca_file` - - * Value type is <> - * There is no default value for this setting. - -Path to a CA certificate file, used to validate certificates - -[id="{version}-plugins-{type}s-{plugin}-db"] -===== `db` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The CouchDB db to connect to. -Required parameter. - -[id="{version}-plugins-{type}s-{plugin}-heartbeat"] -===== `heartbeat` - - * Value type is <> - * Default value is `1000` - -Logstash connects to CouchDB's _changes with feed=continuous -The heartbeat is how often (in milliseconds) Logstash will ping -CouchDB to ensure the connection is maintained. Changing this -setting is not recommended unless you know what you are doing. 
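-
-As a point of reference, a minimal sketch of a pipeline using this input (the host
-and database names are illustrative); it relies on the connection settings described
-in this section:
-
-[source,ruby]
-    input {
-      couchdb_changes {
-        db => "orders"
-        host => "couchdb.example.com"
-        heartbeat => 1000
-      }
-    }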
- -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"localhost"` - -IP or hostname of your CouchDB instance - -[id="{version}-plugins-{type}s-{plugin}-ignore_attachments"] -===== `ignore_attachments` - - * Value type is <> - * Default value is `true` - -Future feature! Until implemented, changing this from the default -will not do anything. - -Ignore attachments associated with CouchDB documents. - -[id="{version}-plugins-{type}s-{plugin}-initial_sequence"] -===== `initial_sequence` - - * Value type is <> - * There is no default value for this setting. - -If unspecified, Logstash will attempt to read the last sequence number -from the `sequence_path` file. If that is empty or non-existent, it will -begin with 0 (the beginning). - -If you specify this value, it is anticipated that you will -only be doing so for an initial read under special circumstances -and that you will unset this value afterwards. - -[id="{version}-plugins-{type}s-{plugin}-keep_id"] -===== `keep_id` - - * Value type is <> - * Default value is `false` - -Preserve the CouchDB document id "_id" value in the -output. - -[id="{version}-plugins-{type}s-{plugin}-keep_revision"] -===== `keep_revision` - - * Value type is <> - * Default value is `false` - -Preserve the CouchDB document revision "_rev" value in the -output. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * Default value is `nil` - -Password, if authentication is needed to connect to -CouchDB - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `5984` - -Port of your CouchDB instance. - -[id="{version}-plugins-{type}s-{plugin}-reconnect_delay"] -===== `reconnect_delay` - - * Value type is <> - * Default value is `10` - -Reconnect delay: time between reconnect attempts, in seconds. - -[id="{version}-plugins-{type}s-{plugin}-secure"] -===== `secure` - - * Value type is <> - * Default value is `false` - -Connect to CouchDB's _changes feed securely (via https) -Default: false (via http) - -[id="{version}-plugins-{type}s-{plugin}-sequence_path"] -===== `sequence_path` - - * Value type is <> - * There is no default value for this setting. - -File path where the last sequence number in the _changes -stream is stored. If unset it will write to `$HOME/.couchdb_seq` - -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * There is no default value for this setting. - -Timeout: Number of milliseconds to wait for new data before -terminating the connection. If a timeout is set it will disable -the heartbeat configuration option. 
- -[id="{version}-plugins-{type}s-{plugin}-username"] -===== `username` - - * Value type is <> - * Default value is `nil` - -Username, if authentication is needed to connect to -CouchDB - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/dead_letter_queue-index.asciidoc b/docs/versioned-plugins/inputs/dead_letter_queue-index.asciidoc deleted file mode 100644 index 5f2a99acf..000000000 --- a/docs/versioned-plugins/inputs/dead_letter_queue-index.asciidoc +++ /dev/null @@ -1,24 +0,0 @@ -:plugin: dead_letter_queue -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-09-20 -| <> | 2017-08-25 -| <> | 2017-08-15 -| <> | 2017-07-12 -| <> | 2017-06-23 -| <> | 2017-06-06 -|======================================================================= - -include::dead_letter_queue-v1.1.2.asciidoc[] -include::dead_letter_queue-v1.1.1.asciidoc[] -include::dead_letter_queue-v1.1.0.asciidoc[] -include::dead_letter_queue-v1.0.6.asciidoc[] -include::dead_letter_queue-v1.0.5.asciidoc[] -include::dead_letter_queue-v1.0.4.asciidoc[] -include::dead_letter_queue-v1.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.3.asciidoc b/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.3.asciidoc deleted file mode 100644 index 24f84678f..000000000 --- a/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.3.asciidoc +++ /dev/null @@ -1,108 +0,0 @@ -:plugin: dead_letter_queue -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v1.0.3 -:release_date: 2017-06-06 -:changelog_url: https://github.com/logstash-plugins/logstash-input-dead_letter_queue/blob/v1.0.3/CHANGELOG.md -:include_path: ../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Dead_letter_queue - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Logstash input to read events from Logstash's dead letter queue. - -[source, sh] ------------------------------------------ -input { - dead_letter_queue { - path => "/var/logstash/data/dead_letter_queue" - start_timestamp => "2017-04-04T23:40:37" - } -} ------------------------------------------ - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Dead_letter_queue Input Configuration Options - -This plugin supports the following configuration options plus the <> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-commit_offsets>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |a valid filesystem path|Yes -| <<{version}-plugins-{type}s-{plugin}-pipeline_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-start_timestamp>> |<>|No -|======================================================================= - -Also see <> for a list of options supported by all -input plugins. 
- -  - -[id="{version}-plugins-{type}s-{plugin}-commit_offsets"] -===== `commit_offsets` - - * Value type is <> - * Default value is `true` - -Specifies whether this input should commit offsets as it processes the events. -Typically you specify `false` when you want to iterate multiple times over the -events in the dead letter queue, but don't want to save state. This is when you -are exploring the events in the dead letter queue. - -[id="{version}-plugins-{type}s-{plugin}-path"] -===== `path` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Path to the dead letter queue directory that was created by a Logstash instance. -This is the path from which "dead" events are read and is typically configured -in the original Logstash instance with the setting `path.dead_letter_queue`. - -[id="{version}-plugins-{type}s-{plugin}-pipeline_id"] -===== `pipeline_id` - - * Value type is <> - * Default value is `"main"` - -ID of the pipeline whose events you want to read from. - -[id="{version}-plugins-{type}s-{plugin}-sincedb_path"] -===== `sincedb_path` - - * Value type is <> - * There is no default value for this setting. - -Path of the sincedb database file (keeps track of the current position of dead letter queue) that -will be written to disk. The default will write sincedb files to `/plugins/inputs/dead_letter_queue`. - -NOTE: This value must be a file path and not a directory path. - -[id="{version}-plugins-{type}s-{plugin}-start_timestamp"] -===== `start_timestamp` - - * Value type is <> - * There is no default value for this setting. - -Timestamp in ISO8601 format from when you want to start processing the events from. -For example, `2017-04-04T23:40:37`. - - - -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.4.asciidoc b/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.4.asciidoc deleted file mode 100644 index 9f8dc1474..000000000 --- a/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.4.asciidoc +++ /dev/null @@ -1,109 +0,0 @@ -:plugin: dead_letter_queue -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v1.0.4 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-dead_letter_queue/blob/v1.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Dead_letter_queue input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Logstash input to read events from Logstash's dead letter queue. - -[source, sh] ------------------------------------------ -input { - dead_letter_queue { - path => "/var/logstash/data/dead_letter_queue" - start_timestamp => "2017-04-04T23:40:37" - } -} ------------------------------------------ - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Dead_letter_queue Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-commit_offsets>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |a valid filesystem path|Yes -| <<{version}-plugins-{type}s-{plugin}-pipeline_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-start_timestamp>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-commit_offsets"] -===== `commit_offsets` - - * Value type is <> - * Default value is `true` - -Specifies whether this input should commit offsets as it processes the events. -Typically you specify `false` when you want to iterate multiple times over the -events in the dead letter queue, but don't want to save state. This is when you -are exploring the events in the dead letter queue. - -[id="{version}-plugins-{type}s-{plugin}-path"] -===== `path` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Path to the dead letter queue directory that was created by a Logstash instance. -This is the path from which "dead" events are read and is typically configured -in the original Logstash instance with the setting `path.dead_letter_queue`. - -[id="{version}-plugins-{type}s-{plugin}-pipeline_id"] -===== `pipeline_id` - - * Value type is <> - * Default value is `"main"` - -ID of the pipeline whose events you want to read from. - -[id="{version}-plugins-{type}s-{plugin}-sincedb_path"] -===== `sincedb_path` - - * Value type is <> - * There is no default value for this setting. - -Path of the sincedb database file (keeps track of the current position of dead letter queue) that -will be written to disk. The default will write sincedb files to `/plugins/inputs/dead_letter_queue`. - -NOTE: This value must be a file path and not a directory path. - -[id="{version}-plugins-{type}s-{plugin}-start_timestamp"] -===== `start_timestamp` - - * Value type is <> - * There is no default value for this setting. - -Timestamp in ISO8601 format from when you want to start processing the events from. -For example, `2017-04-04T23:40:37`. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.5.asciidoc b/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.5.asciidoc deleted file mode 100644 index b2078c3a1..000000000 --- a/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.5.asciidoc +++ /dev/null @@ -1,109 +0,0 @@ -:plugin: dead_letter_queue -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v1.0.5 -:release_date: 2017-07-12 -:changelog_url: https://github.com/logstash-plugins/logstash-input-dead_letter_queue/blob/v1.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Dead_letter_queue input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Logstash input to read events from Logstash's dead letter queue. - -[source, sh] ------------------------------------------ -input { - dead_letter_queue { - path => "/var/logstash/data/dead_letter_queue" - start_timestamp => "2017-04-04T23:40:37" - } -} ------------------------------------------ - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Dead_letter_queue Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-commit_offsets>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |a valid filesystem path|Yes -| <<{version}-plugins-{type}s-{plugin}-pipeline_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-start_timestamp>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-commit_offsets"] -===== `commit_offsets` - - * Value type is <> - * Default value is `true` - -Specifies whether this input should commit offsets as it processes the events. -Typically you specify `false` when you want to iterate multiple times over the -events in the dead letter queue, but don't want to save state. This is when you -are exploring the events in the dead letter queue. - -[id="{version}-plugins-{type}s-{plugin}-path"] -===== `path` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Path to the dead letter queue directory that was created by a Logstash instance. -This is the path from which "dead" events are read and is typically configured -in the original Logstash instance with the setting `path.dead_letter_queue`. - -[id="{version}-plugins-{type}s-{plugin}-pipeline_id"] -===== `pipeline_id` - - * Value type is <> - * Default value is `"main"` - -ID of the pipeline whose events you want to read from. - -[id="{version}-plugins-{type}s-{plugin}-sincedb_path"] -===== `sincedb_path` - - * Value type is <> - * There is no default value for this setting. - -Path of the sincedb database file (keeps track of the current position of dead letter queue) that -will be written to disk. The default will write sincedb files to `/plugins/inputs/dead_letter_queue`. - -NOTE: This value must be a file path and not a directory path. - -[id="{version}-plugins-{type}s-{plugin}-start_timestamp"] -===== `start_timestamp` - - * Value type is <> - * There is no default value for this setting. - -Timestamp in ISO8601 format from when you want to start processing the events from. -For example, `2017-04-04T23:40:37`. 
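-
-Putting these options together, a sketch of a reprocessing pipeline, assuming
-Elasticsearch is reachable at `localhost:9200` (illustrative): failed events from
-the `main` pipeline are read starting at a known time and re-indexed:
-
-[source,ruby]
-    input {
-      dead_letter_queue {
-        path => "/var/logstash/data/dead_letter_queue"
-        pipeline_id => "main"
-        start_timestamp => "2017-04-04T23:40:37"
-      }
-    }
-    output {
-      elasticsearch {
-        hosts => ["localhost:9200"]
-      }
-    }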
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.6.asciidoc b/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.6.asciidoc deleted file mode 100644 index 1cb2f1507..000000000 --- a/docs/versioned-plugins/inputs/dead_letter_queue-v1.0.6.asciidoc +++ /dev/null @@ -1,109 +0,0 @@ -:plugin: dead_letter_queue -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v1.0.6 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-input-dead_letter_queue/blob/v1.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Dead_letter_queue input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Logstash input to read events from Logstash's dead letter queue. - -[source, sh] ------------------------------------------ -input { - dead_letter_queue { - path => "/var/logstash/data/dead_letter_queue" - start_timestamp => "2017-04-04T23:40:37" - } -} ------------------------------------------ - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Dead_letter_queue Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-commit_offsets>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |a valid filesystem path|Yes -| <<{version}-plugins-{type}s-{plugin}-pipeline_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-start_timestamp>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-commit_offsets"] -===== `commit_offsets` - - * Value type is <> - * Default value is `true` - -Specifies whether this input should commit offsets as it processes the events. -Typically you specify `false` when you want to iterate multiple times over the -events in the dead letter queue, but don't want to save state. This is when you -are exploring the events in the dead letter queue. - -[id="{version}-plugins-{type}s-{plugin}-path"] -===== `path` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Path to the dead letter queue directory that was created by a Logstash instance. -This is the path from which "dead" events are read and is typically configured -in the original Logstash instance with the setting `path.dead_letter_queue`. - -[id="{version}-plugins-{type}s-{plugin}-pipeline_id"] -===== `pipeline_id` - - * Value type is <> - * Default value is `"main"` - -ID of the pipeline whose events you want to read from. - -[id="{version}-plugins-{type}s-{plugin}-sincedb_path"] -===== `sincedb_path` - - * Value type is <> - * There is no default value for this setting. 
- -Path of the sincedb database file (keeps track of the current position of dead letter queue) that -will be written to disk. The default will write sincedb files to `/plugins/inputs/dead_letter_queue`. - -NOTE: This value must be a file path and not a directory path. - -[id="{version}-plugins-{type}s-{plugin}-start_timestamp"] -===== `start_timestamp` - - * Value type is <> - * There is no default value for this setting. - -Timestamp in ISO8601 format from when you want to start processing the events from. -For example, `2017-04-04T23:40:37`. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/dead_letter_queue-v1.1.0.asciidoc b/docs/versioned-plugins/inputs/dead_letter_queue-v1.1.0.asciidoc deleted file mode 100644 index 5b75eb130..000000000 --- a/docs/versioned-plugins/inputs/dead_letter_queue-v1.1.0.asciidoc +++ /dev/null @@ -1,109 +0,0 @@ -:plugin: dead_letter_queue -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v1.1.0 -:release_date: 2017-08-25 -:changelog_url: https://github.com/logstash-plugins/logstash-input-dead_letter_queue/blob/v1.1.0/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Dead_letter_queue input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Logstash input to read events from Logstash's dead letter queue. - -[source, sh] ------------------------------------------ -input { - dead_letter_queue { - path => "/var/logstash/data/dead_letter_queue" - start_timestamp => "2017-04-04T23:40:37" - } -} ------------------------------------------ - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Dead_letter_queue Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-commit_offsets>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |a valid filesystem path|Yes -| <<{version}-plugins-{type}s-{plugin}-pipeline_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-start_timestamp>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-commit_offsets"] -===== `commit_offsets` - - * Value type is <> - * Default value is `true` - -Specifies whether this input should commit offsets as it processes the events. -Typically you specify `false` when you want to iterate multiple times over the -events in the dead letter queue, but don't want to save state. This is when you -are exploring the events in the dead letter queue. - -[id="{version}-plugins-{type}s-{plugin}-path"] -===== `path` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. 
-
-Path to the dead letter queue directory that was created by a Logstash instance.
-This is the path from which "dead" events are read and is typically configured
-in the original Logstash instance with the setting `path.dead_letter_queue`.
-
-[id="{version}-plugins-{type}s-{plugin}-pipeline_id"]
-===== `pipeline_id`
-
- * Value type is <>
- * Default value is `"main"`
-
-ID of the pipeline whose events you want to read from.
-
-[id="{version}-plugins-{type}s-{plugin}-sincedb_path"]
-===== `sincedb_path`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path of the sincedb database file (keeps track of the current position of the dead letter queue) that
-will be written to disk. The default will write sincedb files to `/plugins/inputs/dead_letter_queue`.
-
-NOTE: This value must be a file path and not a directory path.
-
-[id="{version}-plugins-{type}s-{plugin}-start_timestamp"]
-===== `start_timestamp`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Timestamp in ISO8601 format from which you want to start processing events.
-For example, `2017-04-04T23:40:37`.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/dead_letter_queue-v1.1.1.asciidoc b/docs/versioned-plugins/inputs/dead_letter_queue-v1.1.1.asciidoc
deleted file mode 100644
index 041bfd037..000000000
--- a/docs/versioned-plugins/inputs/dead_letter_queue-v1.1.1.asciidoc
+++ /dev/null
@@ -1,112 +0,0 @@
-:plugin: dead_letter_queue
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v1.1.1
-:release_date: 2017-09-20
-:changelog_url: https://github.com/logstash-plugins/logstash-input-dead_letter_queue/blob/v1.1.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Dead_letter_queue input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Logstash input to read events from Logstash's dead letter queue.
-
-[source,ruby]
------------------------------------------
-input {
-  dead_letter_queue {
-    path => "/var/logstash/data/dead_letter_queue"
-    start_timestamp => "2017-04-04T23:40:37"
-  }
-}
------------------------------------------
-
-
-For more information about processing events in the dead letter queue, see
-{logstash-ref}/dead-letter-queues.html[Dead Letter Queues].
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Dead_letter_queue Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-commit_offsets>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-path>> |a valid filesystem path|Yes
-| <<{version}-plugins-{type}s-{plugin}-pipeline_id>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-start_timestamp>> |<<string,string>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-commit_offsets"]
-===== `commit_offsets`
-
- * Value type is <<boolean,boolean>>
- * Default value is `true`
-
-Specifies whether this input should commit offsets as it processes the events.
-Typically you specify `false` when you want to iterate multiple times over the
-events in the dead letter queue, but don't want to save state. This is useful
-when you are exploring the events in the dead letter queue.
-
-[id="{version}-plugins-{type}s-{plugin}-path"]
-===== `path`
-
- * This is a required setting.
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-Path to the dead letter queue directory that was created by a Logstash instance.
-This is the path from which "dead" events are read and is typically configured
-in the original Logstash instance with the setting `path.dead_letter_queue`.
-
-[id="{version}-plugins-{type}s-{plugin}-pipeline_id"]
-===== `pipeline_id`
-
- * Value type is <<string,string>>
- * Default value is `"main"`
-
-ID of the pipeline whose events you want to read from.
-
-[id="{version}-plugins-{type}s-{plugin}-sincedb_path"]
-===== `sincedb_path`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Path of the sincedb database file that is written to disk to keep track of the
-current position in the dead letter queue. The default will write sincedb files
-to `<path.data>/plugins/inputs/dead_letter_queue`.
-
-NOTE: This value must be a file path and not a directory path.
-
-[id="{version}-plugins-{type}s-{plugin}-start_timestamp"]
-===== `start_timestamp`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Timestamp in ISO8601 format at which to start processing events.
-For example, `2017-04-04T23:40:37`.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/dead_letter_queue-v1.1.2.asciidoc b/docs/versioned-plugins/inputs/dead_letter_queue-v1.1.2.asciidoc
deleted file mode 100644
index 27d165723..000000000
--- a/docs/versioned-plugins/inputs/dead_letter_queue-v1.1.2.asciidoc
+++ /dev/null
@@ -1,112 +0,0 @@
-:plugin: dead_letter_queue
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v1.1.2
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-input-dead_letter_queue/blob/v1.1.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Dead_letter_queue input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Logstash input to read events from Logstash's dead letter queue.
-
-[source, sh]
------------------------------------------
-input {
-  dead_letter_queue {
-    path => "/var/logstash/data/dead_letter_queue"
-    start_timestamp => "2017-04-04T23:40:37"
-  }
-}
------------------------------------------
-
-
-For more information about processing events in the dead letter queue, see
-{logstash-ref}/dead-letter-queues.html[Dead Letter Queues].
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Dead_letter_queue Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-commit_offsets>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-path>> |a valid filesystem path|Yes
-| <<{version}-plugins-{type}s-{plugin}-pipeline_id>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-start_timestamp>> |<<string,string>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-commit_offsets"]
-===== `commit_offsets`
-
- * Value type is <<boolean,boolean>>
- * Default value is `true`
-
-Specifies whether this input should commit offsets as it processes the events.
-Typically you specify `false` when you want to iterate multiple times over the
-events in the dead letter queue, but don't want to save state. This is useful
-when you are exploring the events in the dead letter queue.
-
-[id="{version}-plugins-{type}s-{plugin}-path"]
-===== `path`
-
- * This is a required setting.
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-Path to the dead letter queue directory that was created by a Logstash instance.
-This is the path from which "dead" events are read and is typically configured
-in the original Logstash instance with the setting `path.dead_letter_queue`.
-
-[id="{version}-plugins-{type}s-{plugin}-pipeline_id"]
-===== `pipeline_id`
-
- * Value type is <<string,string>>
- * Default value is `"main"`
-
-ID of the pipeline whose events you want to read from.
-
-[id="{version}-plugins-{type}s-{plugin}-sincedb_path"]
-===== `sincedb_path`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Path of the sincedb database file that is written to disk to keep track of the
-current position in the dead letter queue. The default will write sincedb files
-to `<path.data>/plugins/inputs/dead_letter_queue`.
-
-NOTE: This value must be a file path and not a directory path.
-
-[id="{version}-plugins-{type}s-{plugin}-start_timestamp"]
-===== `start_timestamp`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Timestamp in ISO8601 format at which to start processing events.
-For example, `2017-04-04T23:40:37`.
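-
-As a sketch of how these options combine (the path shown is a placeholder for
-your own `path.dead_letter_queue` directory), the following configuration
-replays the `main` pipeline's dead letters from a given point in time without
-saving its position between runs:
-
-[source, sh]
------------------------------------------
-input {
-  dead_letter_queue {
-    # placeholder path; use your own dead letter queue directory
-    path => "/var/logstash/data/dead_letter_queue"
-    pipeline_id => "main"
-    start_timestamp => "2017-04-04T23:40:37"
-    # don't save state, so the same events can be replayed again
-    commit_offsets => false
-  }
-}
------------------------------------------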
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/drupal_dblog-index.asciidoc b/docs/versioned-plugins/inputs/drupal_dblog-index.asciidoc
deleted file mode 100644
index 7c7d4b835..000000000
--- a/docs/versioned-plugins/inputs/drupal_dblog-index.asciidoc
+++ /dev/null
@@ -1,14 +0,0 @@
-:plugin: drupal_dblog
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <<v2.0.6-plugins-inputs-drupal_dblog,v2.0.6>> | 2017-08-15
-| <<v2.0.5-plugins-inputs-drupal_dblog,v2.0.5>> | 2017-06-23
-|=======================================================================
-
-include::drupal_dblog-v2.0.6.asciidoc[]
-include::drupal_dblog-v2.0.5.asciidoc[]
-
diff --git a/docs/versioned-plugins/inputs/drupal_dblog-v2.0.5.asciidoc b/docs/versioned-plugins/inputs/drupal_dblog-v2.0.5.asciidoc
deleted file mode 100644
index ada5190bf..000000000
--- a/docs/versioned-plugins/inputs/drupal_dblog-v2.0.5.asciidoc
+++ /dev/null
@@ -1,102 +0,0 @@
-:plugin: drupal_dblog
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v2.0.5
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-input-drupal_dblog/blob/v2.0.5/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Drupal_dblog input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Retrieve watchdog log events from a Drupal installation with DBLog enabled.
-The events are pulled out directly from the database.
-The original events are not deleted, and on every consecutive run only new
-events are pulled.
-
-The last watchdog event id that was processed is stored in the Drupal
-variable table with the name "logstash_last_wid". Delete this variable or
-set it to 0 if you want to re-import all events.
-
-More info on DBLog: http://drupal.org/documentation/modules/dblog
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Drupal_dblog Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-add_usernames>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-bulksize>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-databases>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-interval>> |<<number,number>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-add_usernames"]
-===== `add_usernames`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-By default, the event only contains the current user id as a field.
-If you wish to add the username as an additional field, set this to true.
-
-[id="{version}-plugins-{type}s-{plugin}-bulksize"]
-===== `bulksize`
-
- * Value type is <<number,number>>
- * Default value is `5000`
-
-The number of log messages to fetch with each query.
-Bulk fetching is done to prevent querying huge data sets when lots of
-messages are in the database.
-
-[id="{version}-plugins-{type}s-{plugin}-databases"]
-===== `databases`
-
- * Value type is <<hash,hash>>
- * There is no default value for this setting.
-
-Specify all Drupal databases that you wish to import from.
-You can specify as many as you wish.
-The format is a hash, with a unique site name as the key, and a database
-URL as the value.
-
-Example:
-[
-  "site1", "mysql://user1:password@host1.com/databasename",
-  "other_site", "mysql://user2:password@otherhost.com/databasename",
-  ...
-]
-
-[id="{version}-plugins-{type}s-{plugin}-interval"]
-===== `interval`
-
- * Value type is <<number,number>>
- * Default value is `10`
-
-Time between checks in minutes.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/drupal_dblog-v2.0.6.asciidoc b/docs/versioned-plugins/inputs/drupal_dblog-v2.0.6.asciidoc
deleted file mode 100644
index df31047c0..000000000
--- a/docs/versioned-plugins/inputs/drupal_dblog-v2.0.6.asciidoc
+++ /dev/null
@@ -1,102 +0,0 @@
-:plugin: drupal_dblog
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v2.0.6
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-input-drupal_dblog/blob/v2.0.6/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Drupal_dblog input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Retrieve watchdog log events from a Drupal installation with DBLog enabled.
-The events are pulled out directly from the database.
-The original events are not deleted, and on every consecutive run only new
-events are pulled.
-
-The last watchdog event id that was processed is stored in the Drupal
-variable table with the name "logstash_last_wid". Delete this variable or
-set it to 0 if you want to re-import all events.
-
-More info on DBLog: http://drupal.org/documentation/modules/dblog
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Drupal_dblog Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-add_usernames>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-bulksize>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-databases>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-interval>> |<<number,number>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-add_usernames"]
-===== `add_usernames`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-By default, the event only contains the current user id as a field.
-If you wish to add the username as an additional field, set this to true.
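-
-For example, a minimal sketch of a configuration that enables this option;
-the site name and connection URL are the placeholders from the `databases`
-example below:
-
-[source,ruby]
-    input {
-      drupal_dblog {
-        # placeholder site name and database URL
-        databases => [ "site1", "mysql://user1:password@host1.com/databasename" ]
-        # resolve user ids to usernames on each event
-        add_usernames => true
-      }
-    }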
-
-[id="{version}-plugins-{type}s-{plugin}-bulksize"]
-===== `bulksize`
-
- * Value type is <<number,number>>
- * Default value is `5000`
-
-The number of log messages to fetch with each query.
-Bulk fetching is done to prevent querying huge data sets when lots of
-messages are in the database.
-
-[id="{version}-plugins-{type}s-{plugin}-databases"]
-===== `databases`
-
- * Value type is <<hash,hash>>
- * There is no default value for this setting.
-
-Specify all Drupal databases that you wish to import from.
-You can specify as many as you wish.
-The format is a hash, with a unique site name as the key, and a database
-URL as the value.
-
-Example:
-[
-  "site1", "mysql://user1:password@host1.com/databasename",
-  "other_site", "mysql://user2:password@otherhost.com/databasename",
-  ...
-]
-
-[id="{version}-plugins-{type}s-{plugin}-interval"]
-===== `interval`
-
- * Value type is <<number,number>>
- * Default value is `10`
-
-Time between checks in minutes.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/dynamodb-index.asciidoc b/docs/versioned-plugins/inputs/dynamodb-index.asciidoc
deleted file mode 100644
index e0cbc16b7..000000000
--- a/docs/versioned-plugins/inputs/dynamodb-index.asciidoc
+++ /dev/null
@@ -1,10 +0,0 @@
-:plugin: dynamodb
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-|=======================================================================
-
-
diff --git a/docs/versioned-plugins/inputs/elasticsearch-index.asciidoc b/docs/versioned-plugins/inputs/elasticsearch-index.asciidoc
deleted file mode 100644
index 8984db997..000000000
--- a/docs/versioned-plugins/inputs/elasticsearch-index.asciidoc
+++ /dev/null
@@ -1,20 +0,0 @@
-:plugin: elasticsearch
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <<v4.1.1-plugins-inputs-elasticsearch,v4.1.1>> | 2017-11-07
-| <<v4.1.0-plugins-inputs-elasticsearch,v4.1.0>> | 2017-10-27
-| <<v4.0.6-plugins-inputs-elasticsearch,v4.0.6>> | 2017-09-20
-| <<v4.0.5-plugins-inputs-elasticsearch,v4.0.5>> | 2017-08-15
-| <<v4.0.4-plugins-inputs-elasticsearch,v4.0.4>> | 2017-06-23
-|=======================================================================
-
-include::elasticsearch-v4.1.1.asciidoc[]
-include::elasticsearch-v4.1.0.asciidoc[]
-include::elasticsearch-v4.0.6.asciidoc[]
-include::elasticsearch-v4.0.5.asciidoc[]
-include::elasticsearch-v4.0.4.asciidoc[]
-
diff --git a/docs/versioned-plugins/inputs/elasticsearch-v4.0.4.asciidoc b/docs/versioned-plugins/inputs/elasticsearch-v4.0.4.asciidoc
deleted file mode 100644
index 8c79ab613..000000000
--- a/docs/versioned-plugins/inputs/elasticsearch-v4.0.4.asciidoc
+++ /dev/null
@@ -1,220 +0,0 @@
-:plugin: elasticsearch
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.0.4
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-input-elasticsearch/blob/v4.0.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Elasticsearch input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-.Compatibility Note
-[NOTE]
-================================================================================
-Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting]
-called `http.content_type.required`. If this option is set to `true`, and you
-are using Logstash 2.4 through 5.2, you need to update the Elasticsearch input
-plugin to version 4.0.2 or higher.
-
-================================================================================
-
-Read from an Elasticsearch cluster, based on search query results.
-This is useful for replaying test logs, reindexing, etc.
-
-Example:
-[source,ruby]
-    input {
-      # Read all documents from Elasticsearch matching the given query
-      elasticsearch {
-        hosts => "localhost"
-        query => '{ "query": { "match": { "statuscode": 200 } }, "sort": [ "_doc" ] }'
-      }
-    }
-
-This would create an Elasticsearch query with the following format:
-[source,json]
-    curl 'http://localhost:9200/logstash-*/_search?&scroll=1m&size=1000' -d '{
-      "query": {
-        "match": {
-          "statuscode": 200
-        }
-      },
-      "sort": [ "_doc" ]
-    }'
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Elasticsearch Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-docinfo>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-docinfo_fields>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-docinfo_target>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-hosts>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-index>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-password>> |<<password,password>>|No
-| <<{version}-plugins-{type}s-{plugin}-query>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-scroll>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-size>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-user>> |<<string,string>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-ca_file"]
-===== `ca_file`
-
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-SSL Certificate Authority file in PEM encoded format. The file must also
-include any chain certificates as necessary.
-
-[id="{version}-plugins-{type}s-{plugin}-docinfo"]
-===== `docinfo`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-If set, include Elasticsearch document information such as index, type, and
-the id in the event.
-
-Note that if you are ingesting documents with the intent to re-index them (or
-just update them), the `action` option in the elasticsearch output needs to
-know how to handle those documents. That option can be dynamically assigned
-from a field added to the metadata.
-
-Example
-[source, ruby]
-    input {
-      elasticsearch {
-        hosts => "es.production.mysite.org"
-        index => "mydata-2018.09.*"
-        query => '{ "query": { "query_string": { "query": "*" } } }'
-        size => 500
-        scroll => "5m"
-        docinfo => true
-      }
-    }
-    output {
-      elasticsearch {
-        index => "copy-of-production.%{[@metadata][_index]}"
-        document_type => "%{[@metadata][_type]}"
-        document_id => "%{[@metadata][_id]}"
-      }
-    }
-
-
-[id="{version}-plugins-{type}s-{plugin}-docinfo_fields"]
-===== `docinfo_fields`
-
- * Value type is <<array,array>>
- * Default value is `["_index", "_type", "_id"]`
-
-List of document metadata to move to the `docinfo_target` field.
-To learn more about Elasticsearch metadata fields, read
-http://www.elasticsearch.org/guide/en/elasticsearch/guide/current/_document_metadata.html[Document Metadata]
-in the Elasticsearch documentation.
-
-[id="{version}-plugins-{type}s-{plugin}-docinfo_target"]
-===== `docinfo_target`
-
- * Value type is <<string,string>>
- * Default value is `"@metadata"`
-
-Where to move the Elasticsearch document information. By default we use the
-`@metadata` field.
-
-[id="{version}-plugins-{type}s-{plugin}-hosts"]
-===== `hosts`
-
- * Value type is <<array,array>>
- * There is no default value for this setting.
-
-List of Elasticsearch hosts to use for querying. Each host can be either
-IP, HOST, IP:port, or HOST:port. The port defaults to 9200.
-
-[id="{version}-plugins-{type}s-{plugin}-index"]
-===== `index`
-
- * Value type is <<string,string>>
- * Default value is `"logstash-*"`
-
-The index or alias to search.
-
-[id="{version}-plugins-{type}s-{plugin}-password"]
-===== `password`
-
- * Value type is <<password,password>>
- * There is no default value for this setting.
-
-The password to use together with the username in the `user` option
-when authenticating to the Elasticsearch server. If set to an empty
-string authentication will be disabled.
-
-[id="{version}-plugins-{type}s-{plugin}-query"]
-===== `query`
-
- * Value type is <<string,string>>
- * Default value is `"{ \"sort\": [ \"_doc\" ] }"`
-
-The query to be executed. Read the
-https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html[Elasticsearch query DSL documentation]
-for more information.
-
-[id="{version}-plugins-{type}s-{plugin}-scroll"]
-===== `scroll`
-
- * Value type is <<string,string>>
- * Default value is `"1m"`
-
-This parameter controls the keepalive time of the scrolling
-request and initiates the scrolling process. The timeout applies per
-round trip (i.e. between the previous scroll request and the next).
-
-[id="{version}-plugins-{type}s-{plugin}-size"]
-===== `size`
-
- * Value type is <<number,number>>
- * Default value is `1000`
-
-This allows you to set the maximum number of hits returned per scroll.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl"]
-===== `ssl`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-If enabled, SSL will be used when communicating with the Elasticsearch
-server (i.e. HTTPS will be used instead of plain HTTP).
-
-[id="{version}-plugins-{type}s-{plugin}-user"]
-===== `user`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The username to use together with the password in the `password`
-option when authenticating to the Elasticsearch server. If set to an
-empty string authentication will be disabled.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/elasticsearch-v4.0.5.asciidoc b/docs/versioned-plugins/inputs/elasticsearch-v4.0.5.asciidoc
deleted file mode 100644
index a45fde460..000000000
--- a/docs/versioned-plugins/inputs/elasticsearch-v4.0.5.asciidoc
+++ /dev/null
@@ -1,230 +0,0 @@
-:plugin: elasticsearch
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-/////////////////////////////////////////// -:version: v4.0.5 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-input-elasticsearch/blob/v4.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Elasticsearch input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -.Compatibility Note -[NOTE] -================================================================================ -Starting with Elasticsearch 5.3, there's an {ref}modules-http.html[HTTP setting] -called `http.content_type.required`. If this option is set to `true`, and you -are using Logstash 2.4 through 5.2, you need to update the Elasticsearch input -plugin to version 4.0.2 or higher. - -================================================================================ - -Read from an Elasticsearch cluster, based on search query results. -This is useful for replaying test logs, reindexing, etc. - -Example: -[source,ruby] - input { - # Read all documents from Elasticsearch matching the given query - elasticsearch { - hosts => "localhost" - query => '{ "query": { "match": { "statuscode": 200 } }, "sort": [ "_doc" ] }' - } - } - -This would create an Elasticsearch query with the following format: -[source,json] - curl 'http://localhost:9200/logstash-*/_search?&scroll=1m&size=1000' -d '{ - "query": { - "match": { - "statuscode": 200 - } - }, - "sort": [ "_doc" ] - }' - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Elasticsearch Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-docinfo>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-docinfo_fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-docinfo_target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-query>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-scroll>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-ca_file"] -===== `ca_file` - - * Value type is <> - * There is no default value for this setting. - -SSL Certificate Authority file in PEM encoded format, must also -include any chain certificates as necessary. - -[id="{version}-plugins-{type}s-{plugin}-docinfo"] -===== `docinfo` - - * Value type is <> - * Default value is `false` - -If set, include Elasticsearch document information such as index, type, and -the id in the event. 
- -It might be important to note, with regards to metadata, that if you're -ingesting documents with the intent to re-index them (or just update them) -that the `action` option in the elasticsearch output wants to know how to -handle those things. It can be dynamically assigned with a field -added to the metadata. - -Example -[source, ruby] - input { - elasticsearch { - hosts => "es.production.mysite.org" - index => "mydata-2018.09.*" - query => '{ "query": { "query_string": { "query": "*" } } }' - size => 500 - scroll => "5m" - docinfo => true - } - } - output { - elasticsearch { - index => "copy-of-production.%{[@metadata][_index]}" - document_type => "%{[@metadata][_type]}" - document_id => "%{[@metadata][_id]}" - } - } - - -[id="{version}-plugins-{type}s-{plugin}-docinfo_fields"] -===== `docinfo_fields` - - * Value type is <> - * Default value is `["_index", "_type", "_id"]` - -If document metadata storage is requested by enabling the `docinfo` -option, this option lists the metadata fields to save in the current -event. See -[Document Metadata](http://www.elasticsearch.org/guide/en/elasticsearch/guide/current/_document_metadata.html) -in the Elasticsearch documentation for more information. - -[id="{version}-plugins-{type}s-{plugin}-docinfo_target"] -===== `docinfo_target` - - * Value type is <> - * Default value is `"@metadata"` - -If document metadata storage is requested by enabling the `docinfo` -option, this option names the field under which to store the metadata -fields as subfields. - -[id="{version}-plugins-{type}s-{plugin}-hosts"] -===== `hosts` - - * Value type is <> - * There is no default value for this setting. - -List of one or more Elasticsearch hosts to use for querying. Each host -can be either IP, HOST, IP:port, or HOST:port. The port defaults to -9200. - -[id="{version}-plugins-{type}s-{plugin}-index"] -===== `index` - - * Value type is <> - * Default value is `"logstash-*"` - -The index or alias to search. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -The password to use together with the username in the `user` option -when authenticating to the Elasticsearch server. If set to an empty -string authentication will be disabled. - -[id="{version}-plugins-{type}s-{plugin}-query"] -===== `query` - - * Value type is <> - * Default value is `'{ "sort": [ "_doc" ] }'` - -The query to be executed. Read the -[Elasticsearch query DSL documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html) -for more information. - -[id="{version}-plugins-{type}s-{plugin}-scroll"] -===== `scroll` - - * Value type is <> - * Default value is `"1m"` - -This parameter controls the keepalive time in seconds of the scrolling -request and initiates the scrolling process. The timeout applies per -round trip (i.e. between the previous scroll request, to the next). - -[id="{version}-plugins-{type}s-{plugin}-size"] -===== `size` - - * Value type is <> - * Default value is `1000` - -This allows you to set the maximum number of hits returned per scroll. - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` - - * Value type is <> - * Default value is `false` - -If enabled, SSL will be used when communicating with the Elasticsearch -server (i.e. HTTPS will be used instead of plain HTTP). - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * There is no default value for this setting. 
- -The username to use together with the password in the `password` -option when authenticating to the Elasticsearch server. If set to an -empty string authentication will be disabled. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/elasticsearch-v4.0.6.asciidoc b/docs/versioned-plugins/inputs/elasticsearch-v4.0.6.asciidoc deleted file mode 100644 index 5d2391848..000000000 --- a/docs/versioned-plugins/inputs/elasticsearch-v4.0.6.asciidoc +++ /dev/null @@ -1,230 +0,0 @@ -:plugin: elasticsearch -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.0.6 -:release_date: 2017-09-20 -:changelog_url: https://github.com/logstash-plugins/logstash-input-elasticsearch/blob/v4.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Elasticsearch input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -.Compatibility Note -[NOTE] -================================================================================ -Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting] -called `http.content_type.required`. If this option is set to `true`, and you -are using Logstash 2.4 through 5.2, you need to update the Elasticsearch input -plugin to version 4.0.2 or higher. - -================================================================================ - -Read from an Elasticsearch cluster, based on search query results. -This is useful for replaying test logs, reindexing, etc. - -Example: -[source,ruby] - input { - # Read all documents from Elasticsearch matching the given query - elasticsearch { - hosts => "localhost" - query => '{ "query": { "match": { "statuscode": 200 } }, "sort": [ "_doc" ] }' - } - } - -This would create an Elasticsearch query with the following format: -[source,json] - curl 'http://localhost:9200/logstash-*/_search?&scroll=1m&size=1000' -d '{ - "query": { - "match": { - "statuscode": 200 - } - }, - "sort": [ "_doc" ] - }' - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Elasticsearch Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-docinfo>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-docinfo_fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-docinfo_target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-query>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-scroll>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-ca_file"] -===== `ca_file` - - * Value type is <> - * There is no default value for this setting. - -SSL Certificate Authority file in PEM encoded format, must also -include any chain certificates as necessary. - -[id="{version}-plugins-{type}s-{plugin}-docinfo"] -===== `docinfo` - - * Value type is <> - * Default value is `false` - -If set, include Elasticsearch document information such as index, type, and -the id in the event. - -It might be important to note, with regards to metadata, that if you're -ingesting documents with the intent to re-index them (or just update them) -that the `action` option in the elasticsearch output wants to know how to -handle those things. It can be dynamically assigned with a field -added to the metadata. - -Example -[source, ruby] - input { - elasticsearch { - hosts => "es.production.mysite.org" - index => "mydata-2018.09.*" - query => '{ "query": { "query_string": { "query": "*" } } }' - size => 500 - scroll => "5m" - docinfo => true - } - } - output { - elasticsearch { - index => "copy-of-production.%{[@metadata][_index]}" - document_type => "%{[@metadata][_type]}" - document_id => "%{[@metadata][_id]}" - } - } - - -[id="{version}-plugins-{type}s-{plugin}-docinfo_fields"] -===== `docinfo_fields` - - * Value type is <> - * Default value is `["_index", "_type", "_id"]` - -If document metadata storage is requested by enabling the `docinfo` -option, this option lists the metadata fields to save in the current -event. See -[Document Metadata](http://www.elasticsearch.org/guide/en/elasticsearch/guide/current/_document_metadata.html) -in the Elasticsearch documentation for more information. - -[id="{version}-plugins-{type}s-{plugin}-docinfo_target"] -===== `docinfo_target` - - * Value type is <> - * Default value is `"@metadata"` - -If document metadata storage is requested by enabling the `docinfo` -option, this option names the field under which to store the metadata -fields as subfields. - -[id="{version}-plugins-{type}s-{plugin}-hosts"] -===== `hosts` - - * Value type is <> - * There is no default value for this setting. - -List of one or more Elasticsearch hosts to use for querying. Each host -can be either IP, HOST, IP:port, or HOST:port. The port defaults to -9200. - -[id="{version}-plugins-{type}s-{plugin}-index"] -===== `index` - - * Value type is <> - * Default value is `"logstash-*"` - -The index or alias to search. 
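-
-For example, a sketch that limits the search to a single daily index; the
-index name is illustrative:
-
-[source,ruby]
-    input {
-      elasticsearch {
-        hosts => "localhost"
-        # read only one day's index instead of the default "logstash-*"
-        index => "logstash-2017.11.07"
-      }
-    }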
- -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -The password to use together with the username in the `user` option -when authenticating to the Elasticsearch server. If set to an empty -string authentication will be disabled. - -[id="{version}-plugins-{type}s-{plugin}-query"] -===== `query` - - * Value type is <> - * Default value is `'{ "sort": [ "_doc" ] }'` - -The query to be executed. Read the -[Elasticsearch query DSL documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html) -for more information. - -[id="{version}-plugins-{type}s-{plugin}-scroll"] -===== `scroll` - - * Value type is <> - * Default value is `"1m"` - -This parameter controls the keepalive time in seconds of the scrolling -request and initiates the scrolling process. The timeout applies per -round trip (i.e. between the previous scroll request, to the next). - -[id="{version}-plugins-{type}s-{plugin}-size"] -===== `size` - - * Value type is <> - * Default value is `1000` - -This allows you to set the maximum number of hits returned per scroll. - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` - - * Value type is <> - * Default value is `false` - -If enabled, SSL will be used when communicating with the Elasticsearch -server (i.e. HTTPS will be used instead of plain HTTP). - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * There is no default value for this setting. - -The username to use together with the password in the `password` -option when authenticating to the Elasticsearch server. If set to an -empty string authentication will be disabled. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/elasticsearch-v4.1.0.asciidoc b/docs/versioned-plugins/inputs/elasticsearch-v4.1.0.asciidoc deleted file mode 100644 index d583e4fbd..000000000 --- a/docs/versioned-plugins/inputs/elasticsearch-v4.1.0.asciidoc +++ /dev/null @@ -1,230 +0,0 @@ -:plugin: elasticsearch -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.1.0 -:release_date: 2017-10-27 -:changelog_url: https://github.com/logstash-plugins/logstash-input-elasticsearch/blob/v4.1.0/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Elasticsearch input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -.Compatibility Note -[NOTE] -================================================================================ -Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting] -called `http.content_type.required`. If this option is set to `true`, and you -are using Logstash 2.4 through 5.2, you need to update the Elasticsearch input -plugin to version 4.0.2 or higher. - -================================================================================ - -Read from an Elasticsearch cluster, based on search query results. -This is useful for replaying test logs, reindexing, etc. 
- -Example: -[source,ruby] - input { - # Read all documents from Elasticsearch matching the given query - elasticsearch { - hosts => "localhost" - query => '{ "query": { "match": { "statuscode": 200 } }, "sort": [ "_doc" ] }' - } - } - -This would create an Elasticsearch query with the following format: -[source,json] - curl 'http://localhost:9200/logstash-*/_search?&scroll=1m&size=1000' -d '{ - "query": { - "match": { - "statuscode": 200 - } - }, - "sort": [ "_doc" ] - }' - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Elasticsearch Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-docinfo>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-docinfo_fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-docinfo_target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-query>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-scroll>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-ca_file"] -===== `ca_file` - - * Value type is <> - * There is no default value for this setting. - -SSL Certificate Authority file in PEM encoded format, must also -include any chain certificates as necessary. - -[id="{version}-plugins-{type}s-{plugin}-docinfo"] -===== `docinfo` - - * Value type is <> - * Default value is `false` - -If set, include Elasticsearch document information such as index, type, and -the id in the event. - -It might be important to note, with regards to metadata, that if you're -ingesting documents with the intent to re-index them (or just update them) -that the `action` option in the elasticsearch output wants to know how to -handle those things. It can be dynamically assigned with a field -added to the metadata. - -Example -[source, ruby] - input { - elasticsearch { - hosts => "es.production.mysite.org" - index => "mydata-2018.09.*" - query => '{ "query": { "query_string": { "query": "*" } } }' - size => 500 - scroll => "5m" - docinfo => true - } - } - output { - elasticsearch { - index => "copy-of-production.%{[@metadata][_index]}" - document_type => "%{[@metadata][_type]}" - document_id => "%{[@metadata][_id]}" - } - } - - -[id="{version}-plugins-{type}s-{plugin}-docinfo_fields"] -===== `docinfo_fields` - - * Value type is <> - * Default value is `["_index", "_type", "_id"]` - -If document metadata storage is requested by enabling the `docinfo` -option, this option lists the metadata fields to save in the current -event. See -[Document Metadata](http://www.elasticsearch.org/guide/en/elasticsearch/guide/current/_document_metadata.html) -in the Elasticsearch documentation for more information. 
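-
-For example, a sketch that saves only the index and document id, leaving out
-`_type`; the field list shown is illustrative:
-
-[source,ruby]
-    input {
-      elasticsearch {
-        hosts => "localhost"
-        docinfo => true
-        # keep a subset of the default ["_index", "_type", "_id"]
-        docinfo_fields => ["_index", "_id"]
-      }
-    }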
- -[id="{version}-plugins-{type}s-{plugin}-docinfo_target"] -===== `docinfo_target` - - * Value type is <> - * Default value is `"@metadata"` - -If document metadata storage is requested by enabling the `docinfo` -option, this option names the field under which to store the metadata -fields as subfields. - -[id="{version}-plugins-{type}s-{plugin}-hosts"] -===== `hosts` - - * Value type is <> - * There is no default value for this setting. - -List of one or more Elasticsearch hosts to use for querying. Each host -can be either IP, HOST, IP:port, or HOST:port. The port defaults to -9200. - -[id="{version}-plugins-{type}s-{plugin}-index"] -===== `index` - - * Value type is <> - * Default value is `"logstash-*"` - -The index or alias to search. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -The password to use together with the username in the `user` option -when authenticating to the Elasticsearch server. If set to an empty -string authentication will be disabled. - -[id="{version}-plugins-{type}s-{plugin}-query"] -===== `query` - - * Value type is <> - * Default value is `'{ "sort": [ "_doc" ] }'` - -The query to be executed. Read the -[Elasticsearch query DSL documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html) -for more information. - -[id="{version}-plugins-{type}s-{plugin}-scroll"] -===== `scroll` - - * Value type is <> - * Default value is `"1m"` - -This parameter controls the keepalive time in seconds of the scrolling -request and initiates the scrolling process. The timeout applies per -round trip (i.e. between the previous scroll request, to the next). - -[id="{version}-plugins-{type}s-{plugin}-size"] -===== `size` - - * Value type is <> - * Default value is `1000` - -This allows you to set the maximum number of hits returned per scroll. - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` - - * Value type is <> - * Default value is `false` - -If enabled, SSL will be used when communicating with the Elasticsearch -server (i.e. HTTPS will be used instead of plain HTTP). - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * There is no default value for this setting. - -The username to use together with the password in the `password` -option when authenticating to the Elasticsearch server. If set to an -empty string authentication will be disabled. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/elasticsearch-v4.1.1.asciidoc b/docs/versioned-plugins/inputs/elasticsearch-v4.1.1.asciidoc deleted file mode 100644 index 75c2c4c95..000000000 --- a/docs/versioned-plugins/inputs/elasticsearch-v4.1.1.asciidoc +++ /dev/null @@ -1,230 +0,0 @@ -:plugin: elasticsearch -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.1.1 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-elasticsearch/blob/v4.1.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Elasticsearch input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -.Compatibility Note -[NOTE] -================================================================================ -Starting with Elasticsearch 5.3, there's an {ref}/modules-http.html[HTTP setting] -called `http.content_type.required`. If this option is set to `true`, and you -are using Logstash 2.4 through 5.2, you need to update the Elasticsearch input -plugin to version 4.0.2 or higher. - -================================================================================ - -Read from an Elasticsearch cluster, based on search query results. -This is useful for replaying test logs, reindexing, etc. - -Example: -[source,ruby] - input { - # Read all documents from Elasticsearch matching the given query - elasticsearch { - hosts => "localhost" - query => '{ "query": { "match": { "statuscode": 200 } }, "sort": [ "_doc" ] }' - } - } - -This would create an Elasticsearch query with the following format: -[source,json] - curl 'http://localhost:9200/logstash-*/_search?&scroll=1m&size=1000' -d '{ - "query": { - "match": { - "statuscode": 200 - } - }, - "sort": [ "_doc" ] - }' - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Elasticsearch Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-ca_file>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-docinfo>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-docinfo_fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-docinfo_target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-query>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-scroll>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-ca_file"] -===== `ca_file` - - * Value type is <> - * There is no default value for this setting. - -SSL Certificate Authority file in PEM encoded format, must also -include any chain certificates as necessary. - -[id="{version}-plugins-{type}s-{plugin}-docinfo"] -===== `docinfo` - - * Value type is <> - * Default value is `false` - -If set, include Elasticsearch document information such as index, type, and -the id in the event. - -It might be important to note, with regards to metadata, that if you're -ingesting documents with the intent to re-index them (or just update them) -that the `action` option in the elasticsearch output wants to know how to -handle those things. It can be dynamically assigned with a field -added to the metadata. 
-
-Example
-[source, ruby]
-    input {
-      elasticsearch {
-        hosts => "es.production.mysite.org"
-        index => "mydata-2018.09.*"
-        query => '{ "query": { "query_string": { "query": "*" } } }'
-        size => 500
-        scroll => "5m"
-        docinfo => true
-      }
-    }
-    output {
-      elasticsearch {
-        index => "copy-of-production.%{[@metadata][_index]}"
-        document_type => "%{[@metadata][_type]}"
-        document_id => "%{[@metadata][_id]}"
-      }
-    }
-
-
-[id="{version}-plugins-{type}s-{plugin}-docinfo_fields"]
-===== `docinfo_fields`
-
- * Value type is <<array,array>>
- * Default value is `["_index", "_type", "_id"]`
-
-If document metadata storage is requested by enabling the `docinfo`
-option, this option lists the metadata fields to save in the current
-event. See
-http://www.elasticsearch.org/guide/en/elasticsearch/guide/current/_document_metadata.html[Document Metadata]
-in the Elasticsearch documentation for more information.
-
-[id="{version}-plugins-{type}s-{plugin}-docinfo_target"]
-===== `docinfo_target`
-
- * Value type is <<string,string>>
- * Default value is `"@metadata"`
-
-If document metadata storage is requested by enabling the `docinfo`
-option, this option names the field under which to store the metadata
-fields as subfields.
-
-[id="{version}-plugins-{type}s-{plugin}-hosts"]
-===== `hosts`
-
- * Value type is <<array,array>>
- * There is no default value for this setting.
-
-List of one or more Elasticsearch hosts to use for querying. Each host
-can be either IP, HOST, IP:port, or HOST:port. The port defaults to
-9200.
-
-[id="{version}-plugins-{type}s-{plugin}-index"]
-===== `index`
-
- * Value type is <<string,string>>
- * Default value is `"logstash-*"`
-
-The index or alias to search.
-
-[id="{version}-plugins-{type}s-{plugin}-password"]
-===== `password`
-
- * Value type is <<password,password>>
- * There is no default value for this setting.
-
-The password to use together with the username in the `user` option
-when authenticating to the Elasticsearch server. If set to an empty
-string authentication will be disabled.
-
-[id="{version}-plugins-{type}s-{plugin}-query"]
-===== `query`
-
- * Value type is <<string,string>>
- * Default value is `'{ "sort": [ "_doc" ] }'`
-
-The query to be executed. Read the
-https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl.html[Elasticsearch query DSL documentation]
-for more information.
-
-[id="{version}-plugins-{type}s-{plugin}-scroll"]
-===== `scroll`
-
- * Value type is <<string,string>>
- * Default value is `"1m"`
-
-This parameter controls the keepalive time of the scrolling
-request and initiates the scrolling process. The timeout applies per
-round trip (i.e. between the previous scroll request and the next).
-
-[id="{version}-plugins-{type}s-{plugin}-size"]
-===== `size`
-
- * Value type is <<number,number>>
- * Default value is `1000`
-
-This allows you to set the maximum number of hits returned per scroll.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl"]
-===== `ssl`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-If enabled, SSL will be used when communicating with the Elasticsearch
-server (i.e. HTTPS will be used instead of plain HTTP).
-
-[id="{version}-plugins-{type}s-{plugin}-user"]
-===== `user`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The username to use together with the password in the `password`
-option when authenticating to the Elasticsearch server. If set to an
-empty string authentication will be disabled.
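-
-As a sketch of how the security-related options fit together (the host,
-credentials, and certificate path below are placeholders), the following
-configuration queries a cluster over HTTPS with basic authentication:
-
-[source,ruby]
-    input {
-      elasticsearch {
-        hosts => "es.production.mysite.org"
-        # use HTTPS and verify the server against a local CA bundle
-        ssl => true
-        ca_file => "/path/to/ca.pem"
-        # basic authentication credentials (placeholders)
-        user => "logstash"
-        password => "changeme"
-      }
-    }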
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/eventlog-index.asciidoc b/docs/versioned-plugins/inputs/eventlog-index.asciidoc
deleted file mode 100644
index b0d16b02d..000000000
--- a/docs/versioned-plugins/inputs/eventlog-index.asciidoc
+++ /dev/null
@@ -1,14 +0,0 @@
-:plugin: eventlog
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <<v4.1.2-plugins-inputs-eventlog,v4.1.2>> | 2017-08-15
-| <<v4.1.1-plugins-inputs-eventlog,v4.1.1>> | 2017-06-23
-|=======================================================================
-
-include::eventlog-v4.1.2.asciidoc[]
-include::eventlog-v4.1.1.asciidoc[]
-
diff --git a/docs/versioned-plugins/inputs/eventlog-v4.1.1.asciidoc b/docs/versioned-plugins/inputs/eventlog-v4.1.1.asciidoc
deleted file mode 100644
index ddd773f17..000000000
--- a/docs/versioned-plugins/inputs/eventlog-v4.1.1.asciidoc
+++ /dev/null
@@ -1,74 +0,0 @@
-:plugin: eventlog
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.1.1
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-input-eventlog/blob/v4.1.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Eventlog input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This input will pull events from a http://msdn.microsoft.com/en-us/library/windows/desktop/bb309026%28v=vs.85%29.aspx[Windows Event Log].
-Note that Windows Event Logs are stored on disk in a binary format and are only accessible from the Win32 API.
-This means Logstash needs to be running as an agent on each Windows server where you wish to collect logs,
-and the logs will not be accessible across the network.
-
-To collect events from the System event log, use a config like:
-[source,ruby]
-    input {
-      eventlog {
-        type => 'Win32-EventLog'
-        logfile => 'System'
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Eventlog Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-interval>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-logfile>> |<<string,string>>, one of `["Application", "Security", "System"]`|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-interval"]
-===== `interval`
-
- * Value type is <<number,number>>
- * Default value is `1000`
-
-How frequently, in milliseconds, to check the event log for new events.
-The default is `1000` (1 second).
-
-[id="{version}-plugins-{type}s-{plugin}-logfile"]
-===== `logfile`
-
- * Value can be any of: `Application`, `Security`, `System`
- * Default value is `"Application"`
-
-Event log name. Reading the `System` and `Security` logs may require that
-additional privileges are given to the user running Logstash.
-For more information, see
-https://social.technet.microsoft.com/forums/windowsserver/en-US/d2f813db-6142-4b5b-8d86-253ebb740473/easy-way-to-read-security-log[this TechNet discussion].
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/eventlog-v4.1.2.asciidoc b/docs/versioned-plugins/inputs/eventlog-v4.1.2.asciidoc
deleted file mode 100644
index 658c35633..000000000
--- a/docs/versioned-plugins/inputs/eventlog-v4.1.2.asciidoc
+++ /dev/null
@@ -1,74 +0,0 @@
-:plugin: eventlog
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.1.2
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-input-eventlog/blob/v4.1.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Eventlog input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This input will pull events from a http://msdn.microsoft.com/en-us/library/windows/desktop/bb309026%28v=vs.85%29.aspx[Windows Event Log].
-Note that Windows Event Logs are stored on disk in a binary format and are only accessible from the Win32 API.
-This means Logstash needs to be running as an agent on each Windows server where you wish to collect logs,
-and the logs will not be accessible across the network.
-
-To collect events from the System event log, use a config like:
-[source,ruby]
-    input {
-      eventlog {
-        type => 'Win32-EventLog'
-        logfile => 'System'
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Eventlog Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-interval>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-logfile>> |<<string,string>>, one of `["Application", "Security", "System"]`|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-interval"]
-===== `interval`
-
- * Value type is <<number,number>>
- * Default value is `1000`
-
-How frequently, in milliseconds, to check the event log for new events.
-The default is `1000` (1 second).
-
-[id="{version}-plugins-{type}s-{plugin}-logfile"]
-===== `logfile`
-
- * Value can be any of: `Application`, `Security`, `System`
- * Default value is `"Application"`
-
-Event log name. Reading the `System` and `Security` logs may require that
-additional privileges are given to the user running Logstash.
-see more at: https://social.technet.microsoft.com/forums/windowsserver/en-US/d2f813db-6142-4b5b-8d86-253ebb740473/easy-way-to-read-security-log - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/example-index.asciidoc b/docs/versioned-plugins/inputs/example-index.asciidoc deleted file mode 100644 index 56c312bd8..000000000 --- a/docs/versioned-plugins/inputs/example-index.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -:plugin: example -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -|======================================================================= - - diff --git a/docs/versioned-plugins/inputs/exec-index.asciidoc b/docs/versioned-plugins/inputs/exec-index.asciidoc deleted file mode 100644 index c7c61d1c4..000000000 --- a/docs/versioned-plugins/inputs/exec-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: exec -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::exec-v3.1.5.asciidoc[] -include::exec-v3.1.4.asciidoc[] -include::exec-v3.1.3.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/exec-v3.1.3.asciidoc b/docs/versioned-plugins/inputs/exec-v3.1.3.asciidoc deleted file mode 100644 index d3295b1bd..000000000 --- a/docs/versioned-plugins/inputs/exec-v3.1.3.asciidoc +++ /dev/null @@ -1,69 +0,0 @@ -:plugin: exec -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-exec/blob/v3.1.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Exec input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Periodically run a shell command and capture the whole output as an event. - -Notes: - -* The `command` field of this event will be the command run. -* The `message` field of this event will be the entire stdout of the command. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Exec Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-command>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-command"] -===== `command` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Command to run. 
For example, `uptime` - -[id="{version}-plugins-{type}s-{plugin}-interval"] -===== `interval` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Interval to run the command. Value is in seconds. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/exec-v3.1.4.asciidoc b/docs/versioned-plugins/inputs/exec-v3.1.4.asciidoc deleted file mode 100644 index a30ca6826..000000000 --- a/docs/versioned-plugins/inputs/exec-v3.1.4.asciidoc +++ /dev/null @@ -1,69 +0,0 @@ -:plugin: exec -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.4 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-input-exec/blob/v3.1.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Exec input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Periodically run a shell command and capture the whole output as an event. - -Notes: - -* The `command` field of this event will be the command run. -* The `message` field of this event will be the entire stdout of the command. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Exec Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-command>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-command"] -===== `command` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Command to run. For example, `uptime` - -[id="{version}-plugins-{type}s-{plugin}-interval"] -===== `interval` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Interval to run the command. Value is in seconds. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/exec-v3.1.5.asciidoc b/docs/versioned-plugins/inputs/exec-v3.1.5.asciidoc deleted file mode 100644 index dcc9315c6..000000000 --- a/docs/versioned-plugins/inputs/exec-v3.1.5.asciidoc +++ /dev/null @@ -1,69 +0,0 @@ -:plugin: exec -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.5 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-exec/blob/v3.1.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Exec input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Periodically run a shell command and capture the whole output as an event. - -Notes: - -* The `command` field of this event will be the command run. -* The `message` field of this event will be the entire stdout of the command. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Exec Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-command>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-command"] -===== `command` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Command to run. For example, `uptime` - -[id="{version}-plugins-{type}s-{plugin}-interval"] -===== `interval` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Interval to run the command. Value is in seconds. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/file-index.asciidoc b/docs/versioned-plugins/inputs/file-index.asciidoc deleted file mode 100644 index a4e00321d..000000000 --- a/docs/versioned-plugins/inputs/file-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: file -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::file-v4.0.3.asciidoc[] -include::file-v4.0.2.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/file-v4.0.2.asciidoc b/docs/versioned-plugins/inputs/file-v4.0.2.asciidoc deleted file mode 100644 index 16472c859..000000000 --- a/docs/versioned-plugins/inputs/file-v4.0.2.asciidoc +++ /dev/null @@ -1,256 +0,0 @@ -:plugin: file -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.0.2 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-file/blob/v4.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== File input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Stream events from files, normally by tailing them in a manner -similar to `tail -0F` but optionally reading them from the -beginning. - -By default, each event is assumed to be one line and a line is -taken to be the text before a newline character. -Normally, logging will add a newline to the end of each line written. 
-If you would like to join multiple log lines into one event, -you'll want to use the multiline codec or filter. - -The plugin aims to track changing files and emit new content as it's -appended to each file. It's not well-suited for reading a file from -beginning to end and storing all of it in a single event (not even -with the multiline codec or filter). - -==== Reading from remote network volumes - -The file input is not tested on remote filesystems such as NFS, Samba, s3fs-fuse, etc. These -remote filesystems typically have behaviors that are very different from local filesystems and -are therefore unlikely to work correctly when used with the file input. - -==== Tracking of current position in watched files - -The plugin keeps track of the current position in each file by -recording it in a separate file named sincedb. This makes it -possible to stop and restart Logstash and have it pick up where it -left off without missing the lines that were added to the file while -Logstash was stopped. - -By default, the sincedb file is placed in the home directory of the -user running Logstash with a filename based on the filename patterns -being watched (i.e. the `path` option). Thus, changing the filename -patterns will result in a new sincedb file being used and any -existing current position state will be lost. If you change your -patterns with any frequency it might make sense to explicitly choose -a sincedb path with the `sincedb_path` option. - -A different `sincedb_path` must be used for each input. Using the same -path will cause issues. The read checkpoints for each input must be -stored in a different path so that checkpoint information is not overwritten. - -Sincedb files are text files with four columns: - -. The inode number (or equivalent). -. The major device number of the file system (or equivalent). -. The minor device number of the file system (or equivalent). -. The current byte offset within the file. - -On non-Windows systems you can obtain the inode number of a file -with e.g. `ls -li`. - -==== File rotation - -File rotation is detected and handled by this input, regardless of -whether the file is rotated via a rename or a copy operation. To -support programs that write to the rotated file for some time after -the rotation has taken place, include both the original filename and -the rotated filename (e.g. /var/log/syslog and /var/log/syslog.1) in -the filename patterns to watch (the `path` option). Note that the -rotated filename will be treated as a new file so if -`start_position` is set to 'beginning' the rotated file will be -reprocessed. - -With the default value of `start_position` ('end') any messages -written to the end of the file between the last read operation prior -to the rotation and its reopening under the new name (an interval -determined by the `stat_interval` and `discover_interval` options) -will not get picked up. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== File Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-close_older>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-discover_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclude>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ignore_older>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_open_files>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sincedb_write_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-start_position>> |<>, one of `["beginning", "end"]`|No -| <<{version}-plugins-{type}s-{plugin}-stat_interval>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-close_older"] -===== `close_older` - - * Value type is <> - * Default value is `3600` - -The file input closes any file that was last read more than the specified -number of seconds ago. -This has different implications depending on whether a file is being tailed or -read. If tailing, and there is a large time gap in incoming data, the file -can be closed (allowing other files to be opened) but will be queued for -reopening when new data is detected. If reading, the file will be closed -`close_older` seconds after the last bytes were read. -The default is 1 hour. - -[id="{version}-plugins-{type}s-{plugin}-delimiter"] -===== `delimiter` - - * Value type is <> - * Default value is `"\n"` - -Sets the line delimiter. The default is `"\n"`. - -[id="{version}-plugins-{type}s-{plugin}-discover_interval"] -===== `discover_interval` - - * Value type is <> - * Default value is `15` - -How often (in seconds) we expand the filename patterns in the -`path` option to discover new files to watch. - -[id="{version}-plugins-{type}s-{plugin}-exclude"] -===== `exclude` - - * Value type is <> - * There is no default value for this setting. - -Exclusions (matched against the filename, not full path). Filename -patterns are valid here, too. For example, if you have -[source,ruby] - path => "/var/log/*" - -You might want to exclude gzipped files: -[source,ruby] - exclude => "*.gz" - -[id="{version}-plugins-{type}s-{plugin}-ignore_older"] -===== `ignore_older` - - * Value type is <> - * There is no default value for this setting. - -When the file input discovers a file that was last modified -before the specified timespan in seconds, the file is ignored. -After its discovery, if an ignored file is modified, it is no -longer ignored and any new data is read. By default, this option is -disabled. Note this unit is in seconds. - -[id="{version}-plugins-{type}s-{plugin}-max_open_files"] -===== `max_open_files` - - * Value type is <> - * There is no default value for this setting. - -The maximum number of file handles this input consumes -at any one time. Use `close_older` to close some files if you need to -process more files than this number. This should not be set to the -maximum the OS allows, because file handles are needed for other -Logstash plugins and OS processes. -The default of 4095 is set in filewatch. - -[id="{version}-plugins-{type}s-{plugin}-path"] -===== `path` - - * This is a required setting.
- * Value type is <> - * There is no default value for this setting. - -The path(s) to the file(s) to use as an input. -You can use filename patterns here, such as `/var/log/*.log`. -If you use a pattern like `/var/log/**/*.log`, a recursive search -of `/var/log` will be done for all `*.log` files. -Paths must be absolute and cannot be relative. - -You may also configure multiple paths. See an example -on the <>. - -[id="{version}-plugins-{type}s-{plugin}-sincedb_path"] -===== `sincedb_path` - - * Value type is <> - * There is no default value for this setting. - -Path of the sincedb database file (keeps track of the current -position of monitored log files) that will be written to disk. -The default will write sincedb files to `/plugins/inputs/file` -NOTE: it must be a file path and not a directory path - -[id="{version}-plugins-{type}s-{plugin}-sincedb_write_interval"] -===== `sincedb_write_interval` - - * Value type is <> - * Default value is `15` - -How often (in seconds) to write a since database with the current position of -monitored log files. - -[id="{version}-plugins-{type}s-{plugin}-start_position"] -===== `start_position` - - * Value can be any of: `beginning`, `end` - * Default value is `"end"` - -Choose where Logstash starts initially reading files: at the beginning or -at the end. The default behavior treats files like live streams and thus -starts at the end. If you have old data you want to import, set this -to 'beginning'. - -This option only modifies "first contact" situations where a file -is new and not seen before, i.e. files that don't have a current -position recorded in a sincedb file read by Logstash. If a file -has already been seen before, this option has no effect and the -position recorded in the sincedb file will be used. - -[id="{version}-plugins-{type}s-{plugin}-stat_interval"] -===== `stat_interval` - - * Value type is <> - * Default value is `1` - -How often (in seconds) we stat files to see if they have been modified. -Increasing this interval will decrease the number of system calls we make, -but increase the time to detect new log lines. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/file-v4.0.3.asciidoc b/docs/versioned-plugins/inputs/file-v4.0.3.asciidoc deleted file mode 100644 index ccbe78e84..000000000 --- a/docs/versioned-plugins/inputs/file-v4.0.3.asciidoc +++ /dev/null @@ -1,256 +0,0 @@ -:plugin: file -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.0.3 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-input-file/blob/v4.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== File input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Stream events from files, normally by tailing them in a manner -similar to `tail -0F` but optionally reading them from the -beginning. - -By default, each event is assumed to be one line and a line is -taken to be the text before a newline character. -Normally, logging will add a newline to the end of each line written. 
-If you would like to join multiple log lines into one event, -you'll want to use the multiline codec or filter. - -The plugin aims to track changing files and emit new content as it's -appended to each file. It's not well-suited for reading a file from -beginning to end and storing all of it in a single event (not even -with the multiline codec or filter). - -==== Reading from remote network volumes - -The file input is not tested on remote filesystems such as NFS, Samba, s3fs-fuse, etc. These -remote filesystems typically have behaviors that are very different from local filesystems and -are therefore unlikely to work correctly when used with the file input. - -==== Tracking of current position in watched files - -The plugin keeps track of the current position in each file by -recording it in a separate file named sincedb. This makes it -possible to stop and restart Logstash and have it pick up where it -left off without missing the lines that were added to the file while -Logstash was stopped. - -By default, the sincedb file is placed in the home directory of the -user running Logstash with a filename based on the filename patterns -being watched (i.e. the `path` option). Thus, changing the filename -patterns will result in a new sincedb file being used and any -existing current position state will be lost. If you change your -patterns with any frequency it might make sense to explicitly choose -a sincedb path with the `sincedb_path` option. - -A different `sincedb_path` must be used for each input. Using the same -path will cause issues. The read checkpoints for each input must be -stored in a different path so that checkpoint information is not overwritten. - -Sincedb files are text files with four columns: - -. The inode number (or equivalent). -. The major device number of the file system (or equivalent). -. The minor device number of the file system (or equivalent). -. The current byte offset within the file. - -On non-Windows systems you can obtain the inode number of a file -with e.g. `ls -li`. - -==== File rotation - -File rotation is detected and handled by this input, regardless of -whether the file is rotated via a rename or a copy operation. To -support programs that write to the rotated file for some time after -the rotation has taken place, include both the original filename and -the rotated filename (e.g. /var/log/syslog and /var/log/syslog.1) in -the filename patterns to watch (the `path` option). Note that the -rotated filename will be treated as a new file so if -`start_position` is set to 'beginning' the rotated file will be -reprocessed. - -With the default value of `start_position` ('end') any messages -written to the end of the file between the last read operation prior -to the rotation and its reopening under the new name (an interval -determined by the `stat_interval` and `discover_interval` options) -will not get picked up. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== File Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-close_older>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-discover_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclude>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ignore_older>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_open_files>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sincedb_write_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-start_position>> |<>, one of `["beginning", "end"]`|No -| <<{version}-plugins-{type}s-{plugin}-stat_interval>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-close_older"] -===== `close_older` - - * Value type is <> - * Default value is `3600` - -The file input closes any file that was last read more than the specified -number of seconds ago. -This has different implications depending on whether a file is being tailed or -read. If tailing, and there is a large time gap in incoming data, the file -can be closed (allowing other files to be opened) but will be queued for -reopening when new data is detected. If reading, the file will be closed -`close_older` seconds after the last bytes were read. -The default is 1 hour. - -[id="{version}-plugins-{type}s-{plugin}-delimiter"] -===== `delimiter` - - * Value type is <> - * Default value is `"\n"` - -Sets the line delimiter. The default is `"\n"`. - -[id="{version}-plugins-{type}s-{plugin}-discover_interval"] -===== `discover_interval` - - * Value type is <> - * Default value is `15` - -How often (in seconds) we expand the filename patterns in the -`path` option to discover new files to watch. - -[id="{version}-plugins-{type}s-{plugin}-exclude"] -===== `exclude` - - * Value type is <> - * There is no default value for this setting. - -Exclusions (matched against the filename, not full path). Filename -patterns are valid here, too. For example, if you have -[source,ruby] - path => "/var/log/*" - -You might want to exclude gzipped files: -[source,ruby] - exclude => "*.gz" - -[id="{version}-plugins-{type}s-{plugin}-ignore_older"] -===== `ignore_older` - - * Value type is <> - * There is no default value for this setting. - -When the file input discovers a file that was last modified -before the specified timespan in seconds, the file is ignored. -After its discovery, if an ignored file is modified, it is no -longer ignored and any new data is read. By default, this option is -disabled. Note this unit is in seconds. - -[id="{version}-plugins-{type}s-{plugin}-max_open_files"] -===== `max_open_files` - - * Value type is <> - * There is no default value for this setting. - -The maximum number of file handles this input consumes -at any one time. Use `close_older` to close some files if you need to -process more files than this number. This should not be set to the -maximum the OS allows, because file handles are needed for other -Logstash plugins and OS processes. -The default of 4095 is set in filewatch. - -[id="{version}-plugins-{type}s-{plugin}-path"] -===== `path` - - * This is a required setting.
- * Value type is <> - * There is no default value for this setting. - -The path(s) to the file(s) to use as an input. -You can use filename patterns here, such as `/var/log/*.log`. -If you use a pattern like `/var/log/**/*.log`, a recursive search -of `/var/log` will be done for all `*.log` files. -Paths must be absolute and cannot be relative. - -You may also configure multiple paths. See an example -on the <>. - -[id="{version}-plugins-{type}s-{plugin}-sincedb_path"] -===== `sincedb_path` - - * Value type is <> - * There is no default value for this setting. - -Path of the sincedb database file (keeps track of the current -position of monitored log files) that will be written to disk. -The default will write sincedb files to `/plugins/inputs/file` -NOTE: it must be a file path and not a directory path - -[id="{version}-plugins-{type}s-{plugin}-sincedb_write_interval"] -===== `sincedb_write_interval` - - * Value type is <> - * Default value is `15` - -How often (in seconds) to write a since database with the current position of -monitored log files. - -[id="{version}-plugins-{type}s-{plugin}-start_position"] -===== `start_position` - - * Value can be any of: `beginning`, `end` - * Default value is `"end"` - -Choose where Logstash starts initially reading files: at the beginning or -at the end. The default behavior treats files like live streams and thus -starts at the end. If you have old data you want to import, set this -to 'beginning'. - -This option only modifies "first contact" situations where a file -is new and not seen before, i.e. files that don't have a current -position recorded in a sincedb file read by Logstash. If a file -has already been seen before, this option has no effect and the -position recorded in the sincedb file will be used. - -[id="{version}-plugins-{type}s-{plugin}-stat_interval"] -===== `stat_interval` - - * Value type is <> - * Default value is `1` - -How often (in seconds) we stat files to see if they have been modified. -Increasing this interval will decrease the number of system calls we make, -but increase the time to detect new log lines. 
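To see how several of these options combine in practice, here is a minimal sketch of a file input; the paths, sincedb location, and `stat_interval` shown are illustrative assumptions, not recommendations:
[source,ruby]
    input {
      file {
        path => ["/var/log/app/*.log", "/var/log/syslog", "/var/log/syslog.1"]
        exclude => "*.gz"
        start_position => "beginning"
        sincedb_path => "/var/lib/logstash/sincedb-app"
        stat_interval => 2
      }
    }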
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/fluentd-index.asciidoc b/docs/versioned-plugins/inputs/fluentd-index.asciidoc deleted file mode 100644 index 1eba3bdba..000000000 --- a/docs/versioned-plugins/inputs/fluentd-index.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -:plugin: fluentd -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -|======================================================================= - - diff --git a/docs/versioned-plugins/inputs/ganglia-index.asciidoc b/docs/versioned-plugins/inputs/ganglia-index.asciidoc deleted file mode 100644 index 0f3dc974f..000000000 --- a/docs/versioned-plugins/inputs/ganglia-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: ganglia -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::ganglia-v3.1.3.asciidoc[] -include::ganglia-v3.1.2.asciidoc[] -include::ganglia-v3.1.1.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/ganglia-v3.1.1.asciidoc b/docs/versioned-plugins/inputs/ganglia-v3.1.1.asciidoc deleted file mode 100644 index d3e0e1fe2..000000000 --- a/docs/versioned-plugins/inputs/ganglia-v3.1.1.asciidoc +++ /dev/null @@ -1,63 +0,0 @@ -:plugin: ganglia -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-ganglia/blob/v3.1.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Ganglia input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read ganglia packets from the network via udp - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Ganglia Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"0.0.0.0"` - -The address to listen on - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `8649` - -The port to listen on. Remember that ports less than 1024 (privileged -ports) may require root to use. 
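As a quick illustration of the two options above, a minimal sketch that spells out the defaults (listening on all interfaces on the standard gmond port):
[source,ruby]
    input {
      ganglia {
        host => "0.0.0.0"
        port => 8649
      }
    }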
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/ganglia-v3.1.2.asciidoc b/docs/versioned-plugins/inputs/ganglia-v3.1.2.asciidoc deleted file mode 100644 index ba7dfbe11..000000000 --- a/docs/versioned-plugins/inputs/ganglia-v3.1.2.asciidoc +++ /dev/null @@ -1,63 +0,0 @@ -:plugin: ganglia -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.2 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-input-ganglia/blob/v3.1.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Ganglia input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read ganglia packets from the network via udp - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Ganglia Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"0.0.0.0"` - -The address to listen on - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `8649` - -The port to listen on. Remember that ports less than 1024 (privileged -ports) may require root to use. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/ganglia-v3.1.3.asciidoc b/docs/versioned-plugins/inputs/ganglia-v3.1.3.asciidoc deleted file mode 100644 index 916688da2..000000000 --- a/docs/versioned-plugins/inputs/ganglia-v3.1.3.asciidoc +++ /dev/null @@ -1,63 +0,0 @@ -:plugin: ganglia -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-ganglia/blob/v3.1.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Ganglia input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read ganglia packets from the network via udp - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Ganglia Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"0.0.0.0"` - -The address to listen on - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `8649` - -The port to listen on. Remember that ports less than 1024 (privileged -ports) may require root to use. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/gelf-index.asciidoc b/docs/versioned-plugins/inputs/gelf-index.asciidoc deleted file mode 100644 index f951c59ba..000000000 --- a/docs/versioned-plugins/inputs/gelf-index.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -:plugin: gelf -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-14 -| <> | 2017-08-15 -| <> | 2017-06-27 -| <> | 2017-06-23 -|======================================================================= - -include::gelf-v3.0.7.asciidoc[] -include::gelf-v3.0.6.asciidoc[] -include::gelf-v3.0.5.asciidoc[] -include::gelf-v3.0.4.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/gelf-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/gelf-v3.0.4.asciidoc deleted file mode 100644 index f18cfbb4b..000000000 --- a/docs/versioned-plugins/inputs/gelf-v3.0.4.asciidoc +++ /dev/null @@ -1,105 +0,0 @@ -:plugin: gelf -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-gelf/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Gelf input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This input will read GELF messages as events over the network, -making it a good choice if you already use Graylog2 today. - -The main use case for this input is to leverage existing GELF -logging libraries such as the GELF log4j appender. A library used -by this plugin has a bug which prevents it parsing uncompressed data. -If you use the log4j appender you need to configure it like this to force -gzip even for small messages: - - - - - - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Gelf Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-remap>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-strip_leading_underscore>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"0.0.0.0"` - -The IP address or hostname to listen on. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `12201` - -The port to listen on. Remember that ports less than 1024 (privileged -ports) may require root to use. - -[id="{version}-plugins-{type}s-{plugin}-remap"] -===== `remap` - - * Value type is <> - * Default value is `true` - -Whether or not to remap the GELF message fields to Logstash event fields or -leave them intact. - -Remapping converts the following GELF fields to Logstash equivalents: - -* `full\_message` becomes `event.get("message")`. -* if there is no `full\_message`, `short\_message` becomes `event.get("message")`. - -[id="{version}-plugins-{type}s-{plugin}-strip_leading_underscore"] -===== `strip_leading_underscore` - - * Value type is <> - * Default value is `true` - -Whether or not to remove the leading `\_` in GELF fields or leave them -in place. (Logstash < 1.2 did not remove them by default.). Note that -GELF version 1.1 format now requires all non-standard fields to be added -as an "additional" field, beginning with an underscore. - -e.g. `\_foo` becomes `foo` - - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/gelf-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/gelf-v3.0.5.asciidoc deleted file mode 100644 index 3ef29e5de..000000000 --- a/docs/versioned-plugins/inputs/gelf-v3.0.5.asciidoc +++ /dev/null @@ -1,105 +0,0 @@ -:plugin: gelf -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-06-27 -:changelog_url: https://github.com/logstash-plugins/logstash-input-gelf/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Gelf input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This input will read GELF messages as events over the network, -making it a good choice if you already use Graylog2 today. - -The main use case for this input is to leverage existing GELF -logging libraries such as the GELF log4j appender. A library used -by this plugin has a bug which prevents it parsing uncompressed data. -If you use the log4j appender you need to configure it like this to force -gzip even for small messages: - - - - - - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Gelf Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-remap>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-strip_leading_underscore>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"0.0.0.0"` - -The IP address or hostname to listen on. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `12201` - -The port to listen on. Remember that ports less than 1024 (privileged -ports) may require root to use. - -[id="{version}-plugins-{type}s-{plugin}-remap"] -===== `remap` - - * Value type is <> - * Default value is `true` - -Whether or not to remap the GELF message fields to Logstash event fields or -leave them intact. - -Remapping converts the following GELF fields to Logstash equivalents: - -* `full\_message` becomes `event.get("message")`. -* if there is no `full\_message`, `short\_message` becomes `event.get("message")`. - -[id="{version}-plugins-{type}s-{plugin}-strip_leading_underscore"] -===== `strip_leading_underscore` - - * Value type is <> - * Default value is `true` - -Whether or not to remove the leading `\_` in GELF fields or leave them -in place. (Logstash < 1.2 did not remove them by default.). Note that -GELF version 1.1 format now requires all non-standard fields to be added -as an "additional" field, beginning with an underscore. - -e.g. `\_foo` becomes `foo` - - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/gelf-v3.0.6.asciidoc b/docs/versioned-plugins/inputs/gelf-v3.0.6.asciidoc deleted file mode 100644 index 234645899..000000000 --- a/docs/versioned-plugins/inputs/gelf-v3.0.6.asciidoc +++ /dev/null @@ -1,105 +0,0 @@ -:plugin: gelf -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.6 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-input-gelf/blob/v3.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Gelf input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This input will read GELF messages as events over the network, -making it a good choice if you already use Graylog2 today. - -The main use case for this input is to leverage existing GELF -logging libraries such as the GELF log4j appender. A library used -by this plugin has a bug which prevents it parsing uncompressed data. 
-If you use the log4j appender you need to configure it like this to force -gzip even for small messages: - - - - - - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Gelf Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-remap>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-strip_leading_underscore>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"0.0.0.0"` - -The IP address or hostname to listen on. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `12201` - -The port to listen on. Remember that ports less than 1024 (privileged -ports) may require root to use. - -[id="{version}-plugins-{type}s-{plugin}-remap"] -===== `remap` - - * Value type is <> - * Default value is `true` - -Whether or not to remap the GELF message fields to Logstash event fields or -leave them intact. - -Remapping converts the following GELF fields to Logstash equivalents: - -* `full\_message` becomes `event.get("message")`. -* if there is no `full\_message`, `short\_message` becomes `event.get("message")`. - -[id="{version}-plugins-{type}s-{plugin}-strip_leading_underscore"] -===== `strip_leading_underscore` - - * Value type is <> - * Default value is `true` - -Whether or not to remove the leading `\_` in GELF fields or leave them -in place. (Logstash < 1.2 did not remove them by default.). Note that -GELF version 1.1 format now requires all non-standard fields to be added -as an "additional" field, beginning with an underscore. - -e.g. `\_foo` becomes `foo` - - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/gelf-v3.0.7.asciidoc b/docs/versioned-plugins/inputs/gelf-v3.0.7.asciidoc deleted file mode 100644 index 098f6d9cd..000000000 --- a/docs/versioned-plugins/inputs/gelf-v3.0.7.asciidoc +++ /dev/null @@ -1,105 +0,0 @@ -:plugin: gelf -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.7 -:release_date: 2017-11-14 -:changelog_url: https://github.com/logstash-plugins/logstash-input-gelf/blob/v3.0.7/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Gelf input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This input will read GELF messages as events over the network, -making it a good choice if you already use Graylog2 today. - -The main use case for this input is to leverage existing GELF -logging libraries such as the GELF log4j appender. 
A library used -by this plugin has a bug which prevents it parsing uncompressed data. -If you use the log4j appender you need to configure it like this to force -gzip even for small messages: - - - - - - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Gelf Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-remap>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-strip_leading_underscore>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"0.0.0.0"` - -The IP address or hostname to listen on. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `12201` - -The port to listen on. Remember that ports less than 1024 (privileged -ports) may require root to use. - -[id="{version}-plugins-{type}s-{plugin}-remap"] -===== `remap` - - * Value type is <> - * Default value is `true` - -Whether or not to remap the GELF message fields to Logstash event fields or -leave them intact. - -Remapping converts the following GELF fields to Logstash equivalents: - -* `full\_message` becomes `event.get("message")`. -* if there is no `full\_message`, `short\_message` becomes `event.get("message")`. - -[id="{version}-plugins-{type}s-{plugin}-strip_leading_underscore"] -===== `strip_leading_underscore` - - * Value type is <> - * Default value is `true` - -Whether or not to remove the leading `\_` in GELF fields or leave them -in place. (Logstash < 1.2 did not remove them by default.). Note that -GELF version 1.1 format now requires all non-standard fields to be added -as an "additional" field, beginning with an underscore. - -e.g. `\_foo` becomes `foo` - - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/gemfire-index.asciidoc b/docs/versioned-plugins/inputs/gemfire-index.asciidoc deleted file mode 100644 index db38e5844..000000000 --- a/docs/versioned-plugins/inputs/gemfire-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: gemfire -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::gemfire-v2.0.6.asciidoc[] -include::gemfire-v2.0.5.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/gemfire-v2.0.5.asciidoc b/docs/versioned-plugins/inputs/gemfire-v2.0.5.asciidoc deleted file mode 100644 index 37c51a53e..000000000 --- a/docs/versioned-plugins/inputs/gemfire-v2.0.5.asciidoc +++ /dev/null @@ -1,132 +0,0 @@ -:plugin: gemfire -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v2.0.5 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-gemfire/blob/v2.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Gemfire input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read events from a GemFire region. - -GemFire is an object database. - -To use this plugin you need to add gemfire.jar to your CLASSPATH. -Using format=json requires jackson.jar too; use of continuous -queries requires antlr.jar. - -Note: this plugin has only been tested with GemFire 7.0. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Gemfire Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cache_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cache_xml_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-interest_regexp>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-query>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-region_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-serialization>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-cache_name"] -===== `cache_name` - - * Value type is <> - * Default value is `"logstash"` - -Your client cache name. - -[id="{version}-plugins-{type}s-{plugin}-cache_xml_file"] -===== `cache_xml_file` - - * Value type is <> - * Default value is `nil` - -The path to a GemFire client cache XML file. - -Example: - - - - - - - - - - - - -[id="{version}-plugins-{type}s-{plugin}-interest_regexp"] -===== `interest_regexp` - - * Value type is <> - * Default value is `".*"` - -A regexp to use when registering interest for cache events. -Ignored if a :query is specified. - -[id="{version}-plugins-{type}s-{plugin}-query"] -===== `query` - - * Value type is <> - * Default value is `nil` - -A query to run as a GemFire "continuous query"; if specified it takes -precedence over :interest_regexp, which will then be ignored. - -Important: use of continuous queries requires subscriptions to be enabled on the client pool. - -[id="{version}-plugins-{type}s-{plugin}-region_name"] -===== `region_name` - - * Value type is <> - * Default value is `"Logstash"` - -The region name. - -[id="{version}-plugins-{type}s-{plugin}-serialization"] -===== `serialization` - - * Value type is <> - * Default value is `nil` - -How the message is serialized in the cache. Can be one of `"json"` or `"plain"`; the default is plain. - -[id="{version}-plugins-{type}s-{plugin}-threads"] -===== `threads` - - * Value type is <> - * Default value is `1` - - - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/gemfire-v2.0.6.asciidoc b/docs/versioned-plugins/inputs/gemfire-v2.0.6.asciidoc deleted file mode 100644 index df87bca2d..000000000 --- a/docs/versioned-plugins/inputs/gemfire-v2.0.6.asciidoc +++ /dev/null @@ -1,132 +0,0 @@ -:plugin: gemfire -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.0.6 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-input-gemfire/blob/v2.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Gemfire input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read events from a GemFire region. - -GemFire is an object database. - -To use this plugin you need to add gemfire.jar to your CLASSPATH. -Using format=json requires jackson.jar too; use of continuous -queries requires antlr.jar. - -Note: this plugin has only been tested with GemFire 7.0. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Gemfire Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cache_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cache_xml_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-interest_regexp>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-query>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-region_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-serialization>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-cache_name"] -===== `cache_name` - - * Value type is <> - * Default value is `"logstash"` - -Your client cache name. - -[id="{version}-plugins-{type}s-{plugin}-cache_xml_file"] -===== `cache_xml_file` - - * Value type is <> - * Default value is `nil` - -The path to a GemFire client cache XML file. - -Example: - - - - - - - - - - - - -[id="{version}-plugins-{type}s-{plugin}-interest_regexp"] -===== `interest_regexp` - - * Value type is <> - * Default value is `".*"` - -A regexp to use when registering interest for cache events. -Ignored if a :query is specified. - -[id="{version}-plugins-{type}s-{plugin}-query"] -===== `query` - - * Value type is <> - * Default value is `nil` - -A query to run as a GemFire "continuous query"; if specified it takes -precedence over :interest_regexp, which will then be ignored. - -Important: use of continuous queries requires subscriptions to be enabled on the client pool.
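To make the continuous-query behavior concrete, a hypothetical sketch follows; the region name and OQL query string are invented for illustration, and subscriptions must be enabled on the client pool as noted above:
[source,ruby]
    input {
      gemfire {
        region_name => "Logstash"
        query => "SELECT * FROM /Logstash"
        serialization => "json"
      }
    }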
- -[id="{version}-plugins-{type}s-{plugin}-region_name"] -===== `region_name` - - * Value type is <> - * Default value is `"Logstash"` - -The region name - -[id="{version}-plugins-{type}s-{plugin}-serialization"] -===== `serialization` - - * Value type is <> - * Default value is `nil` - -How the message is serialized in the cache. Can be one of "json" or "plain"; default is plain - -[id="{version}-plugins-{type}s-{plugin}-threads"] -===== `threads` - - * Value type is <> - * Default value is `1` - - - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/generator-index.asciidoc b/docs/versioned-plugins/inputs/generator-index.asciidoc deleted file mode 100644 index 0e38e0ae6..000000000 --- a/docs/versioned-plugins/inputs/generator-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: generator -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::generator-v3.0.5.asciidoc[] -include::generator-v3.0.4.asciidoc[] -include::generator-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/generator-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/generator-v3.0.3.asciidoc deleted file mode 100644 index 06800174b..000000000 --- a/docs/versioned-plugins/inputs/generator-v3.0.3.asciidoc +++ /dev/null @@ -1,107 +0,0 @@ -:plugin: generator -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-generator/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Generator input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Generate random log events. - -The general intention of this is to test performance of plugins. - -An event is generated first - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Generator Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-lines>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-count"] -===== `count` - - * Value type is <> - * Default value is `0` - -Set how many messages should be generated. - -The default, `0`, means generate an unlimited number of events. 
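-
-As a quick illustration (a sketch, not from the original docs), a bounded run
-that emits a fixed number of identical events and then stops might look like
-this; the count of 1000 is an arbitrary assumption:
-
-[source,ruby]
-----------------------------------
-input {
-  generator {
-    message => "Hello world!"  # the default message; see `message` below
-    count => 1000              # stop after 1000 events instead of running forever
-  }
-}
-output { stdout { codec => rubydebug } }
-----------------------------------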
-
-[id="{version}-plugins-{type}s-{plugin}-lines"]
-===== `lines`
-
-  * Value type is <<array,array>>
-  * There is no default value for this setting.
-
-The lines to emit, in order. This option cannot be used with the 'message'
-setting.
-
-Example:
-[source,ruby]
-    input {
-      generator {
-        lines => [
-          "line 1",
-          "line 2",
-          "line 3"
-        ]
-        # Emit all lines 3 times.
-        count => 3
-      }
-    }
-
-The above will emit `line 1` then `line 2` then `line 3`, then `line 1` again, etc.
-
-[id="{version}-plugins-{type}s-{plugin}-message"]
-===== `message`
-
-  * Value type is <<string,string>>
-  * Default value is `"Hello world!"`
-
-The message string to use in the event.
-
-If you set this to `stdin` then this plugin will read a single line from
-stdin and use that as the message string for every event.
-
-Otherwise, this value will be used verbatim as the event message.
-
-[id="{version}-plugins-{type}s-{plugin}-threads"]
-===== `threads`
-
-  * Value type is <<number,number>>
-  * Default value is `1`
-
-
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/generator-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/generator-v3.0.4.asciidoc
deleted file mode 100644
index 8bdfcce00..000000000
--- a/docs/versioned-plugins/inputs/generator-v3.0.4.asciidoc
+++ /dev/null
@@ -1,107 +0,0 @@
-:plugin: generator
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.4
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-input-generator/blob/v3.0.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Generator input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Generate random log events.
-
-The general intention of this is to test performance of plugins.
-
-An event is generated first
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Generator Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-count>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-lines>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-message>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-threads>> |<<number,number>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-count"]
-===== `count`
-
-  * Value type is <<number,number>>
-  * Default value is `0`
-
-Set how many messages should be generated.
-
-The default, `0`, means generate an unlimited number of events.
-
-[id="{version}-plugins-{type}s-{plugin}-lines"]
-===== `lines`
-
-  * Value type is <<array,array>>
-  * There is no default value for this setting.
-
-The lines to emit, in order. This option cannot be used with the 'message'
-setting.
-
-Example:
-[source,ruby]
-    input {
-      generator {
-        lines => [
-          "line 1",
-          "line 2",
-          "line 3"
-        ]
-        # Emit all lines 3 times.
-        count => 3
-      }
-    }
-
-The above will emit `line 1` then `line 2` then `line 3`, then `line 1` again, etc.
-
-[id="{version}-plugins-{type}s-{plugin}-message"]
-===== `message`
-
-  * Value type is <<string,string>>
-  * Default value is `"Hello world!"`
-
-The message string to use in the event.
-
-If you set this to `stdin` then this plugin will read a single line from
-stdin and use that as the message string for every event.
-
-Otherwise, this value will be used verbatim as the event message.
-
-[id="{version}-plugins-{type}s-{plugin}-threads"]
-===== `threads`
-
-  * Value type is <<number,number>>
-  * Default value is `1`
-
-
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/generator-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/generator-v3.0.5.asciidoc
deleted file mode 100644
index 15c46d454..000000000
--- a/docs/versioned-plugins/inputs/generator-v3.0.5.asciidoc
+++ /dev/null
@@ -1,107 +0,0 @@
-:plugin: generator
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.5
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-input-generator/blob/v3.0.5/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Generator input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Generate random log events.
-
-The general intention of this is to test performance of plugins.
-
-An event is generated first
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Generator Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-count>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-lines>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-message>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-threads>> |<<number,number>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-count"]
-===== `count`
-
-  * Value type is <<number,number>>
-  * Default value is `0`
-
-Set how many messages should be generated.
-
-The default, `0`, means generate an unlimited number of events.
-
-[id="{version}-plugins-{type}s-{plugin}-lines"]
-===== `lines`
-
-  * Value type is <<array,array>>
-  * There is no default value for this setting.
-
-The lines to emit, in order. This option cannot be used with the 'message'
-setting.
-
-Example:
-[source,ruby]
-    input {
-      generator {
-        lines => [
-          "line 1",
-          "line 2",
-          "line 3"
-        ]
-        # Emit all lines 3 times.
-        count => 3
-      }
-    }
-
-The above will emit `line 1` then `line 2` then `line 3`, then `line 1` again, etc.
-
-[id="{version}-plugins-{type}s-{plugin}-message"]
-===== `message`
-
-  * Value type is <<string,string>>
-  * Default value is `"Hello world!"`
-
-The message string to use in the event.
- -If you set this to `stdin` then this plugin will read a single line from -stdin and use that as the message string for every event. - -Otherwise, this value will be used verbatim as the event message. - -[id="{version}-plugins-{type}s-{plugin}-threads"] -===== `threads` - - * Value type is <> - * Default value is `1` - - - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/github-index.asciidoc b/docs/versioned-plugins/inputs/github-index.asciidoc deleted file mode 100644 index bad8448da..000000000 --- a/docs/versioned-plugins/inputs/github-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: github -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::github-v3.0.5.asciidoc[] -include::github-v3.0.4.asciidoc[] -include::github-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/github-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/github-v3.0.3.asciidoc deleted file mode 100644 index b20fa9d36..000000000 --- a/docs/versioned-plugins/inputs/github-v3.0.3.asciidoc +++ /dev/null @@ -1,81 +0,0 @@ -:plugin: github -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-github/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Github input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read events from github webhooks - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Github Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-drop_invalid>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ip>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-secret_token>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-drop_invalid"] -===== `drop_invalid` - - * Value type is <> - * Default value is `false` - -If Secret is defined, we drop the events that don't match. -Otherwise, we'll just add an invalid tag - -[id="{version}-plugins-{type}s-{plugin}-ip"] -===== `ip` - - * Value type is <> - * Default value is `"0.0.0.0"` - -The ip to listen on - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. 
- -The port to listen on - -[id="{version}-plugins-{type}s-{plugin}-secret_token"] -===== `secret_token` - - * Value type is <> - * There is no default value for this setting. - -Your GitHub Secret Token for the webhook - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/github-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/github-v3.0.4.asciidoc deleted file mode 100644 index 38480ae4b..000000000 --- a/docs/versioned-plugins/inputs/github-v3.0.4.asciidoc +++ /dev/null @@ -1,81 +0,0 @@ -:plugin: github -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-input-github/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Github input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read events from github webhooks - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Github Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-drop_invalid>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ip>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-secret_token>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-drop_invalid"] -===== `drop_invalid` - - * Value type is <> - * Default value is `false` - -If Secret is defined, we drop the events that don't match. -Otherwise, we'll just add an invalid tag - -[id="{version}-plugins-{type}s-{plugin}-ip"] -===== `ip` - - * Value type is <> - * Default value is `"0.0.0.0"` - -The ip to listen on - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The port to listen on - -[id="{version}-plugins-{type}s-{plugin}-secret_token"] -===== `secret_token` - - * Value type is <> - * There is no default value for this setting. - -Your GitHub Secret Token for the webhook - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/github-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/github-v3.0.5.asciidoc deleted file mode 100644 index 472d2409e..000000000 --- a/docs/versioned-plugins/inputs/github-v3.0.5.asciidoc +++ /dev/null @@ -1,81 +0,0 @@ -:plugin: github -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-github/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Github input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read events from github webhooks - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Github Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-drop_invalid>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ip>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-secret_token>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-drop_invalid"] -===== `drop_invalid` - - * Value type is <> - * Default value is `false` - -If Secret is defined, we drop the events that don't match. -Otherwise, we'll just add an invalid tag - -[id="{version}-plugins-{type}s-{plugin}-ip"] -===== `ip` - - * Value type is <> - * Default value is `"0.0.0.0"` - -The ip to listen on - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The port to listen on - -[id="{version}-plugins-{type}s-{plugin}-secret_token"] -===== `secret_token` - - * Value type is <> - * There is no default value for this setting. - -Your GitHub Secret Token for the webhook - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/google_pubsub-index.asciidoc b/docs/versioned-plugins/inputs/google_pubsub-index.asciidoc deleted file mode 100644 index c1869c577..000000000 --- a/docs/versioned-plugins/inputs/google_pubsub-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: google_pubsub -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::google_pubsub-v1.0.4.asciidoc[] -include::google_pubsub-v1.0.3.asciidoc[] -include::google_pubsub-v1.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/google_pubsub-v1.0.1.asciidoc b/docs/versioned-plugins/inputs/google_pubsub-v1.0.1.asciidoc deleted file mode 100644 index e1e8636fe..000000000 --- a/docs/versioned-plugins/inputs/google_pubsub-v1.0.1.asciidoc +++ /dev/null @@ -1,213 +0,0 @@ -:plugin: google_pubsub -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-///////////////////////////////////////////
-:version: v1.0.1
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-input-google_pubsub/blob/v1.0.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="plugins-{type}-{plugin}"]
-
-=== Google_pubsub input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Author: Eric Johnson
-Date: 2016-06-01
-
-Copyright 2016 Google Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-This is a https://github.com/elastic/logstash[Logstash] input plugin for
-https://cloud.google.com/pubsub/[Google Pub/Sub]. The plugin can subscribe
-to a topic and ingest messages.
-
-The main motivation behind the development of this plugin was to ingest
-https://cloud.google.com/logging/[Stackdriver Logging] messages via the
-https://cloud.google.com/logging/docs/export/using_exported_logs[Exported Logs]
-feature of Stackdriver Logging.
-
-==== Prerequisites
-
-You must first create a Google Cloud Platform project and enable the
-Google Pub/Sub API. If you intend to use the plugin to ingest Stackdriver Logging
-messages, you must also enable the Stackdriver Logging API and configure log
-exporting to Pub/Sub. There is plentiful information on
-https://cloud.google.com/ to get started:
-
-- Google Cloud Platform Projects and https://cloud.google.com/docs/overview/[Overview]
-- Google Cloud Pub/Sub https://cloud.google.com/pubsub/[documentation]
-- Stackdriver Logging https://cloud.google.com/logging/[documentation]
-
-==== Cloud Pub/Sub
-
-Currently, this module requires you to create a `topic` manually and specify
-it in the logstash config file. You must also specify a `subscription`, but
-the plugin will attempt to create the pull-based `subscription` on its own.
-
-All messages received from Pub/Sub will be converted to a logstash `event`
-and added to the processing pipeline queue. All Pub/Sub messages will be
-`acknowledged` and removed from the Pub/Sub `topic` (please see more about
-https://cloud.google.com/pubsub/overview#concepts[Pub/Sub concepts]).
-
-It is generally assumed that incoming messages will be in JSON and added to
-the logstash `event` as-is. However, if a plain text message is received, the
-plugin will return the raw text as `raw_message` in the logstash `event`.
-
-==== Authentication
-
-You have two options for authentication depending on where you run Logstash.
-
-1. If you are running Logstash outside of Google Cloud Platform, then you will
-need to create a Google Cloud Platform Service Account and specify the full
-path to the JSON private key file in your config. You must assign sufficient
-roles to the Service Account to create a subscription and to pull messages
-from the subscription. Learn more about GCP Service Accounts and IAM roles
-here:
-
-  - Google Cloud Platform IAM https://cloud.google.com/iam/[overview]
-  - Creating Service Accounts https://cloud.google.com/iam/docs/creating-managing-service-accounts[overview]
-  - Granting Roles https://cloud.google.com/iam/docs/granting-roles-to-service-accounts[overview]
-
-2. If you are running Logstash on a Google Compute Engine instance, you may opt
-to use Application Default Credentials. In this case, you will not need to
-specify a JSON private key file in your config.
-
-==== Stackdriver Logging (optional)
-
-If you intend to use the logstash plugin for Stackdriver Logging message
-ingestion, you must first manually set up the Export option to Cloud Pub/Sub and
-then manually create the `topic`. Please see the more detailed instructions at
-https://cloud.google.com/logging/docs/export/using_exported_logs[Exported Logs]
-and ensure that the https://cloud.google.com/logging/docs/export/configure_export#manual-access-pubsub[necessary permissions]
-have also been manually configured.
-
-Logging messages from Stackdriver Logging exported to Pub/Sub are received as
-JSON and converted to a logstash `event` as-is in
-https://cloud.google.com/logging/docs/export/using_exported_logs#log_entries_in_google_pubsub_topics[this format].
-
-==== Sample Configuration
-
-Below is a copy of the included `example.conf-tmpl` file that shows a basic
-configuration for this plugin.
-
-[source,ruby]
-----------------------------------
-input {
-    google_pubsub {
-        # Your GCP project id (name)
-        project_id => "my-project-1234"
-
-        # The topic name below is currently hard-coded in the plugin. You
-        # must first create this topic by hand and ensure you are exporting
-        # logging to this pubsub topic.
-        topic => "logstash-input-dev"
-
-        # The subscription name is customizable. The plugin will attempt to
-        # create the subscription (but use the hard-coded topic name above).
-        subscription => "logstash-sub"
-
-        # If you are running logstash within GCE, it will use
-        # Application Default Credentials and use GCE's metadata
-        # service to fetch tokens. However, if you are running logstash
-        # outside of GCE, you will need to specify the service account's
-        # JSON key file below.
-        #json_key_file => "/home/erjohnso/pkey.json"
-    }
-}
-output { stdout { codec => rubydebug } }
-----------------------------------
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Google_pubsub Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-json_key_file>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-max_messages>> |<<number,number>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-project_id>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-subscription>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-topic>> |<<string,string>>|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-json_key_file"]
-===== `json_key_file`
-
-  * Value type is <<path,path>>
-  * There is no default value for this setting.
-
-If logstash is running within Google Compute Engine, the plugin will use
-GCE's Application Default Credentials. Outside of GCE, you will need to
-specify a Service Account JSON key file.
-
-[id="{version}-plugins-{type}s-{plugin}-max_messages"]
-===== `max_messages`
-
-  * This is a required setting.
-  * Value type is <<number,number>>
-  * Default value is `5`
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-project_id"]
-===== `project_id`
-
-  * This is a required setting.
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-Google Cloud Project ID (name, not number)
-
-[id="{version}-plugins-{type}s-{plugin}-subscription"]
-===== `subscription`
-
-  * This is a required setting.
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-topic"]
-===== `topic`
-
-  * This is a required setting.
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-Google Cloud Pub/Sub Topic and Subscription.
-Note that the topic must be created manually, with the Cloud Logging
-export to Pub/Sub configured to use the defined topic.
-The subscription will be created automatically by the plugin.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/google_pubsub-v1.0.3.asciidoc b/docs/versioned-plugins/inputs/google_pubsub-v1.0.3.asciidoc
deleted file mode 100644
index bcf677ac6..000000000
--- a/docs/versioned-plugins/inputs/google_pubsub-v1.0.3.asciidoc
+++ /dev/null
@@ -1,213 +0,0 @@
-:plugin: google_pubsub
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v1.0.3
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-input-google_pubsub/blob/v1.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Google_pubsub input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Author: Eric Johnson
-Date: 2016-06-01
-
-Copyright 2016 Google Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-This is a https://github.com/elastic/logstash[Logstash] input plugin for
-https://cloud.google.com/pubsub/[Google Pub/Sub]. The plugin can subscribe
-to a topic and ingest messages.
-
-The main motivation behind the development of this plugin was to ingest
-https://cloud.google.com/logging/[Stackdriver Logging] messages via the
-https://cloud.google.com/logging/docs/export/using_exported_logs[Exported Logs]
-feature of Stackdriver Logging.
-
-==== Prerequisites
-
-You must first create a Google Cloud Platform project and enable the
-Google Pub/Sub API. If you intend to use the plugin to ingest Stackdriver Logging
-messages, you must also enable the Stackdriver Logging API and configure log
-exporting to Pub/Sub. There is plentiful information on
-https://cloud.google.com/ to get started:
-
-- Google Cloud Platform Projects and https://cloud.google.com/docs/overview/[Overview]
-- Google Cloud Pub/Sub https://cloud.google.com/pubsub/[documentation]
-- Stackdriver Logging https://cloud.google.com/logging/[documentation]
-
-==== Cloud Pub/Sub
-
-Currently, this module requires you to create a `topic` manually and specify
-it in the logstash config file. You must also specify a `subscription`, but
-the plugin will attempt to create the pull-based `subscription` on its own.
-
-All messages received from Pub/Sub will be converted to a logstash `event`
-and added to the processing pipeline queue. All Pub/Sub messages will be
-`acknowledged` and removed from the Pub/Sub `topic` (please see more about
-https://cloud.google.com/pubsub/overview#concepts[Pub/Sub concepts]).
-
-It is generally assumed that incoming messages will be in JSON and added to
-the logstash `event` as-is. However, if a plain text message is received, the
-plugin will return the raw text as `raw_message` in the logstash `event`.
-
-==== Authentication
-
-You have two options for authentication depending on where you run Logstash.
-
-1. If you are running Logstash outside of Google Cloud Platform, then you will
-need to create a Google Cloud Platform Service Account and specify the full
-path to the JSON private key file in your config. You must assign sufficient
-roles to the Service Account to create a subscription and to pull messages
-from the subscription. Learn more about GCP Service Accounts and IAM roles
-here:
-
-  - Google Cloud Platform IAM https://cloud.google.com/iam/[overview]
-  - Creating Service Accounts https://cloud.google.com/iam/docs/creating-managing-service-accounts[overview]
-  - Granting Roles https://cloud.google.com/iam/docs/granting-roles-to-service-accounts[overview]
-
-2. If you are running Logstash on a Google Compute Engine instance, you may opt
-to use Application Default Credentials. In this case, you will not need to
-specify a JSON private key file in your config.
-
-==== Stackdriver Logging (optional)
-
-If you intend to use the logstash plugin for Stackdriver Logging message
-ingestion, you must first manually set up the Export option to Cloud Pub/Sub and
-then manually create the `topic`. Please see the more detailed instructions at
-https://cloud.google.com/logging/docs/export/using_exported_logs[Exported Logs]
-and ensure that the https://cloud.google.com/logging/docs/export/configure_export#manual-access-pubsub[necessary permissions]
-have also been manually configured.
-
-Logging messages from Stackdriver Logging exported to Pub/Sub are received as
-JSON and converted to a logstash `event` as-is in
-https://cloud.google.com/logging/docs/export/using_exported_logs#log_entries_in_google_pubsub_topics[this format].
-
-==== Sample Configuration
-
-Below is a copy of the included `example.conf-tmpl` file that shows a basic
-configuration for this plugin.
-
-[source,ruby]
-----------------------------------
-input {
-    google_pubsub {
-        # Your GCP project id (name)
-        project_id => "my-project-1234"
-
-        # The topic name below is currently hard-coded in the plugin. You
-        # must first create this topic by hand and ensure you are exporting
-        # logging to this pubsub topic.
-        topic => "logstash-input-dev"
-
-        # The subscription name is customizable. The plugin will attempt to
-        # create the subscription (but use the hard-coded topic name above).
-        subscription => "logstash-sub"
-
-        # If you are running logstash within GCE, it will use
-        # Application Default Credentials and use GCE's metadata
-        # service to fetch tokens. However, if you are running logstash
-        # outside of GCE, you will need to specify the service account's
-        # JSON key file below.
-        #json_key_file => "/home/erjohnso/pkey.json"
-    }
-}
-output { stdout { codec => rubydebug } }
-----------------------------------
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Google_pubsub Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-json_key_file>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-max_messages>> |<<number,number>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-project_id>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-subscription>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-topic>> |<<string,string>>|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-json_key_file"]
-===== `json_key_file`
-
-  * Value type is <<path,path>>
-  * There is no default value for this setting.
-
-If logstash is running within Google Compute Engine, the plugin will use
-GCE's Application Default Credentials. Outside of GCE, you will need to
-specify a Service Account JSON key file.
-
-[id="{version}-plugins-{type}s-{plugin}-max_messages"]
-===== `max_messages`
-
-  * This is a required setting.
-  * Value type is <<number,number>>
-  * Default value is `5`
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-project_id"]
-===== `project_id`
-
-  * This is a required setting.
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-Google Cloud Project ID (name, not number)
-
-[id="{version}-plugins-{type}s-{plugin}-subscription"]
-===== `subscription`
-
-  * This is a required setting.
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-topic"]
-===== `topic`
-
-  * This is a required setting.
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-Google Cloud Pub/Sub Topic and Subscription.
-Note that the topic must be created manually, with the Cloud Logging
-export to Pub/Sub configured to use the defined topic.
-The subscription will be created automatically by the plugin.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/google_pubsub-v1.0.4.asciidoc b/docs/versioned-plugins/inputs/google_pubsub-v1.0.4.asciidoc
deleted file mode 100644
index 3f869429b..000000000
--- a/docs/versioned-plugins/inputs/google_pubsub-v1.0.4.asciidoc
+++ /dev/null
@@ -1,213 +0,0 @@
-:plugin: google_pubsub
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v1.0.4
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-input-google_pubsub/blob/v1.0.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Google_pubsub input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Author: Eric Johnson
-Date: 2016-06-01
-
-Copyright 2016 Google Inc.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-This is a https://github.com/elastic/logstash[Logstash] input plugin for
-https://cloud.google.com/pubsub/[Google Pub/Sub]. The plugin can subscribe
-to a topic and ingest messages.
-
-The main motivation behind the development of this plugin was to ingest
-https://cloud.google.com/logging/[Stackdriver Logging] messages via the
-https://cloud.google.com/logging/docs/export/using_exported_logs[Exported Logs]
-feature of Stackdriver Logging.
-
-==== Prerequisites
-
-You must first create a Google Cloud Platform project and enable the
-Google Pub/Sub API. If you intend to use the plugin to ingest Stackdriver Logging
-messages, you must also enable the Stackdriver Logging API and configure log
-exporting to Pub/Sub. There is plentiful information on
-https://cloud.google.com/ to get started:
-
-- Google Cloud Platform Projects and https://cloud.google.com/docs/overview/[Overview]
-- Google Cloud Pub/Sub https://cloud.google.com/pubsub/[documentation]
-- Stackdriver Logging https://cloud.google.com/logging/[documentation]
-
-==== Cloud Pub/Sub
-
-Currently, this module requires you to create a `topic` manually and specify
-it in the logstash config file. You must also specify a `subscription`, but
-the plugin will attempt to create the pull-based `subscription` on its own.
-
-All messages received from Pub/Sub will be converted to a logstash `event`
-and added to the processing pipeline queue. All Pub/Sub messages will be
-`acknowledged` and removed from the Pub/Sub `topic` (please see more about
-https://cloud.google.com/pubsub/overview#concepts[Pub/Sub concepts]).
-
-It is generally assumed that incoming messages will be in JSON and added to
-the logstash `event` as-is. However, if a plain text message is received, the
-plugin will return the raw text as `raw_message` in the logstash `event`.
-
-==== Authentication
-
-You have two options for authentication depending on where you run Logstash.
-
-1. If you are running Logstash outside of Google Cloud Platform, then you will
-need to create a Google Cloud Platform Service Account and specify the full
-path to the JSON private key file in your config. You must assign sufficient
-roles to the Service Account to create a subscription and to pull messages
-from the subscription. Learn more about GCP Service Accounts and IAM roles
-here:
-
-  - Google Cloud Platform IAM https://cloud.google.com/iam/[overview]
-  - Creating Service Accounts https://cloud.google.com/iam/docs/creating-managing-service-accounts[overview]
-  - Granting Roles https://cloud.google.com/iam/docs/granting-roles-to-service-accounts[overview]
-
-2. If you are running Logstash on a Google Compute Engine instance, you may opt
-to use Application Default Credentials. In this case, you will not need to
-specify a JSON private key file in your config.
-
-==== Stackdriver Logging (optional)
-
-If you intend to use the logstash plugin for Stackdriver Logging message
-ingestion, you must first manually set up the Export option to Cloud Pub/Sub and
-then manually create the `topic`. Please see the more detailed instructions at
-https://cloud.google.com/logging/docs/export/using_exported_logs[Exported Logs]
-and ensure that the https://cloud.google.com/logging/docs/export/configure_export#manual-access-pubsub[necessary permissions]
-have also been manually configured.
-
-Logging messages from Stackdriver Logging exported to Pub/Sub are received as
-JSON and converted to a logstash `event` as-is in
-https://cloud.google.com/logging/docs/export/using_exported_logs#log_entries_in_google_pubsub_topics[this format].
-
-==== Sample Configuration
-
-Below is a copy of the included `example.conf-tmpl` file that shows a basic
-configuration for this plugin.
-
-[source,ruby]
-----------------------------------
-input {
-    google_pubsub {
-        # Your GCP project id (name)
-        project_id => "my-project-1234"
-
-        # The topic name below is currently hard-coded in the plugin. You
-        # must first create this topic by hand and ensure you are exporting
-        # logging to this pubsub topic.
-        topic => "logstash-input-dev"
-
-        # The subscription name is customizable. The plugin will attempt to
-        # create the subscription (but use the hard-coded topic name above).
-        subscription => "logstash-sub"
-
-        # If you are running logstash within GCE, it will use
-        # Application Default Credentials and use GCE's metadata
-        # service to fetch tokens. However, if you are running logstash
-        # outside of GCE, you will need to specify the service account's
-        # JSON key file below.
-        #json_key_file => "/home/erjohnso/pkey.json"
-    }
-}
-output { stdout { codec => rubydebug } }
-----------------------------------
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Google_pubsub Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-json_key_file>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-max_messages>> |<<number,number>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-project_id>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-subscription>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-topic>> |<<string,string>>|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-json_key_file"]
-===== `json_key_file`
-
-  * Value type is <<path,path>>
-  * There is no default value for this setting.
-
-If logstash is running within Google Compute Engine, the plugin will use
-GCE's Application Default Credentials. Outside of GCE, you will need to
-specify a Service Account JSON key file.
-
-[id="{version}-plugins-{type}s-{plugin}-max_messages"]
-===== `max_messages`
-
-  * This is a required setting.
-  * Value type is <<number,number>>
-  * Default value is `5`
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-project_id"]
-===== `project_id`
-
-  * This is a required setting.
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-Google Cloud Project ID (name, not number)
-
-[id="{version}-plugins-{type}s-{plugin}-subscription"]
-===== `subscription`
-
-  * This is a required setting.
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-topic"]
-===== `topic`
-
-  * This is a required setting.
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-Google Cloud Pub/Sub Topic and Subscription.
-Note that the topic must be created manually, with the Cloud Logging
-export to Pub/Sub configured to use the defined topic.
-The subscription will be created automatically by the plugin.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/googleanalytics-index.asciidoc b/docs/versioned-plugins/inputs/googleanalytics-index.asciidoc
deleted file mode 100644
index 65e18bfaa..000000000
--- a/docs/versioned-plugins/inputs/googleanalytics-index.asciidoc
+++ /dev/null
@@ -1,10 +0,0 @@
-:plugin: googleanalytics
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-|=======================================================================
-
-
diff --git a/docs/versioned-plugins/inputs/graphite-index.asciidoc b/docs/versioned-plugins/inputs/graphite-index.asciidoc
deleted file mode 100644
index acdaca84a..000000000
--- a/docs/versioned-plugins/inputs/graphite-index.asciidoc
+++ /dev/null
@@ -1,14 +0,0 @@
-:plugin: graphite
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-08-21
-| <> | 2017-06-23
-|=======================================================================
-
-include::graphite-v3.0.4.asciidoc[]
-include::graphite-v3.0.3.asciidoc[]
-
diff --git a/docs/versioned-plugins/inputs/graphite-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/graphite-v3.0.3.asciidoc
deleted file mode 100644
index d3965c032..000000000
--- a/docs/versioned-plugins/inputs/graphite-v3.0.3.asciidoc
+++ /dev/null
@@ -1,175 +0,0 @@
-:plugin: graphite
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.3
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-input-graphite/blob/v3.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="plugins-{type}-{plugin}"]
-
-=== Graphite input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Receive graphite metrics. This plugin understands the text-based graphite
-carbon protocol.
Both `N` and `specific-timestamp` forms are supported, example: -[source,ruby] - mysql.slow_query.count 204 N - haproxy.live_backends 7 1364608909 - -`N` means `now` for a timestamp. This plugin also supports having the time -specified in the metric payload: - -For every metric received from a client, a single event will be emitted with -the metric name as the field (like `mysql.slow_query.count`) and the metric -value as the field's value. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Graphite Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-data_timeout"] -===== `data_timeout` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * Default value is `-1` - - - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"0.0.0.0"` - -Read events over a TCP socket. - -Like stdin and file inputs, each event is assumed to be one line of text. - -Can either accept connections from clients or connect to a server, -depending on `mode`. -When mode is `server`, the address to listen on. -When mode is `client`, the address to connect to. - -[id="{version}-plugins-{type}s-{plugin}-mode"] -===== `mode` - - * Value can be any of: `server`, `client` - * Default value is `"server"` - -Mode to operate in. `server` listens for client connections, -`client` connects to a server. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -When mode is `server`, the port to listen on. -When mode is `client`, the port to connect to. - -[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"] -===== `proxy_protocol` - - * Value type is <> - * Default value is `false` - -Proxy protocol support, only v1 is supported at this time -http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt - -[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"] -===== `ssl_cacert` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * There is no default value for this setting. - -The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. 
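-
-To tie the `mode`, `host`, and `port` settings together, here is a minimal
-sketch (not from the original docs) of a server-mode listener; the choice of
-2003, the conventional carbon plaintext port, is an assumption:
-
-[source,ruby]
-----------------------------------
-input {
-  graphite {
-    mode => "server"   # listen for connections from carbon clients
-    host => "0.0.0.0"  # listen on all interfaces (the default)
-    port => 2003       # assumed carbon plaintext port; use whatever your setup sends to
-  }
-}
-----------------------------------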
- -[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] -===== `ssl_cert` - - * Value type is <> - * There is no default value for this setting. - -SSL certificate path - -[id="{version}-plugins-{type}s-{plugin}-ssl_enable"] -===== `ssl_enable` - - * Value type is <> - * Default value is `false` - -Enable SSL (must be set for other `ssl_` options to take effect). - -[id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"] -===== `ssl_extra_chain_certs` - - * Value type is <> - * Default value is `[]` - -An Array of extra X509 certificates to be added to the certificate chain. -Useful when the CA chain is not necessary in the system store. - -[id="{version}-plugins-{type}s-{plugin}-ssl_key"] -===== `ssl_key` - - * Value type is <> - * There is no default value for this setting. - -SSL key path - -[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] -===== `ssl_key_passphrase` - - * Value type is <> - * Default value is `nil` - -SSL key passphrase - -[id="{version}-plugins-{type}s-{plugin}-ssl_verify"] -===== `ssl_verify` - - * Value type is <> - * Default value is `true` - -Verify the identity of the other end of the SSL connection against the CA. -For input, sets the field `sslsubject` to that of the client certificate. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/graphite-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/graphite-v3.0.4.asciidoc deleted file mode 100644 index 87e42fda3..000000000 --- a/docs/versioned-plugins/inputs/graphite-v3.0.4.asciidoc +++ /dev/null @@ -1,175 +0,0 @@ -:plugin: graphite -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-08-21 -:changelog_url: https://github.com/logstash-plugins/logstash-input-graphite/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Graphite input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Receive graphite metrics. This plugin understands the text-based graphite -carbon protocol. Both `N` and `specific-timestamp` forms are supported, example: -[source,ruby] - mysql.slow_query.count 204 N - haproxy.live_backends 7 1364608909 - -`N` means `now` for a timestamp. This plugin also supports having the time -specified in the metric payload: - -For every metric received from a client, a single event will be emitted with -the metric name as the field (like `mysql.slow_query.count`) and the metric -value as the field's value. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Graphite Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-data_timeout"] -===== `data_timeout` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * Default value is `-1` - - - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"0.0.0.0"` - -Read events over a TCP socket. - -Like stdin and file inputs, each event is assumed to be one line of text. - -Can either accept connections from clients or connect to a server, -depending on `mode`. -When mode is `server`, the address to listen on. -When mode is `client`, the address to connect to. - -[id="{version}-plugins-{type}s-{plugin}-mode"] -===== `mode` - - * Value can be any of: `server`, `client` - * Default value is `"server"` - -Mode to operate in. `server` listens for client connections, -`client` connects to a server. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -When mode is `server`, the port to listen on. -When mode is `client`, the port to connect to. - -[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"] -===== `proxy_protocol` - - * Value type is <> - * Default value is `false` - -Proxy protocol support, only v1 is supported at this time -http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt - -[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"] -===== `ssl_cacert` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * There is no default value for this setting. - -The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. - -[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] -===== `ssl_cert` - - * Value type is <> - * There is no default value for this setting. - -SSL certificate path - -[id="{version}-plugins-{type}s-{plugin}-ssl_enable"] -===== `ssl_enable` - - * Value type is <> - * Default value is `false` - -Enable SSL (must be set for other `ssl_` options to take effect). - -[id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"] -===== `ssl_extra_chain_certs` - - * Value type is <> - * Default value is `[]` - -An Array of extra X509 certificates to be added to the certificate chain. -Useful when the CA chain is not necessary in the system store. 
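-
-For the `ssl_*` settings, a minimal sketch (not from the original docs) of an
-encrypted listener might look like the following; the certificate and key
-paths are illustrative assumptions:
-
-[source,ruby]
-----------------------------------
-input {
-  graphite {
-    mode => "server"
-    port => 2003                                     # assumed port
-    ssl_enable => true                               # required for the other ssl_* options to take effect
-    ssl_cert => "/etc/logstash/certs/graphite.crt"   # assumed certificate path
-    ssl_key => "/etc/logstash/certs/graphite.key"    # assumed key path
-    ssl_extra_chain_certs => ["/etc/logstash/certs/intermediate-ca.crt"]  # assumed intermediate CA
-  }
-}
-----------------------------------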
- -[id="{version}-plugins-{type}s-{plugin}-ssl_key"] -===== `ssl_key` - - * Value type is <> - * There is no default value for this setting. - -SSL key path - -[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] -===== `ssl_key_passphrase` - - * Value type is <> - * Default value is `nil` - -SSL key passphrase - -[id="{version}-plugins-{type}s-{plugin}-ssl_verify"] -===== `ssl_verify` - - * Value type is <> - * Default value is `true` - -Verify the identity of the other end of the SSL connection against the CA. -For input, sets the field `sslsubject` to that of the client certificate. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/heartbeat-index.asciidoc b/docs/versioned-plugins/inputs/heartbeat-index.asciidoc deleted file mode 100644 index 556dcb118..000000000 --- a/docs/versioned-plugins/inputs/heartbeat-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: heartbeat -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::heartbeat-v3.0.5.asciidoc[] -include::heartbeat-v3.0.4.asciidoc[] -include::heartbeat-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/heartbeat-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/heartbeat-v3.0.3.asciidoc deleted file mode 100644 index a56d6b6bc..000000000 --- a/docs/versioned-plugins/inputs/heartbeat-v3.0.3.asciidoc +++ /dev/null @@ -1,97 +0,0 @@ -:plugin: heartbeat -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-heartbeat/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Heartbeat input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Generate heartbeat messages. - -The general intention of this is to test the performance and -availability of Logstash. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Heartbeat Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-count"] -===== `count` - - * Value type is <> - * Default value is `-1` - -How many times to iterate. -This is typically used only for testing purposes. 
- -[id="{version}-plugins-{type}s-{plugin}-interval"] -===== `interval` - - * Value type is <> - * Default value is `60` - -Set how frequently messages should be sent. - -The default, `60`, means send a message every 60 seconds. - -[id="{version}-plugins-{type}s-{plugin}-message"] -===== `message` - - * Value type is <> - * Default value is `"ok"` - -The message string to use in the event. - -If you set this to `epoch` then this plugin will use the current -timestamp in unix timestamp (which is by definition, UTC). It will -output this value into a field called `clock` - -If you set this to `sequence` then this plugin will send a sequence of -numbers beginning at 0 and incrementing each interval. It will -output this value into a field called `clock` - -Otherwise, this value will be used verbatim as the event message. It -will output this value into a field called `message` - -[id="{version}-plugins-{type}s-{plugin}-threads"] -===== `threads` - - * Value type is <> - * Default value is `1` - - - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/heartbeat-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/heartbeat-v3.0.4.asciidoc deleted file mode 100644 index f9bdc4903..000000000 --- a/docs/versioned-plugins/inputs/heartbeat-v3.0.4.asciidoc +++ /dev/null @@ -1,97 +0,0 @@ -:plugin: heartbeat -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-input-heartbeat/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Heartbeat input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Generate heartbeat messages. - -The general intention of this is to test the performance and -availability of Logstash. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Heartbeat Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-count"] -===== `count` - - * Value type is <> - * Default value is `-1` - -How many times to iterate. -This is typically used only for testing purposes. - -[id="{version}-plugins-{type}s-{plugin}-interval"] -===== `interval` - - * Value type is <> - * Default value is `60` - -Set how frequently messages should be sent. - -The default, `60`, means send a message every 60 seconds. 
- -[id="{version}-plugins-{type}s-{plugin}-message"] -===== `message` - - * Value type is <> - * Default value is `"ok"` - -The message string to use in the event. - -If you set this to `epoch` then this plugin will use the current -timestamp in unix timestamp (which is by definition, UTC). It will -output this value into a field called `clock` - -If you set this to `sequence` then this plugin will send a sequence of -numbers beginning at 0 and incrementing each interval. It will -output this value into a field called `clock` - -Otherwise, this value will be used verbatim as the event message. It -will output this value into a field called `message` - -[id="{version}-plugins-{type}s-{plugin}-threads"] -===== `threads` - - * Value type is <> - * Default value is `1` - - - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/heartbeat-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/heartbeat-v3.0.5.asciidoc deleted file mode 100644 index 06d67e436..000000000 --- a/docs/versioned-plugins/inputs/heartbeat-v3.0.5.asciidoc +++ /dev/null @@ -1,97 +0,0 @@ -:plugin: heartbeat -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-heartbeat/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Heartbeat input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Generate heartbeat messages. - -The general intention of this is to test the performance and -availability of Logstash. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Heartbeat Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-count"] -===== `count` - - * Value type is <> - * Default value is `-1` - -How many times to iterate. -This is typically used only for testing purposes. - -[id="{version}-plugins-{type}s-{plugin}-interval"] -===== `interval` - - * Value type is <> - * Default value is `60` - -Set how frequently messages should be sent. - -The default, `60`, means send a message every 60 seconds. - -[id="{version}-plugins-{type}s-{plugin}-message"] -===== `message` - - * Value type is <> - * Default value is `"ok"` - -The message string to use in the event. - -If you set this to `epoch` then this plugin will use the current -timestamp in unix timestamp (which is by definition, UTC). 
It will
-output this value into a field called `clock`.
-
-If you set this to `sequence` then this plugin will send a sequence of
-numbers beginning at 0 and incrementing each interval, and output this value
-into a field called `clock`.
-
-Otherwise, this value will be used verbatim as the event message and output
-into a field called `message`.
-
-[id="{version}-plugins-{type}s-{plugin}-threads"]
-===== `threads`
-
- * Value type is <>
- * Default value is `1`
-
-
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/heroku-index.asciidoc b/docs/versioned-plugins/inputs/heroku-index.asciidoc
deleted file mode 100644
index 3b78e80db..000000000
--- a/docs/versioned-plugins/inputs/heroku-index.asciidoc
+++ /dev/null
@@ -1,14 +0,0 @@
-:plugin: heroku
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-08-15
-| <> | 2017-06-23
-|=======================================================================
-
-include::heroku-v3.0.2.asciidoc[]
-include::heroku-v3.0.1.asciidoc[]
-
diff --git a/docs/versioned-plugins/inputs/heroku-v3.0.1.asciidoc b/docs/versioned-plugins/inputs/heroku-v3.0.1.asciidoc
deleted file mode 100644
index 54f20d59f..000000000
--- a/docs/versioned-plugins/inputs/heroku-v3.0.1.asciidoc
+++ /dev/null
@@ -1,66 +0,0 @@
-:plugin: heroku
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.1
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-input-heroku/blob/v3.0.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Heroku input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Stream events from the logs of a Heroku app.
-
-This will read events in a manner similar to how the `heroku logs -t` command
-fetches logs.
-
-Recommended filters:
-[source,ruby]
-    filter {
-      grok {
-        pattern => "^%{TIMESTAMP_ISO8601:timestamp} %{WORD:component}\[%{WORD:process}(?:\.%{INT:instance:int})?\]: %{DATA:message}$"
-      }
-      date { match => ["timestamp", "ISO8601"] }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Heroku Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-app>> |<>|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-app"]
-===== `app`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The name of your Heroku application.
-This is usually the first part of the
-domain name `my-app-name.herokuapp.com`.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/heroku-v3.0.2.asciidoc b/docs/versioned-plugins/inputs/heroku-v3.0.2.asciidoc
deleted file mode 100644
index cce0b1073..000000000
--- a/docs/versioned-plugins/inputs/heroku-v3.0.2.asciidoc
+++ /dev/null
@@ -1,66 +0,0 @@
-:plugin: heroku
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.2
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-input-heroku/blob/v3.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Heroku input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Stream events from the logs of a Heroku app.
-
-This will read events in a manner similar to how the `heroku logs -t` command
-fetches logs.
-
-Recommended filters:
-[source,ruby]
-    filter {
-      grok {
-        pattern => "^%{TIMESTAMP_ISO8601:timestamp} %{WORD:component}\[%{WORD:process}(?:\.%{INT:instance:int})?\]: %{DATA:message}$"
-      }
-      date { match => ["timestamp", "ISO8601"] }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Heroku Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-app>> |<>|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-app"]
-===== `app`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The name of your Heroku application.
-This is usually the first part of the
-domain name `my-app-name.herokuapp.com`.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/http_poller-index.asciidoc b/docs/versioned-plugins/inputs/http_poller-index.asciidoc
deleted file mode 100644
index ecad8e9c7..000000000
--- a/docs/versioned-plugins/inputs/http_poller-index.asciidoc
+++ /dev/null
@@ -1,30 +0,0 @@
-:plugin: http_poller
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-09-20
-| <> | 2017-09-07
-| <> | 2017-08-15
-| <> | 2017-08-02
-| <> | 2017-09-21
-| <> | 2017-09-07
-| <> | 2017-08-18
-| <> | 2017-06-23
-| <> | 2017-05-08
-|=======================================================================
-
-include::http_poller-v4.0.4.asciidoc[]
-include::http_poller-v4.0.3.asciidoc[]
-include::http_poller-v4.0.2.asciidoc[]
-include::http_poller-v4.0.1.asciidoc[]
-include::http_poller-v4.0.0.asciidoc[]
-include::http_poller-v3.3.4.asciidoc[]
-include::http_poller-v3.3.3.asciidoc[]
-include::http_poller-v3.3.2.asciidoc[]
-include::http_poller-v3.3.1.asciidoc[]
-include::http_poller-v3.3.0.asciidoc[]
-
diff --git a/docs/versioned-plugins/inputs/http_poller-v3.3.0.asciidoc b/docs/versioned-plugins/inputs/http_poller-v3.3.0.asciidoc
deleted file mode 100644
index 1ae935274..000000000
--- a/docs/versioned-plugins/inputs/http_poller-v3.3.0.asciidoc
+++ /dev/null
@@ -1,400 +0,0 @@
-:plugin: http_poller
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.3.0
-:release_date: 2017-05-08
-:changelog_url: https://github.com/logstash-plugins/logstash-input-http_poller/blob/v3.3.0/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Http_poller input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This Logstash input plugin allows you to call an HTTP API, decode the output
-into event(s), and send them on their merry way. The idea behind this plugin
-came from a need to read a Spring Boot metrics endpoint, instead of
-configuring JMX to monitor Java application memory, GC, and the like.
-
-==== Example
-Reads from a list of urls and decodes the body of the response with a codec.
-The config should look like this:
-
-[source,ruby]
-----------------------------------
-input {
-  http_poller {
-    urls => {
-      test1 => "http://localhost:9200"
-      test2 => {
-        # Supports all options supported by ruby's Manticore HTTP client
-        method => get
-        user => "AzureDiamond"
-        password => "hunter2"
-        url => "http://localhost:9200/_cluster/health"
-        headers => {
-          Accept => "application/json"
-        }
-      }
-    }
-    request_timeout => 60
-    # Supports "cron", "every", "at" and "in" schedules by rufus scheduler
-    schedule => { cron => "* * * * * UTC"}
-    codec => "json"
-    # A hash of request metadata info (timing, response headers, etc.) will be sent here
-    metadata_target => "http_poller_metadata"
-  }
-}
-
-output {
-  stdout {
-    codec => rubydebug
-  }
-}
-----------------------------------
-
-Using the HTTP poller with a custom CA or self-signed cert.
-
-If you have a self-signed cert you will need to convert your server's
-certificate to a valid `.jks` or `.p12` file. An easy way to do it is to run
-the following one-liner, substituting your server's URL for the placeholders
-`MYURL` and `MYPORT`.
-
-[source,sh]
-----------------------------------
-openssl s_client -showcerts -connect MYURL:MYPORT </dev/null | openssl x509 -outform PEM > downloaded_cert.pem; keytool -import -alias test -file downloaded_cert.pem -keystore downloaded_truststore.jks
-----------------------------------
-
-The above snippet will create two files, `downloaded_cert.pem` and `downloaded_truststore.jks`. You will be prompted to set a password for the `jks` file during this process. To configure Logstash, use a config like the one that follows.
-
-
-[source,ruby]
-----------------------------------
-    http_poller {
-      urls => {
-        myurl => "https://myhostname:1234"
-      }
-      truststore => "/path/to/downloaded_truststore.jks"
-      truststore_password => "mypassword"
-      interval => 30
-    }
-----------------------------------
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Http_poller Input Configuration Options
-
-This plugin supports the following configuration options plus the <> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-metadata_target>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_validation>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-urls>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No
-|=======================================================================
-
-Also see <> for a list of options supported by all
-input plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-user"]
-===== `user`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Username to use with HTTP authentication for ALL requests. Note that you can also set this per-URL.
-If you set this you must also set the `password` option.
-
-[id="{version}-plugins-{type}s-{plugin}-password"]
-===== `password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Password to be used in conjunction with the username for HTTP authentication.
-
-[id="{version}-plugins-{type}s-{plugin}-automatic_retries"]
-===== `automatic_retries`
-
- * Value type is <>
- * Default value is `1`
-
-How many times should the client retry a failing URL? We highly recommend NOT setting this value
-to zero if keepalive is enabled. Some servers incorrectly end keepalives early, requiring a retry!
-Note: if `retry_non_idempotent` is set, only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried.
-
-[id="{version}-plugins-{type}s-{plugin}-cacert"]
-===== `cacert`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If you need to use a custom X.509 CA (.pem certs), specify the path to it here.
-
-[id="{version}-plugins-{type}s-{plugin}-client_cert"]
-===== `client_cert`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If you'd like to use a client certificate (note: most people don't want this), set the path to the x509 cert here.
-
-[id="{version}-plugins-{type}s-{plugin}-client_key"]
-===== `client_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If you're using a client certificate, specify the path to the encryption key here.
-
-[id="{version}-plugins-{type}s-{plugin}-connect_timeout"]
-===== `connect_timeout`
-
- * Value type is <>
- * Default value is `10`
-
-Timeout (in seconds) to wait for a connection to be established. Default is `10s`.
-
-[id="{version}-plugins-{type}s-{plugin}-cookies"]
-===== `cookies`
-
- * Value type is <>
- * Default value is `true`
-
-Enable cookie support. With this enabled, the client will persist cookies
-across requests as a normal web browser would. Enabled by default.
-
-[id="{version}-plugins-{type}s-{plugin}-follow_redirects"]
-===== `follow_redirects`
-
- * Value type is <>
- * Default value is `true`
-
-Should redirects be followed? Defaults to `true`.
-
-[id="{version}-plugins-{type}s-{plugin}-interval"]
-===== `interval` (DEPRECATED)
-
- * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
- * Value type is <>
- * There is no default value for this setting.
-
-How often (in seconds) the urls will be called.
-DEPRECATED: use the `schedule` option instead.
-If both `interval` and `schedule` are specified, `interval`
-takes precedence.
-
-[id="{version}-plugins-{type}s-{plugin}-keepalive"]
-===== `keepalive`
-
- * Value type is <>
- * Default value is `true`
-
-Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least
-one with this, to fix interactions with broken keepalive implementations.
-
-[id="{version}-plugins-{type}s-{plugin}-keystore"]
-===== `keystore`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If you need to use a custom keystore (`.jks`), specify it here. This does not work with .pem keys!
-
-[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
-===== `keystore_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Specify the keystore password here.
-Note: most .jks files created with keytool require a password!
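-To tie the keystore settings together, here is a sketch of polling an HTTPS
-endpoint with a custom keystore (the URL, path, and password are
-placeholders, not defaults; `keystore_type` is described next):
-
-[source,ruby]
-----------------------------------
-    http_poller {
-      urls => {
-        health => "https://localhost:9200/_cluster/health"
-      }
-      schedule => { "every" => "30s" }
-      keystore => "/path/to/keystore.jks"
-      keystore_password => "changeit"
-      keystore_type => "JKS"
-    }
-----------------------------------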
- -[id="{version}-plugins-{type}s-{plugin}-keystore_type"] -===== `keystore_type` - - * Value type is <> - * Default value is `"JKS"` - -Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS` - -[id="{version}-plugins-{type}s-{plugin}-metadata_target"] -===== `metadata_target` - - * Value type is <> - * Default value is `"@metadata"` - -If you'd like to work with the request/response metadata. -Set this value to the name of the field you'd like to store a nested -hash of metadata. - -[id="{version}-plugins-{type}s-{plugin}-pool_max"] -===== `pool_max` - - * Value type is <> - * Default value is `50` - -Max number of concurrent connections. Defaults to `50` - -[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] -===== `pool_max_per_route` - - * Value type is <> - * Default value is `25` - -Max number of concurrent connections to a single host. Defaults to `25` - -[id="{version}-plugins-{type}s-{plugin}-proxy"] -===== `proxy` - - * Value type is <> - * There is no default value for this setting. - -If you'd like to use an HTTP proxy . This supports multiple configuration syntaxes: - -1. Proxy host in form: `http://proxy.org:1234` -2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}` -3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}` - -[id="{version}-plugins-{type}s-{plugin}-request_timeout"] -===== `request_timeout` - - * Value type is <> - * Default value is `60` - -This module makes it easy to add a very fully configured HTTP client to logstash -based on [Manticore](https://github.com/cheald/manticore). -For an example of its usage see https://github.com/logstash-plugins/logstash-input-http_poller -Timeout (in seconds) for the entire request - -[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"] -===== `retry_non_idempotent` - - * Value type is <> - * Default value is `false` - -If `automatic_retries` is enabled this will cause non-idempotent HTTP verbs (such as POST) to be retried. - -[id="{version}-plugins-{type}s-{plugin}-schedule"] -===== `schedule` - - * Value type is <> - * There is no default value for this setting. - -Schedule of when to periodically poll from the urls -Format: A hash with - + key: "cron" | "every" | "in" | "at" - + value: string -Examples: - a) { "every" => "1h" } - b) { "cron" => "* * * * * UTC" } -See: rufus/scheduler for details about different schedule options and value string format - -[id="{version}-plugins-{type}s-{plugin}-socket_timeout"] -===== `socket_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for data on the socket. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_validation"] -===== `ssl_certificate_validation` - - * Value type is <> - * Default value is `true` - -Set this to false to disable SSL/TLS certificate validation -Note: setting this to false is generally considered insecure! - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -Define the target field for placing the received data. If this setting is omitted, the data will be stored at the root (top level) of the event. - -[id="{version}-plugins-{type}s-{plugin}-truststore"] -===== `truststore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom truststore (`.jks`) specify that here. 
-
-[id="{version}-plugins-{type}s-{plugin}-truststore"]
-===== `truststore`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If you need to use a custom truststore (`.jks`), specify it here. This does not work with .pem certs!
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
-===== `truststore_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Specify the truststore password here.
-Note: most .jks files created with keytool require a password!
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
-===== `truststore_type`
-
- * Value type is <>
- * Default value is `"JKS"`
-
-Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`.
-
-[id="{version}-plugins-{type}s-{plugin}-urls"]
-===== `urls`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-A Hash of urls in this format: `"name" => "url"`.
-The name and the url will be passed in the outputted event.
-
-[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
-===== `validate_after_inactivity`
-
- * Value type is <>
- * Default value is `200`
-
-How long to wait before checking whether a keepalive connection is stale,
-prior to executing a request on it. You may want to set this lower, possibly
-to `0`, if you get connection errors regularly.
-Quoting the Apache Commons docs (this client is based on Apache Commons):
-'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.'
-See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
-
-
-
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/http_poller-v3.3.1.asciidoc b/docs/versioned-plugins/inputs/http_poller-v3.3.1.asciidoc
deleted file mode 100644
index cefa22c27..000000000
--- a/docs/versioned-plugins/inputs/http_poller-v3.3.1.asciidoc
+++ /dev/null
@@ -1,401 +0,0 @@
-:plugin: http_poller
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.3.1
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-input-http_poller/blob/v3.3.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Http_poller input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This Logstash input plugin allows you to call an HTTP API, decode the output
-into event(s), and send them on their merry way. The idea behind this plugin
-came from a need to read a Spring Boot metrics endpoint, instead of
-configuring JMX to monitor Java application memory, GC, and the like.
-
-==== Example
-Reads from a list of urls and decodes the body of the response with a codec.
-The config should look like this: - -[source,ruby] ----------------------------------- -input { - http_poller { - urls => { - test1 => "http://localhost:9200" - test2 => { - # Supports all options supported by ruby's Manticore HTTP client - method => get - user => "AzureDiamond" - password => "hunter2" - url => "http://localhost:9200/_cluster/health" - headers => { - Accept => "application/json" - } - } - } - request_timeout => 60 - # Supports "cron", "every", "at" and "in" schedules by rufus scheduler - schedule => { cron => "* * * * * UTC"} - codec => "json" - # A hash of request metadata info (timing, response headers, etc.) will be sent here - metadata_target => "http_poller_metadata" - } -} - -output { - stdout { - codec => rubydebug - } -} ----------------------------------- - -Using the HTTP poller with custom a custom CA or self signed cert. - -If you have a self signed cert you will need to convert your server's certificate to a valid# `.jks` or `.p12` file. An easy way to do it is to run the following one-liner, substituting your server's URL for the placeholder `MYURL` and `MYPORT`. - -[source,ruby] ----------------------------------- -openssl s_client -showcerts -connect MYURL:MYPORT /dev/null|openssl x509 -outform PEM > downloaded_cert.pem; keytool -import -alias test -file downloaded_cert.pem -keystore downloaded_truststore.jks ----------------------------------- - -The above snippet will create two files `downloaded_cert.pem` and `downloaded_truststore.jks`. You will be prompted to set a password for the `jks` file during this process. To configure logstash use a config like the one that follows. - - -[source,ruby] ----------------------------------- - http_poller { - urls => { - myurl => "https://myhostname:1234" - } - truststore => "/path/to/downloaded_truststore.jks" - truststore_password => "mypassword" - interval => 30 - } ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Http_poller Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|no -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_validation>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-urls>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * There is no default value for this setting. - -Username to use with HTTP authentication for ALL requests. Note that you can also set this per-URL. -If you set this you must also set the `password` option. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password to be used in conjunction with the username for HTTP authentication. - -[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] -===== `automatic_retries` - - * Value type is <> - * Default value is `1` - -How many times should the client retry a failing URL. We highly recommend NOT setting this value -to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! -Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. - -[id="{version}-plugins-{type}s-{plugin}-cacert"] -===== `cacert` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom X.509 CA (.pem certs) specify the path to that here - -[id="{version}-plugins-{type}s-{plugin}-client_cert"] -===== `client_cert` - - * Value type is <> - * There is no default value for this setting. 
- -If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here - -[id="{version}-plugins-{type}s-{plugin}-client_key"] -===== `client_key` - - * Value type is <> - * There is no default value for this setting. - -If you're using a client certificate specify the path to the encryption key here - -[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] -===== `connect_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for a connection to be established. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-cookies"] -===== `cookies` - - * Value type is <> - * Default value is `true` - -Enable cookie support. With this enabled the client will persist cookies -across requests as a normal web browser would. Enabled by default - -[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] -===== `follow_redirects` - - * Value type is <> - * Default value is `true` - -Should redirects be followed? Defaults to `true` - -[id="{version}-plugins-{type}s-{plugin}-interval"] -===== `interval` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * There is no default value for this setting. - -How often (in seconds) the urls will be called -DEPRECATED. Use 'schedule' option instead. -If both interval and schedule options are specified, interval -option takes higher precedence - -[id="{version}-plugins-{type}s-{plugin}-keepalive"] -===== `keepalive` - - * Value type is <> - * Default value is `true` - -Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least -one with this to fix interactions with broken keepalive implementations. - -[id="{version}-plugins-{type}s-{plugin}-keystore"] -===== `keystore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! - -[id="{version}-plugins-{type}s-{plugin}-keystore_password"] -===== `keystore_password` - - * Value type is <> - * There is no default value for this setting. - -Specify the keystore password here. -Note, most .jks files created with keytool require a password! - -[id="{version}-plugins-{type}s-{plugin}-keystore_type"] -===== `keystore_type` - - * Value type is <> - * Default value is `"JKS"` - -Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS` - -[id="{version}-plugins-{type}s-{plugin}-metadata_target"] -===== `metadata_target` - - * Value type is <> - * Default value is `"@metadata"` - -If you'd like to work with the request/response metadata. -Set this value to the name of the field you'd like to store a nested -hash of metadata. - -[id="{version}-plugins-{type}s-{plugin}-pool_max"] -===== `pool_max` - - * Value type is <> - * Default value is `50` - -Max number of concurrent connections. Defaults to `50` - -[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] -===== `pool_max_per_route` - - * Value type is <> - * Default value is `25` - -Max number of concurrent connections to a single host. Defaults to `25` - -[id="{version}-plugins-{type}s-{plugin}-proxy"] -===== `proxy` - - * Value type is <> - * There is no default value for this setting. - -If you'd like to use an HTTP proxy . This supports multiple configuration syntaxes: - -1. Proxy host in form: `http://proxy.org:1234` -2. 
Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}` -3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}` - -[id="{version}-plugins-{type}s-{plugin}-request_timeout"] -===== `request_timeout` - - * Value type is <> - * Default value is `60` - -This module makes it easy to add a very fully configured HTTP client to logstash -based on [Manticore](https://github.com/cheald/manticore). -For an example of its usage see https://github.com/logstash-plugins/logstash-input-http_poller -Timeout (in seconds) for the entire request - -[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"] -===== `retry_non_idempotent` - - * Value type is <> - * Default value is `false` - -If `automatic_retries` is enabled this will cause non-idempotent HTTP verbs (such as POST) to be retried. - -[id="{version}-plugins-{type}s-{plugin}-schedule"] -===== `schedule` - - * Value type is <> - * There is no default value for this setting. - -Schedule of when to periodically poll from the urls -Format: A hash with - + key: "cron" | "every" | "in" | "at" - + value: string -Examples: - a) { "every" => "1h" } - b) { "cron" => "* * * * * UTC" } -See: rufus/scheduler for details about different schedule options and value string format - -[id="{version}-plugins-{type}s-{plugin}-socket_timeout"] -===== `socket_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for data on the socket. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_validation"] -===== `ssl_certificate_validation` - - * Value type is <> - * Default value is `true` - -Set this to false to disable SSL/TLS certificate validation -Note: setting this to false is generally considered insecure! - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -Define the target field for placing the received data. If this setting is omitted, the data will be stored at the root (top level) of the event. - -[id="{version}-plugins-{type}s-{plugin}-truststore"] -===== `truststore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs! - -[id="{version}-plugins-{type}s-{plugin}-truststore_password"] -===== `truststore_password` - - * Value type is <> - * There is no default value for this setting. - -Specify the truststore password here. -Note, most .jks files created with keytool require a password! - -[id="{version}-plugins-{type}s-{plugin}-truststore_type"] -===== `truststore_type` - - * Value type is <> - * Default value is `"JKS"` - -Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS` - -[id="{version}-plugins-{type}s-{plugin}-urls"] -===== `urls` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -A Hash of urls in this format : `"name" => "url"`. -The name and the url will be passed in the outputed event - -[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] -===== `validate_after_inactivity` - - * Value type is <> - * Default value is `200` - -How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. 
-# You may want to set this lower, possibly to 0 if you get connection errors regularly -Quoting the Apache commons docs (this client is based Apache Commmons): -'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.' -See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info] - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/http_poller-v3.3.2.asciidoc b/docs/versioned-plugins/inputs/http_poller-v3.3.2.asciidoc deleted file mode 100644 index 0d7e63035..000000000 --- a/docs/versioned-plugins/inputs/http_poller-v3.3.2.asciidoc +++ /dev/null @@ -1,401 +0,0 @@ -:plugin: http_poller -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.3.2 -:release_date: 2017-08-18 -:changelog_url: https://github.com/logstash-plugins/logstash-input-http_poller/blob/v3.3.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Http_poller input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This Logstash input plugin allows you to call an HTTP API, decode the output of it into event(s), and -send them on their merry way. The idea behind this plugins came from a need to read springboot -metrics endpoint, instead of configuring jmx to monitor my java application memory/gc/ etc. - -==== Example -Reads from a list of urls and decodes the body of the response with a codec. -The config should look like this: - -[source,ruby] ----------------------------------- -input { - http_poller { - urls => { - test1 => "http://localhost:9200" - test2 => { - # Supports all options supported by ruby's Manticore HTTP client - method => get - user => "AzureDiamond" - password => "hunter2" - url => "http://localhost:9200/_cluster/health" - headers => { - Accept => "application/json" - } - } - } - request_timeout => 60 - # Supports "cron", "every", "at" and "in" schedules by rufus scheduler - schedule => { cron => "* * * * * UTC"} - codec => "json" - # A hash of request metadata info (timing, response headers, etc.) will be sent here - metadata_target => "http_poller_metadata" - } -} - -output { - stdout { - codec => rubydebug - } -} ----------------------------------- - -Using the HTTP poller with custom a custom CA or self signed cert. - -If you have a self signed cert you will need to convert your server's certificate to a valid# `.jks` or `.p12` file. An easy way to do it is to run the following one-liner, substituting your server's URL for the placeholder `MYURL` and `MYPORT`. 
- -[source,ruby] ----------------------------------- -openssl s_client -showcerts -connect MYURL:MYPORT /dev/null|openssl x509 -outform PEM > downloaded_cert.pem; keytool -import -alias test -file downloaded_cert.pem -keystore downloaded_truststore.jks ----------------------------------- - -The above snippet will create two files `downloaded_cert.pem` and `downloaded_truststore.jks`. You will be prompted to set a password for the `jks` file during this process. To configure logstash use a config like the one that follows. - - -[source,ruby] ----------------------------------- - http_poller { - urls => { - myurl => "https://myhostname:1234" - } - truststore => "/path/to/downloaded_truststore.jks" - truststore_password => "mypassword" - interval => 30 - } ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Http_poller Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|no -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_validation>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-urls>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * There is no default value for this setting. - -Username to use with HTTP authentication for ALL requests. Note that you can also set this per-URL. -If you set this you must also set the `password` option. 
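-For example, top-level credentials apply to every URL, while a per-URL hash
-can override them, as in this sketch (the credentials are the same
-placeholder values used in the example above):
-
-[source,ruby]
-----------------------------------
-    http_poller {
-      urls => {
-        health => "http://localhost:9200/_cluster/health"
-      }
-      user => "AzureDiamond"
-      password => "hunter2"
-      schedule => { "cron" => "* * * * * UTC" }
-    }
-----------------------------------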
- -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password to be used in conjunction with the username for HTTP authentication. - -[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] -===== `automatic_retries` - - * Value type is <> - * Default value is `1` - -How many times should the client retry a failing URL. We highly recommend NOT setting this value -to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! -Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. - -[id="{version}-plugins-{type}s-{plugin}-cacert"] -===== `cacert` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom X.509 CA (.pem certs) specify the path to that here - -[id="{version}-plugins-{type}s-{plugin}-client_cert"] -===== `client_cert` - - * Value type is <> - * There is no default value for this setting. - -If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here - -[id="{version}-plugins-{type}s-{plugin}-client_key"] -===== `client_key` - - * Value type is <> - * There is no default value for this setting. - -If you're using a client certificate specify the path to the encryption key here - -[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] -===== `connect_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for a connection to be established. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-cookies"] -===== `cookies` - - * Value type is <> - * Default value is `true` - -Enable cookie support. With this enabled the client will persist cookies -across requests as a normal web browser would. Enabled by default - -[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] -===== `follow_redirects` - - * Value type is <> - * Default value is `true` - -Should redirects be followed? Defaults to `true` - -[id="{version}-plugins-{type}s-{plugin}-interval"] -===== `interval` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * There is no default value for this setting. - -How often (in seconds) the urls will be called -DEPRECATED. Use 'schedule' option instead. -If both interval and schedule options are specified, interval -option takes higher precedence - -[id="{version}-plugins-{type}s-{plugin}-keepalive"] -===== `keepalive` - - * Value type is <> - * Default value is `true` - -Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least -one with this to fix interactions with broken keepalive implementations. - -[id="{version}-plugins-{type}s-{plugin}-keystore"] -===== `keystore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! - -[id="{version}-plugins-{type}s-{plugin}-keystore_password"] -===== `keystore_password` - - * Value type is <> - * There is no default value for this setting. - -Specify the keystore password here. -Note, most .jks files created with keytool require a password! - -[id="{version}-plugins-{type}s-{plugin}-keystore_type"] -===== `keystore_type` - - * Value type is <> - * Default value is `"JKS"` - -Specify the keystore type here. One of `JKS` or `PKCS12`. 
Default is `JKS` - -[id="{version}-plugins-{type}s-{plugin}-metadata_target"] -===== `metadata_target` - - * Value type is <> - * Default value is `"@metadata"` - -If you'd like to work with the request/response metadata. -Set this value to the name of the field you'd like to store a nested -hash of metadata. - -[id="{version}-plugins-{type}s-{plugin}-pool_max"] -===== `pool_max` - - * Value type is <> - * Default value is `50` - -Max number of concurrent connections. Defaults to `50` - -[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] -===== `pool_max_per_route` - - * Value type is <> - * Default value is `25` - -Max number of concurrent connections to a single host. Defaults to `25` - -[id="{version}-plugins-{type}s-{plugin}-proxy"] -===== `proxy` - - * Value type is <> - * There is no default value for this setting. - -If you'd like to use an HTTP proxy . This supports multiple configuration syntaxes: - -1. Proxy host in form: `http://proxy.org:1234` -2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}` -3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}` - -[id="{version}-plugins-{type}s-{plugin}-request_timeout"] -===== `request_timeout` - - * Value type is <> - * Default value is `60` - -This module makes it easy to add a very fully configured HTTP client to logstash -based on [Manticore](https://github.com/cheald/manticore). -For an example of its usage see https://github.com/logstash-plugins/logstash-input-http_poller -Timeout (in seconds) for the entire request - -[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"] -===== `retry_non_idempotent` - - * Value type is <> - * Default value is `false` - -If `automatic_retries` is enabled this will cause non-idempotent HTTP verbs (such as POST) to be retried. - -[id="{version}-plugins-{type}s-{plugin}-schedule"] -===== `schedule` - - * Value type is <> - * There is no default value for this setting. - -Schedule of when to periodically poll from the urls -Format: A hash with - + key: "cron" | "every" | "in" | "at" - + value: string -Examples: - a) { "every" => "1h" } - b) { "cron" => "* * * * * UTC" } -See: rufus/scheduler for details about different schedule options and value string format - -[id="{version}-plugins-{type}s-{plugin}-socket_timeout"] -===== `socket_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for data on the socket. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_validation"] -===== `ssl_certificate_validation` - - * Value type is <> - * Default value is `true` - -Set this to false to disable SSL/TLS certificate validation -Note: setting this to false is generally considered insecure! - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -Define the target field for placing the received data. If this setting is omitted, the data will be stored at the root (top level) of the event. - -[id="{version}-plugins-{type}s-{plugin}-truststore"] -===== `truststore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs! - -[id="{version}-plugins-{type}s-{plugin}-truststore_password"] -===== `truststore_password` - - * Value type is <> - * There is no default value for this setting. 
-
-[id="{version}-plugins-{type}s-{plugin}-request_timeout"]
-===== `request_timeout`
-
- * Value type is <>
- * Default value is `60`
-
-Timeout (in seconds) for the entire request.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"]
-===== `retry_non_idempotent`
-
- * Value type is <>
- * Default value is `false`
-
-If `automatic_retries` is enabled, this will cause non-idempotent HTTP verbs (such as POST) to be retried.
-
-[id="{version}-plugins-{type}s-{plugin}-schedule"]
-===== `schedule`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Schedule of when to periodically poll from the urls.
-Format: A hash with
- + key: "cron" | "every" | "in" | "at"
- + value: string
-Examples:
- a) { "every" => "1h" }
- b) { "cron" => "* * * * * UTC" }
-See: rufus/scheduler for details about the different schedule options and value string formats.
-
-[id="{version}-plugins-{type}s-{plugin}-socket_timeout"]
-===== `socket_timeout`
-
- * Value type is <>
- * Default value is `10`
-
-Timeout (in seconds) to wait for data on the socket. Default is `10s`.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_validation"]
-===== `ssl_certificate_validation`
-
- * Value type is <>
- * Default value is `true`
-
-Set this to false to disable SSL/TLS certificate validation.
-Note: setting this to false is generally considered insecure!
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Define the target field for placing the received data. If this setting is omitted, the data will be stored at the root (top level) of the event.
-
-[id="{version}-plugins-{type}s-{plugin}-truststore"]
-===== `truststore`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If you need to use a custom truststore (`.jks`), specify it here. This does not work with .pem certs!
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
-===== `truststore_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Specify the truststore password here.
-Note: most .jks files created with keytool require a password!
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
-===== `truststore_type`
-
- * Value type is <>
- * Default value is `"JKS"`
-
-Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`.
-
-[id="{version}-plugins-{type}s-{plugin}-urls"]
-===== `urls`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-A Hash of urls in this format: `"name" => "url"`.
-The name and the url will be passed in the output event.
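-
-A minimal sketch of the two `urls` value forms (endpoints are placeholders): a bare string, and a hash that passes per-URL Manticore options, as in the main example above:
-
-[source,ruby]
-----------------------------------
-input {
-  http_poller {
-    urls => {
-      # simple form: name => url
-      health => "http://localhost:9200/_cluster/health"
-      # hash form: name => { per-request options }
-      root => {
-        method => get
-        url => "http://localhost:9200"
-        headers => { Accept => "application/json" }
-      }
-    }
-    schedule => { every => "1m" }
-  }
-}
-----------------------------------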
-
-[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
-===== `validate_after_inactivity`
-
- * Value type is <>
- * Default value is `200`
-
-How long to wait before checking whether the connection is stale when executing a request on a connection using keepalive.
-You may want to set this lower, possibly to 0, if you get connection errors regularly.
-Quoting the Apache Commons docs (this client is based on Apache Commons):
-'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.'
-See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/http_poller-v3.3.3.asciidoc b/docs/versioned-plugins/inputs/http_poller-v3.3.3.asciidoc
deleted file mode 100644
index 3f638b9d5..000000000
--- a/docs/versioned-plugins/inputs/http_poller-v3.3.3.asciidoc
+++ /dev/null
@@ -1,401 +0,0 @@
-:plugin: http_poller
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.3.3
-:release_date: 2017-09-07
-:changelog_url: https://github.com/logstash-plugins/logstash-input-http_poller/blob/v3.3.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Http_poller input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This Logstash input plugin allows you to call an HTTP API, decode its output into event(s), and
-send them on their merry way. The idea behind this plugin came from a need to read a Spring Boot
-metrics endpoint, instead of configuring JMX to monitor my Java application's memory/GC/etc.
-
-==== Example
-Reads from a list of urls and decodes the body of the response with a codec.
-The config should look like this:
-
-[source,ruby]
-----------------------------------
-input {
-  http_poller {
-    urls => {
-      test1 => "http://localhost:9200"
-      test2 => {
-        # Supports all options supported by ruby's Manticore HTTP client
-        method => get
-        user => "AzureDiamond"
-        password => "hunter2"
-        url => "http://localhost:9200/_cluster/health"
-        headers => {
-          Accept => "application/json"
-        }
-      }
-    }
-    request_timeout => 60
-    # Supports "cron", "every", "at" and "in" schedules by rufus scheduler
-    schedule => { cron => "* * * * * UTC"}
-    codec => "json"
-    # A hash of request metadata info (timing, response headers, etc.) will be sent here
-    metadata_target => "http_poller_metadata"
-  }
-}
-
-output {
-  stdout {
-    codec => rubydebug
-  }
-}
-----------------------------------
-
-Using the HTTP poller with a custom CA or self-signed cert.
-
-If you have a self-signed cert, you will need to convert your server's certificate to a valid `.jks` or `.p12` file. An easy way to do it is to run the following one-liner, substituting your server's URL and port for the placeholders `MYURL` and `MYPORT`.
-
-[source,ruby]
-----------------------------------
-openssl s_client -showcerts -connect MYURL:MYPORT </dev/null 2>/dev/null|openssl x509 -outform PEM > downloaded_cert.pem; keytool -import -alias test -file downloaded_cert.pem -keystore downloaded_truststore.jks
-----------------------------------
-
-The above snippet will create two files, `downloaded_cert.pem` and `downloaded_truststore.jks`. You will be prompted to set a password for the `jks` file during this process. To configure logstash, use a config like the one that follows.
-
-
-[source,ruby]
-----------------------------------
- http_poller {
-   urls => {
-     myurl => "https://myhostname:1234"
-   }
-   truststore => "/path/to/downloaded_truststore.jks"
-   truststore_password => "mypassword"
-   interval => 30
- }
-----------------------------------
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Http_poller Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-metadata_target>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_validation>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-urls>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
-&#160;
-
-[id="{version}-plugins-{type}s-{plugin}-user"]
-===== `user`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Username to use with HTTP authentication for ALL requests. Note that you can also set this per-URL.
-If you set this, you must also set the `password` option.
-
-[id="{version}-plugins-{type}s-{plugin}-password"]
-===== `password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Password to be used in conjunction with the username for HTTP authentication.
-
-[id="{version}-plugins-{type}s-{plugin}-automatic_retries"]
-===== `automatic_retries`
-
- * Value type is <>
- * Default value is `1`
-
-How many times the client should retry a failing URL. We highly recommend NOT setting this value
-to zero if keepalive is enabled. Some servers incorrectly end keepalives early, requiring a retry!
-Note: if `retry_non_idempotent` is set, only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried.
-
-[id="{version}-plugins-{type}s-{plugin}-cacert"]
-===== `cacert`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If you need to use a custom X.509 CA (.pem certs), specify the path to it here.
-
-[id="{version}-plugins-{type}s-{plugin}-client_cert"]
-===== `client_cert`
-
- * Value type is <>
- * There is no default value for this setting.
- -If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here - -[id="{version}-plugins-{type}s-{plugin}-client_key"] -===== `client_key` - - * Value type is <> - * There is no default value for this setting. - -If you're using a client certificate specify the path to the encryption key here - -[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] -===== `connect_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for a connection to be established. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-cookies"] -===== `cookies` - - * Value type is <> - * Default value is `true` - -Enable cookie support. With this enabled the client will persist cookies -across requests as a normal web browser would. Enabled by default - -[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] -===== `follow_redirects` - - * Value type is <> - * Default value is `true` - -Should redirects be followed? Defaults to `true` - -[id="{version}-plugins-{type}s-{plugin}-interval"] -===== `interval` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * There is no default value for this setting. - -How often (in seconds) the urls will be called -DEPRECATED. Use 'schedule' option instead. -If both interval and schedule options are specified, interval -option takes higher precedence - -[id="{version}-plugins-{type}s-{plugin}-keepalive"] -===== `keepalive` - - * Value type is <> - * Default value is `true` - -Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least -one with this to fix interactions with broken keepalive implementations. - -[id="{version}-plugins-{type}s-{plugin}-keystore"] -===== `keystore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! - -[id="{version}-plugins-{type}s-{plugin}-keystore_password"] -===== `keystore_password` - - * Value type is <> - * There is no default value for this setting. - -Specify the keystore password here. -Note, most .jks files created with keytool require a password! - -[id="{version}-plugins-{type}s-{plugin}-keystore_type"] -===== `keystore_type` - - * Value type is <> - * Default value is `"JKS"` - -Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS` - -[id="{version}-plugins-{type}s-{plugin}-metadata_target"] -===== `metadata_target` - - * Value type is <> - * Default value is `"@metadata"` - -If you'd like to work with the request/response metadata. -Set this value to the name of the field you'd like to store a nested -hash of metadata. - -[id="{version}-plugins-{type}s-{plugin}-pool_max"] -===== `pool_max` - - * Value type is <> - * Default value is `50` - -Max number of concurrent connections. Defaults to `50` - -[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] -===== `pool_max_per_route` - - * Value type is <> - * Default value is `25` - -Max number of concurrent connections to a single host. Defaults to `25` - -[id="{version}-plugins-{type}s-{plugin}-proxy"] -===== `proxy` - - * Value type is <> - * There is no default value for this setting. - -If you'd like to use an HTTP proxy . This supports multiple configuration syntaxes: - -1. Proxy host in form: `http://proxy.org:1234` -2. 
Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}` -3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}` - -[id="{version}-plugins-{type}s-{plugin}-request_timeout"] -===== `request_timeout` - - * Value type is <> - * Default value is `60` - -This module makes it easy to add a very fully configured HTTP client to logstash -based on [Manticore](https://github.com/cheald/manticore). -For an example of its usage see https://github.com/logstash-plugins/logstash-input-http_poller -Timeout (in seconds) for the entire request - -[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"] -===== `retry_non_idempotent` - - * Value type is <> - * Default value is `false` - -If `automatic_retries` is enabled this will cause non-idempotent HTTP verbs (such as POST) to be retried. - -[id="{version}-plugins-{type}s-{plugin}-schedule"] -===== `schedule` - - * Value type is <> - * There is no default value for this setting. - -Schedule of when to periodically poll from the urls -Format: A hash with - + key: "cron" | "every" | "in" | "at" - + value: string -Examples: - a) { "every" => "1h" } - b) { "cron" => "* * * * * UTC" } -See: rufus/scheduler for details about different schedule options and value string format - -[id="{version}-plugins-{type}s-{plugin}-socket_timeout"] -===== `socket_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for data on the socket. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_validation"] -===== `ssl_certificate_validation` - - * Value type is <> - * Default value is `true` - -Set this to false to disable SSL/TLS certificate validation -Note: setting this to false is generally considered insecure! - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -Define the target field for placing the received data. If this setting is omitted, the data will be stored at the root (top level) of the event. - -[id="{version}-plugins-{type}s-{plugin}-truststore"] -===== `truststore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs! - -[id="{version}-plugins-{type}s-{plugin}-truststore_password"] -===== `truststore_password` - - * Value type is <> - * There is no default value for this setting. - -Specify the truststore password here. -Note, most .jks files created with keytool require a password! - -[id="{version}-plugins-{type}s-{plugin}-truststore_type"] -===== `truststore_type` - - * Value type is <> - * Default value is `"JKS"` - -Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS` - -[id="{version}-plugins-{type}s-{plugin}-urls"] -===== `urls` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -A Hash of urls in this format : `"name" => "url"`. -The name and the url will be passed in the outputed event - -[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] -===== `validate_after_inactivity` - - * Value type is <> - * Default value is `200` - -How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. 
-# You may want to set this lower, possibly to 0 if you get connection errors regularly -Quoting the Apache commons docs (this client is based Apache Commmons): -'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.' -See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info] - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/http_poller-v3.3.4.asciidoc b/docs/versioned-plugins/inputs/http_poller-v3.3.4.asciidoc deleted file mode 100644 index 7bbad1df5..000000000 --- a/docs/versioned-plugins/inputs/http_poller-v3.3.4.asciidoc +++ /dev/null @@ -1,391 +0,0 @@ -:plugin: http_poller -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.3.4 -:release_date: 2017-09-21 -:changelog_url: https://github.com/logstash-plugins/logstash-input-http_poller/blob/v3.3.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Http_poller input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This Logstash input plugin allows you to call an HTTP API, decode the output of it into event(s), and -send them on their merry way. The idea behind this plugins came from a need to read springboot -metrics endpoint, instead of configuring jmx to monitor my java application memory/gc/ etc. - -==== Example -Reads from a list of urls and decodes the body of the response with a codec. -The config should look like this: - -[source,ruby] ----------------------------------- -input { - http_poller { - urls => { - test1 => "http://localhost:9200" - test2 => { - # Supports all options supported by ruby's Manticore HTTP client - method => get - user => "AzureDiamond" - password => "hunter2" - url => "http://localhost:9200/_cluster/health" - headers => { - Accept => "application/json" - } - } - } - request_timeout => 60 - # Supports "cron", "every", "at" and "in" schedules by rufus scheduler - schedule => { cron => "* * * * * UTC"} - codec => "json" - # A hash of request metadata info (timing, response headers, etc.) will be sent here - metadata_target => "http_poller_metadata" - } -} - -output { - stdout { - codec => rubydebug - } -} ----------------------------------- - -Using the HTTP poller with custom a custom CA or self signed cert. - -If you have a self signed cert you will need to convert your server's certificate to a valid# `.jks` or `.p12` file. An easy way to do it is to run the following one-liner, substituting your server's URL for the placeholder `MYURL` and `MYPORT`. 
- -[source,ruby] ----------------------------------- -openssl s_client -showcerts -connect MYURL:MYPORT /dev/null|openssl x509 -outform PEM > downloaded_cert.pem; keytool -import -alias test -file downloaded_cert.pem -keystore downloaded_truststore.jks ----------------------------------- - -The above snippet will create two files `downloaded_cert.pem` and `downloaded_truststore.jks`. You will be prompted to set a password for the `jks` file during this process. To configure logstash use a config like the one that follows. - - -[source,ruby] ----------------------------------- - http_poller { - urls => { - myurl => "https://myhostname:1234" - } - truststore => "/path/to/downloaded_truststore.jks" - truststore_password => "mypassword" - interval => 30 - } ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Http_poller Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|no -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-urls>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * There is no default value for this setting. - -Username to use with HTTP authentication for ALL requests. Note that you can also set this per-URL. -If you set this you must also set the `password` option. 
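-
-For illustration, a minimal sketch of global HTTP auth; the endpoint is a placeholder and the credentials reuse the dummy values from the example above. The same `user`/`password` pair can instead be set per-URL inside a hash entry:
-
-[source,ruby]
-----------------------------------
-input {
-  http_poller {
-    urls => { secure => "http://localhost:9200/_cluster/health" }
-    user => "AzureDiamond"    # applies to ALL requests
-    password => "hunter2"
-    schedule => { every => "5m" }
-  }
-}
-----------------------------------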
- -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password to be used in conjunction with the username for HTTP authentication. - -[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] -===== `automatic_retries` - - * Value type is <> - * Default value is `1` - -How many times should the client retry a failing URL. We highly recommend NOT setting this value -to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! -Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. - -[id="{version}-plugins-{type}s-{plugin}-cacert"] -===== `cacert` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom X.509 CA (.pem certs) specify the path to that here - -[id="{version}-plugins-{type}s-{plugin}-client_cert"] -===== `client_cert` - - * Value type is <> - * There is no default value for this setting. - -If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here - -[id="{version}-plugins-{type}s-{plugin}-client_key"] -===== `client_key` - - * Value type is <> - * There is no default value for this setting. - -If you're using a client certificate specify the path to the encryption key here - -[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] -===== `connect_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for a connection to be established. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-cookies"] -===== `cookies` - - * Value type is <> - * Default value is `true` - -Enable cookie support. With this enabled the client will persist cookies -across requests as a normal web browser would. Enabled by default - -[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] -===== `follow_redirects` - - * Value type is <> - * Default value is `true` - -Should redirects be followed? Defaults to `true` - -[id="{version}-plugins-{type}s-{plugin}-interval"] -===== `interval` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * There is no default value for this setting. - -How often (in seconds) the urls will be called -DEPRECATED. Use 'schedule' option instead. -If both interval and schedule options are specified, interval -option takes higher precedence - -[id="{version}-plugins-{type}s-{plugin}-keepalive"] -===== `keepalive` - - * Value type is <> - * Default value is `true` - -Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least -one with this to fix interactions with broken keepalive implementations. - -[id="{version}-plugins-{type}s-{plugin}-keystore"] -===== `keystore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! - -[id="{version}-plugins-{type}s-{plugin}-keystore_password"] -===== `keystore_password` - - * Value type is <> - * There is no default value for this setting. - -Specify the keystore password here. -Note, most .jks files created with keytool require a password! - -[id="{version}-plugins-{type}s-{plugin}-keystore_type"] -===== `keystore_type` - - * Value type is <> - * Default value is `"JKS"` - -Specify the keystore type here. One of `JKS` or `PKCS12`. 
Default is `JKS` - -[id="{version}-plugins-{type}s-{plugin}-metadata_target"] -===== `metadata_target` - - * Value type is <> - * Default value is `"@metadata"` - -If you'd like to work with the request/response metadata. -Set this value to the name of the field you'd like to store a nested -hash of metadata. - -[id="{version}-plugins-{type}s-{plugin}-pool_max"] -===== `pool_max` - - * Value type is <> - * Default value is `50` - -Max number of concurrent connections. Defaults to `50` - -[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] -===== `pool_max_per_route` - - * Value type is <> - * Default value is `25` - -Max number of concurrent connections to a single host. Defaults to `25` - -[id="{version}-plugins-{type}s-{plugin}-proxy"] -===== `proxy` - - * Value type is <> - * There is no default value for this setting. - -If you'd like to use an HTTP proxy . This supports multiple configuration syntaxes: - -1. Proxy host in form: `http://proxy.org:1234` -2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}` -3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}` - -[id="{version}-plugins-{type}s-{plugin}-request_timeout"] -===== `request_timeout` - - * Value type is <> - * Default value is `60` - -This module makes it easy to add a very fully configured HTTP client to logstash -based on [Manticore](https://github.com/cheald/manticore). -For an example of its usage see https://github.com/logstash-plugins/logstash-input-http_poller -Timeout (in seconds) for the entire request - -[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"] -===== `retry_non_idempotent` - - * Value type is <> - * Default value is `false` - -If `automatic_retries` is enabled this will cause non-idempotent HTTP verbs (such as POST) to be retried. - -[id="{version}-plugins-{type}s-{plugin}-schedule"] -===== `schedule` - - * Value type is <> - * There is no default value for this setting. - -Schedule of when to periodically poll from the urls -Format: A hash with - + key: "cron" | "every" | "in" | "at" - + value: string -Examples: - a) { "every" => "1h" } - b) { "cron" => "* * * * * UTC" } -See: rufus/scheduler for details about different schedule options and value string format - -[id="{version}-plugins-{type}s-{plugin}-socket_timeout"] -===== `socket_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for data on the socket. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -Define the target field for placing the received data. If this setting is omitted, the data will be stored at the root (top level) of the event. - -[id="{version}-plugins-{type}s-{plugin}-truststore"] -===== `truststore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs! - -[id="{version}-plugins-{type}s-{plugin}-truststore_password"] -===== `truststore_password` - - * Value type is <> - * There is no default value for this setting. - -Specify the truststore password here. -Note, most .jks files created with keytool require a password! - -[id="{version}-plugins-{type}s-{plugin}-truststore_type"] -===== `truststore_type` - - * Value type is <> - * Default value is `"JKS"` - -Specify the truststore type here. One of `JKS` or `PKCS12`. 
Default is `JKS` - -[id="{version}-plugins-{type}s-{plugin}-urls"] -===== `urls` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -A Hash of urls in this format : `"name" => "url"`. -The name and the url will be passed in the outputed event - -[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] -===== `validate_after_inactivity` - - * Value type is <> - * Default value is `200` - -How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. -# You may want to set this lower, possibly to 0 if you get connection errors regularly -Quoting the Apache commons docs (this client is based Apache Commmons): -'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.' -See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info] - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/http_poller-v4.0.0.asciidoc b/docs/versioned-plugins/inputs/http_poller-v4.0.0.asciidoc deleted file mode 100644 index fa381e247..000000000 --- a/docs/versioned-plugins/inputs/http_poller-v4.0.0.asciidoc +++ /dev/null @@ -1,380 +0,0 @@ -:plugin: http_poller -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.0.0 -:release_date: 2017-08-02 -:changelog_url: https://github.com/logstash-plugins/logstash-input-http_poller/blob/v4.0.0/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Http_poller input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This Logstash input plugin allows you to call an HTTP API, decode the output of it into event(s), and -send them on their merry way. The idea behind this plugins came from a need to read springboot -metrics endpoint, instead of configuring jmx to monitor my java application memory/gc/ etc. - -==== Example -Reads from a list of urls and decodes the body of the response with a codec. -The config should look like this: - -[source,ruby] ----------------------------------- -input { - http_poller { - urls => { - test1 => "http://localhost:9200" - test2 => { - # Supports all options supported by ruby's Manticore HTTP client - method => get - user => "AzureDiamond" - password => "hunter2" - url => "http://localhost:9200/_cluster/health" - headers => { - Accept => "application/json" - } - } - } - request_timeout => 60 - # Supports "cron", "every", "at" and "in" schedules by rufus scheduler - schedule => { cron => "* * * * * UTC"} - codec => "json" - # A hash of request metadata info (timing, response headers, etc.) 
will be sent here - metadata_target => "http_poller_metadata" - } -} - -output { - stdout { - codec => rubydebug - } -} ----------------------------------- - -Using the HTTP poller with custom a custom CA or self signed cert. - -If you have a self signed cert you will need to convert your server's certificate to a valid# `.jks` or `.p12` file. An easy way to do it is to run the following one-liner, substituting your server's URL for the placeholder `MYURL` and `MYPORT`. - -[source,ruby] ----------------------------------- -openssl s_client -showcerts -connect MYURL:MYPORT /dev/null|openssl x509 -outform PEM > downloaded_cert.pem; keytool -import -alias test -file downloaded_cert.pem -keystore downloaded_truststore.jks ----------------------------------- - -The above snippet will create two files `downloaded_cert.pem` and `downloaded_truststore.jks`. You will be prompted to set a password for the `jks` file during this process. To configure logstash use a config like the one that follows. - - -[source,ruby] ----------------------------------- - http_poller { - urls => { - myurl => "https://myhostname:1234" - } - truststore => "/path/to/downloaded_truststore.jks" - truststore_password => "mypassword" - schedule => { cron => "* * * * * UTC"} - } ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Http_poller Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|no -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_validation>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-urls>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No 
-|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * There is no default value for this setting. - -Username to use with HTTP authentication for ALL requests. Note that you can also set this per-URL. -If you set this you must also set the `password` option. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password to be used in conjunction with the username for HTTP authentication. - -[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] -===== `automatic_retries` - - * Value type is <> - * Default value is `1` - -How many times should the client retry a failing URL. We highly recommend NOT setting this value -to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! -Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. - -[id="{version}-plugins-{type}s-{plugin}-cacert"] -===== `cacert` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom X.509 CA (.pem certs) specify the path to that here - -[id="{version}-plugins-{type}s-{plugin}-client_cert"] -===== `client_cert` - - * Value type is <> - * There is no default value for this setting. - -If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here - -[id="{version}-plugins-{type}s-{plugin}-client_key"] -===== `client_key` - - * Value type is <> - * There is no default value for this setting. - -If you're using a client certificate specify the path to the encryption key here - -[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] -===== `connect_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for a connection to be established. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-cookies"] -===== `cookies` - - * Value type is <> - * Default value is `true` - -Enable cookie support. With this enabled the client will persist cookies -across requests as a normal web browser would. Enabled by default - -[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] -===== `follow_redirects` - - * Value type is <> - * Default value is `true` - -Should redirects be followed? Defaults to `true` - -[id="{version}-plugins-{type}s-{plugin}-keepalive"] -===== `keepalive` - - * Value type is <> - * Default value is `true` - -Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least -one with this to fix interactions with broken keepalive implementations. - -[id="{version}-plugins-{type}s-{plugin}-keystore"] -===== `keystore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! - -[id="{version}-plugins-{type}s-{plugin}-keystore_password"] -===== `keystore_password` - - * Value type is <> - * There is no default value for this setting. - -Specify the keystore password here. -Note, most .jks files created with keytool require a password! - -[id="{version}-plugins-{type}s-{plugin}-keystore_type"] -===== `keystore_type` - - * Value type is <> - * Default value is `"JKS"` - -Specify the keystore type here. 
One of `JKS` or `PKCS12`. Default is `JKS` - -[id="{version}-plugins-{type}s-{plugin}-metadata_target"] -===== `metadata_target` - - * Value type is <> - * Default value is `"@metadata"` - -If you'd like to work with the request/response metadata. -Set this value to the name of the field you'd like to store a nested -hash of metadata. - -[id="{version}-plugins-{type}s-{plugin}-pool_max"] -===== `pool_max` - - * Value type is <> - * Default value is `50` - -Max number of concurrent connections. Defaults to `50` - -[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] -===== `pool_max_per_route` - - * Value type is <> - * Default value is `25` - -Max number of concurrent connections to a single host. Defaults to `25` - -[id="{version}-plugins-{type}s-{plugin}-proxy"] -===== `proxy` - - * Value type is <> - * There is no default value for this setting. - -If you'd like to use an HTTP proxy . This supports multiple configuration syntaxes: - -1. Proxy host in form: `http://proxy.org:1234` -2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}` -3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}` - -[id="{version}-plugins-{type}s-{plugin}-request_timeout"] -===== `request_timeout` - - * Value type is <> - * Default value is `60` - -This module makes it easy to add a very fully configured HTTP client to logstash -based on [Manticore](https://github.com/cheald/manticore). -For an example of its usage see https://github.com/logstash-plugins/logstash-input-http_poller -Timeout (in seconds) for the entire request - -[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"] -===== `retry_non_idempotent` - - * Value type is <> - * Default value is `false` - -If `automatic_retries` is enabled this will cause non-idempotent HTTP verbs (such as POST) to be retried. - -[id="{version}-plugins-{type}s-{plugin}-schedule"] -===== `schedule` - - * Value type is <> - * There is no default value for this setting. - -Schedule of when to periodically poll from the urls -Format: A hash with - + key: "cron" | "every" | "in" | "at" - + value: string -Examples: - a) { "every" => "1h" } - b) { "cron" => "* * * * * UTC" } -See: rufus/scheduler for details about different schedule options and value string format - -[id="{version}-plugins-{type}s-{plugin}-socket_timeout"] -===== `socket_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for data on the socket. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -Define the target field for placing the received data. If this setting is omitted, the data will be stored at the root (top level) of the event. - -[id="{version}-plugins-{type}s-{plugin}-truststore"] -===== `truststore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs! - -[id="{version}-plugins-{type}s-{plugin}-truststore_password"] -===== `truststore_password` - - * Value type is <> - * There is no default value for this setting. - -Specify the truststore password here. -Note, most .jks files created with keytool require a password! - -[id="{version}-plugins-{type}s-{plugin}-truststore_type"] -===== `truststore_type` - - * Value type is <> - * Default value is `"JKS"` - -Specify the truststore type here. 
One of `JKS` or `PKCS12`. Default is `JKS` - -[id="{version}-plugins-{type}s-{plugin}-urls"] -===== `urls` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -A Hash of urls in this format : `"name" => "url"`. -The name and the url will be passed in the outputed event - -[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] -===== `validate_after_inactivity` - - * Value type is <> - * Default value is `200` - -How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. -# You may want to set this lower, possibly to 0 if you get connection errors regularly -Quoting the Apache commons docs (this client is based Apache Commmons): -'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.' -See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info] - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/http_poller-v4.0.1.asciidoc b/docs/versioned-plugins/inputs/http_poller-v4.0.1.asciidoc deleted file mode 100644 index 2b333402e..000000000 --- a/docs/versioned-plugins/inputs/http_poller-v4.0.1.asciidoc +++ /dev/null @@ -1,380 +0,0 @@ -:plugin: http_poller -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.0.1 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-input-http_poller/blob/v4.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Http_poller input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This Logstash input plugin allows you to call an HTTP API, decode the output of it into event(s), and -send them on their merry way. The idea behind this plugins came from a need to read springboot -metrics endpoint, instead of configuring jmx to monitor my java application memory/gc/ etc. - -==== Example -Reads from a list of urls and decodes the body of the response with a codec. -The config should look like this: - -[source,ruby] ----------------------------------- -input { - http_poller { - urls => { - test1 => "http://localhost:9200" - test2 => { - # Supports all options supported by ruby's Manticore HTTP client - method => get - user => "AzureDiamond" - password => "hunter2" - url => "http://localhost:9200/_cluster/health" - headers => { - Accept => "application/json" - } - } - } - request_timeout => 60 - # Supports "cron", "every", "at" and "in" schedules by rufus scheduler - schedule => { cron => "* * * * * UTC"} - codec => "json" - # A hash of request metadata info (timing, response headers, etc.) 
will be sent here - metadata_target => "http_poller_metadata" - } -} - -output { - stdout { - codec => rubydebug - } -} ----------------------------------- - -Using the HTTP poller with custom a custom CA or self signed cert. - -If you have a self signed cert you will need to convert your server's certificate to a valid# `.jks` or `.p12` file. An easy way to do it is to run the following one-liner, substituting your server's URL for the placeholder `MYURL` and `MYPORT`. - -[source,ruby] ----------------------------------- -openssl s_client -showcerts -connect MYURL:MYPORT /dev/null|openssl x509 -outform PEM > downloaded_cert.pem; keytool -import -alias test -file downloaded_cert.pem -keystore downloaded_truststore.jks ----------------------------------- - -The above snippet will create two files `downloaded_cert.pem` and `downloaded_truststore.jks`. You will be prompted to set a password for the `jks` file during this process. To configure logstash use a config like the one that follows. - - -[source,ruby] ----------------------------------- - http_poller { - urls => { - myurl => "https://myhostname:1234" - } - truststore => "/path/to/downloaded_truststore.jks" - truststore_password => "mypassword" - schedule => { cron => "* * * * * UTC"} - } ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Http_poller Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|no -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_validation>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-urls>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No 
-|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * There is no default value for this setting. - -Username to use with HTTP authentication for ALL requests. Note that you can also set this per-URL. -If you set this you must also set the `password` option. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password to be used in conjunction with the username for HTTP authentication. - -[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] -===== `automatic_retries` - - * Value type is <> - * Default value is `1` - -How many times should the client retry a failing URL. We highly recommend NOT setting this value -to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! -Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. - -[id="{version}-plugins-{type}s-{plugin}-cacert"] -===== `cacert` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom X.509 CA (.pem certs) specify the path to that here - -[id="{version}-plugins-{type}s-{plugin}-client_cert"] -===== `client_cert` - - * Value type is <> - * There is no default value for this setting. - -If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here - -[id="{version}-plugins-{type}s-{plugin}-client_key"] -===== `client_key` - - * Value type is <> - * There is no default value for this setting. - -If you're using a client certificate specify the path to the encryption key here - -[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] -===== `connect_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for a connection to be established. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-cookies"] -===== `cookies` - - * Value type is <> - * Default value is `true` - -Enable cookie support. With this enabled the client will persist cookies -across requests as a normal web browser would. Enabled by default - -[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] -===== `follow_redirects` - - * Value type is <> - * Default value is `true` - -Should redirects be followed? Defaults to `true` - -[id="{version}-plugins-{type}s-{plugin}-keepalive"] -===== `keepalive` - - * Value type is <> - * Default value is `true` - -Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least -one with this to fix interactions with broken keepalive implementations. - -[id="{version}-plugins-{type}s-{plugin}-keystore"] -===== `keystore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! - -[id="{version}-plugins-{type}s-{plugin}-keystore_password"] -===== `keystore_password` - - * Value type is <> - * There is no default value for this setting. - -Specify the keystore password here. -Note, most .jks files created with keytool require a password! - -[id="{version}-plugins-{type}s-{plugin}-keystore_type"] -===== `keystore_type` - - * Value type is <> - * Default value is `"JKS"` - -Specify the keystore type here. 
One of `JKS` or `PKCS12`. Default is `JKS` - -[id="{version}-plugins-{type}s-{plugin}-metadata_target"] -===== `metadata_target` - - * Value type is <> - * Default value is `"@metadata"` - -If you'd like to work with the request/response metadata. -Set this value to the name of the field you'd like to store a nested -hash of metadata. - -[id="{version}-plugins-{type}s-{plugin}-pool_max"] -===== `pool_max` - - * Value type is <> - * Default value is `50` - -Max number of concurrent connections. Defaults to `50` - -[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] -===== `pool_max_per_route` - - * Value type is <> - * Default value is `25` - -Max number of concurrent connections to a single host. Defaults to `25` - -[id="{version}-plugins-{type}s-{plugin}-proxy"] -===== `proxy` - - * Value type is <> - * There is no default value for this setting. - -If you'd like to use an HTTP proxy . This supports multiple configuration syntaxes: - -1. Proxy host in form: `http://proxy.org:1234` -2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}` -3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}` - -[id="{version}-plugins-{type}s-{plugin}-request_timeout"] -===== `request_timeout` - - * Value type is <> - * Default value is `60` - -This module makes it easy to add a very fully configured HTTP client to logstash -based on [Manticore](https://github.com/cheald/manticore). -For an example of its usage see https://github.com/logstash-plugins/logstash-input-http_poller -Timeout (in seconds) for the entire request - -[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"] -===== `retry_non_idempotent` - - * Value type is <> - * Default value is `false` - -If `automatic_retries` is enabled this will cause non-idempotent HTTP verbs (such as POST) to be retried. - -[id="{version}-plugins-{type}s-{plugin}-schedule"] -===== `schedule` - - * Value type is <> - * There is no default value for this setting. - -Schedule of when to periodically poll from the urls -Format: A hash with - + key: "cron" | "every" | "in" | "at" - + value: string -Examples: - a) { "every" => "1h" } - b) { "cron" => "* * * * * UTC" } -See: rufus/scheduler for details about different schedule options and value string format - -[id="{version}-plugins-{type}s-{plugin}-socket_timeout"] -===== `socket_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for data on the socket. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -Define the target field for placing the received data. If this setting is omitted, the data will be stored at the root (top level) of the event. - -[id="{version}-plugins-{type}s-{plugin}-truststore"] -===== `truststore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs! - -[id="{version}-plugins-{type}s-{plugin}-truststore_password"] -===== `truststore_password` - - * Value type is <> - * There is no default value for this setting. - -Specify the truststore password here. -Note, most .jks files created with keytool require a password! - -[id="{version}-plugins-{type}s-{plugin}-truststore_type"] -===== `truststore_type` - - * Value type is <> - * Default value is `"JKS"` - -Specify the truststore type here. 
-
-[id="{version}-plugins-{type}s-{plugin}-request_timeout"]
-===== `request_timeout`
-
- * Value type is <<number,number>>
- * Default value is `60`
-
-Timeout (in seconds) for the entire request. The underlying HTTP client is based on
-https://github.com/cheald/manticore[Manticore].
-
-[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"]
-===== `retry_non_idempotent`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-If `automatic_retries` is enabled, this will cause non-idempotent HTTP verbs (such as POST) to also be retried.
-
-[id="{version}-plugins-{type}s-{plugin}-schedule"]
-===== `schedule`
-
- * Value type is <<hash,hash>>
- * There is no default value for this setting.
-
-Schedule of when to periodically poll from the urls.
-Format: a hash whose key is one of "cron", "every", "in", or "at", and whose value is a string.
-Examples:
- a) `{ "every" => "1h" }`
- b) `{ "cron" => "* * * * * UTC" }`
-See the rufus-scheduler documentation for details about the different schedule options and value string formats.
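-
-As a quick sketch, either schedule style drops into the config the same way (the URL is a placeholder; both schedules come from the examples above):
-
-[source,ruby]
-----------------------------------
-input {
-  http_poller {
-    urls => { test => "http://localhost:9200" }
-    # Poll once an hour:
-    schedule => { "every" => "1h" }
-    # Or poll every second, cron-style (alternative):
-    # schedule => { "cron" => "* * * * * UTC" }
-  }
-}
-----------------------------------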
-
-[id="{version}-plugins-{type}s-{plugin}-socket_timeout"]
-===== `socket_timeout`
-
- * Value type is <<number,number>>
- * Default value is `10`
-
-Timeout (in seconds) to wait for data on the socket.
-
-[id="{version}-plugins-{type}s-{plugin}-target"]
-===== `target`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Define the target field for placing the received data. If this setting is omitted, the data will be stored at the root (top level) of the event.
-
-[id="{version}-plugins-{type}s-{plugin}-truststore"]
-===== `truststore`
-
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-If you need to use a custom truststore (`.jks`), specify it here. This does not work with `.pem` certs!
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
-===== `truststore_password`
-
- * Value type is <<password,password>>
- * There is no default value for this setting.
-
-Specify the truststore password here.
-Note: most `.jks` files created with keytool require a password!
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
-===== `truststore_type`
-
- * Value type is <<string,string>>
- * Default value is `"JKS"`
-
-Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`.
-
-[id="{version}-plugins-{type}s-{plugin}-urls"]
-===== `urls`
-
- * This is a required setting.
- * Value type is <<hash,hash>>
- * There is no default value for this setting.
-
-A hash of URLs in this format: `"name" => "url"`.
-The name and the URL will be included in the output event.
-
-[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
-===== `validate_after_inactivity`
-
- * Value type is <<number,number>>
- * Default value is `200`
-
-How long to wait before checking whether a keepalive connection is stale, before executing a request on it.
-You may want to set this lower, possibly to 0, if you get connection errors regularly.
-Quoting the Apache Commons docs (this client is based on Apache Commons):
-'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.'
-See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/http_poller-v4.0.2.asciidoc b/docs/versioned-plugins/inputs/http_poller-v4.0.2.asciidoc
deleted file mode 100644
index 077a47ed0..000000000
--- a/docs/versioned-plugins/inputs/http_poller-v4.0.2.asciidoc
+++ /dev/null
@@ -1,380 +0,0 @@
-:plugin: http_poller
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.0.2
-:release_date: 2017-09-07
-:changelog_url: https://github.com/logstash-plugins/logstash-input-http_poller/blob/v4.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Http_poller input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This Logstash input plugin allows you to call an HTTP API, decode its output into event(s), and
-send them on their merry way. The idea behind this plugin came from a need to read a Spring Boot
-metrics endpoint instead of configuring JMX to monitor Java application memory, GC, and the like.
-
-==== Example
-Reads from a list of URLs and decodes the body of the response with a codec.
-The config should look like this:
-
-[source,ruby]
-----------------------------------
-input {
-  http_poller {
-    urls => {
-      test1 => "http://localhost:9200"
-      test2 => {
-        # Supports all options supported by ruby's Manticore HTTP client
-        method => get
-        user => "AzureDiamond"
-        password => "hunter2"
-        url => "http://localhost:9200/_cluster/health"
-        headers => {
-          Accept => "application/json"
-        }
-      }
-    }
-    request_timeout => 60
-    # Supports "cron", "every", "at" and "in" schedules by rufus scheduler
-    schedule => { cron => "* * * * * UTC"}
-    codec => "json"
-    # A hash of request metadata info (timing, response headers, etc.) will be sent here
-    metadata_target => "http_poller_metadata"
-  }
-}
-
-output {
-  stdout {
-    codec => rubydebug
-  }
-}
-----------------------------------
-
-Using the HTTP poller with a custom CA or self-signed cert.
-
-If you have a self-signed cert, you will need to convert your server's certificate to a valid `.jks` or `.p12` file. An easy way to do it is to run the following one-liner, substituting your server's URL and port for the placeholders `MYURL` and `MYPORT`.
-
-[source,ruby]
-----------------------------------
-openssl s_client -showcerts -connect MYURL:MYPORT </dev/null 2>/dev/null|openssl x509 -outform PEM > downloaded_cert.pem; keytool -import -alias test -file downloaded_cert.pem -keystore downloaded_truststore.jks
-----------------------------------
-
-The above snippet will create two files, `downloaded_cert.pem` and `downloaded_truststore.jks`. You will be prompted to set a password for the `jks` file during this process. To configure Logstash, use a config like the one that follows.
-
-
-[source,ruby]
-----------------------------------
- http_poller {
-   urls => {
-     myurl => "https://myhostname:1234"
-   }
-   truststore => "/path/to/downloaded_truststore.jks"
-   truststore_password => "mypassword"
-   schedule => { cron => "* * * * * UTC"}
- }
-----------------------------------
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Http_poller Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-user>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-password>> |<<password,password>>|No
-| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-cookies>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<<password,password>>|No
-| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-metadata_target>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-schedule>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_validation>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-target>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<<password,password>>|No
-| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-urls>> |<<hash,hash>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<<number,number>>|No
-|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * There is no default value for this setting. - -Username to use with HTTP authentication for ALL requests. Note that you can also set this per-URL. -If you set this you must also set the `password` option. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password to be used in conjunction with the username for HTTP authentication. - -[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] -===== `automatic_retries` - - * Value type is <> - * Default value is `1` - -How many times should the client retry a failing URL. We highly recommend NOT setting this value -to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! -Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. - -[id="{version}-plugins-{type}s-{plugin}-cacert"] -===== `cacert` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom X.509 CA (.pem certs) specify the path to that here - -[id="{version}-plugins-{type}s-{plugin}-client_cert"] -===== `client_cert` - - * Value type is <> - * There is no default value for this setting. - -If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here - -[id="{version}-plugins-{type}s-{plugin}-client_key"] -===== `client_key` - - * Value type is <> - * There is no default value for this setting. - -If you're using a client certificate specify the path to the encryption key here - -[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] -===== `connect_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for a connection to be established. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-cookies"] -===== `cookies` - - * Value type is <> - * Default value is `true` - -Enable cookie support. With this enabled the client will persist cookies -across requests as a normal web browser would. Enabled by default - -[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] -===== `follow_redirects` - - * Value type is <> - * Default value is `true` - -Should redirects be followed? Defaults to `true` - -[id="{version}-plugins-{type}s-{plugin}-keepalive"] -===== `keepalive` - - * Value type is <> - * Default value is `true` - -Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least -one with this to fix interactions with broken keepalive implementations. - -[id="{version}-plugins-{type}s-{plugin}-keystore"] -===== `keystore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! - -[id="{version}-plugins-{type}s-{plugin}-keystore_password"] -===== `keystore_password` - - * Value type is <> - * There is no default value for this setting. - -Specify the keystore password here. -Note, most .jks files created with keytool require a password! - -[id="{version}-plugins-{type}s-{plugin}-keystore_type"] -===== `keystore_type` - - * Value type is <> - * Default value is `"JKS"` - -Specify the keystore type here. 
One of `JKS` or `PKCS12`. Default is `JKS` - -[id="{version}-plugins-{type}s-{plugin}-metadata_target"] -===== `metadata_target` - - * Value type is <> - * Default value is `"@metadata"` - -If you'd like to work with the request/response metadata. -Set this value to the name of the field you'd like to store a nested -hash of metadata. - -[id="{version}-plugins-{type}s-{plugin}-pool_max"] -===== `pool_max` - - * Value type is <> - * Default value is `50` - -Max number of concurrent connections. Defaults to `50` - -[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] -===== `pool_max_per_route` - - * Value type is <> - * Default value is `25` - -Max number of concurrent connections to a single host. Defaults to `25` - -[id="{version}-plugins-{type}s-{plugin}-proxy"] -===== `proxy` - - * Value type is <> - * There is no default value for this setting. - -If you'd like to use an HTTP proxy . This supports multiple configuration syntaxes: - -1. Proxy host in form: `http://proxy.org:1234` -2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}` -3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}` - -[id="{version}-plugins-{type}s-{plugin}-request_timeout"] -===== `request_timeout` - - * Value type is <> - * Default value is `60` - -This module makes it easy to add a very fully configured HTTP client to logstash -based on [Manticore](https://github.com/cheald/manticore). -For an example of its usage see https://github.com/logstash-plugins/logstash-input-http_poller -Timeout (in seconds) for the entire request - -[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"] -===== `retry_non_idempotent` - - * Value type is <> - * Default value is `false` - -If `automatic_retries` is enabled this will cause non-idempotent HTTP verbs (such as POST) to be retried. - -[id="{version}-plugins-{type}s-{plugin}-schedule"] -===== `schedule` - - * Value type is <> - * There is no default value for this setting. - -Schedule of when to periodically poll from the urls -Format: A hash with - + key: "cron" | "every" | "in" | "at" - + value: string -Examples: - a) { "every" => "1h" } - b) { "cron" => "* * * * * UTC" } -See: rufus/scheduler for details about different schedule options and value string format - -[id="{version}-plugins-{type}s-{plugin}-socket_timeout"] -===== `socket_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for data on the socket. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -Define the target field for placing the received data. If this setting is omitted, the data will be stored at the root (top level) of the event. - -[id="{version}-plugins-{type}s-{plugin}-truststore"] -===== `truststore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs! - -[id="{version}-plugins-{type}s-{plugin}-truststore_password"] -===== `truststore_password` - - * Value type is <> - * There is no default value for this setting. - -Specify the truststore password here. -Note, most .jks files created with keytool require a password! - -[id="{version}-plugins-{type}s-{plugin}-truststore_type"] -===== `truststore_type` - - * Value type is <> - * Default value is `"JKS"` - -Specify the truststore type here. 
One of `JKS` or `PKCS12`. Default is `JKS` - -[id="{version}-plugins-{type}s-{plugin}-urls"] -===== `urls` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -A Hash of urls in this format : `"name" => "url"`. -The name and the url will be passed in the outputed event - -[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] -===== `validate_after_inactivity` - - * Value type is <> - * Default value is `200` - -How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. -# You may want to set this lower, possibly to 0 if you get connection errors regularly -Quoting the Apache commons docs (this client is based Apache Commmons): -'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.' -See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info] - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/http_poller-v4.0.3.asciidoc b/docs/versioned-plugins/inputs/http_poller-v4.0.3.asciidoc deleted file mode 100644 index 51b6ad95f..000000000 --- a/docs/versioned-plugins/inputs/http_poller-v4.0.3.asciidoc +++ /dev/null @@ -1,379 +0,0 @@ -:plugin: http_poller -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.0.3 -:release_date: 2017-09-20 -:changelog_url: https://github.com/logstash-plugins/logstash-input-http_poller/blob/v4.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Http_poller input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This Logstash input plugin allows you to call an HTTP API, decode the output of it into event(s), and -send them on their merry way. The idea behind this plugins came from a need to read springboot -metrics endpoint, instead of configuring jmx to monitor my java application memory/gc/ etc. - -==== Example -Reads from a list of urls and decodes the body of the response with a codec. -The config should look like this: - -[source,ruby] ----------------------------------- -input { - http_poller { - urls => { - test1 => "http://localhost:9200" - test2 => { - # Supports all options supported by ruby's Manticore HTTP client - method => get - user => "AzureDiamond" - password => "hunter2" - url => "http://localhost:9200/_cluster/health" - headers => { - Accept => "application/json" - } - } - } - request_timeout => 60 - # Supports "cron", "every", "at" and "in" schedules by rufus scheduler - schedule => { cron => "* * * * * UTC"} - codec => "json" - # A hash of request metadata info (timing, response headers, etc.) 
will be sent here - metadata_target => "http_poller_metadata" - } -} - -output { - stdout { - codec => rubydebug - } -} ----------------------------------- - -Using the HTTP poller with custom a custom CA or self signed cert. - -If you have a self signed cert you will need to convert your server's certificate to a valid# `.jks` or `.p12` file. An easy way to do it is to run the following one-liner, substituting your server's URL for the placeholder `MYURL` and `MYPORT`. - -[source,ruby] ----------------------------------- -openssl s_client -showcerts -connect MYURL:MYPORT /dev/null|openssl x509 -outform PEM > downloaded_cert.pem; keytool -import -alias test -file downloaded_cert.pem -keystore downloaded_truststore.jks ----------------------------------- - -The above snippet will create two files `downloaded_cert.pem` and `downloaded_truststore.jks`. You will be prompted to set a password for the `jks` file during this process. To configure logstash use a config like the one that follows. - - -[source,ruby] ----------------------------------- - http_poller { - urls => { - myurl => "https://myhostname:1234" - } - truststore => "/path/to/downloaded_truststore.jks" - truststore_password => "mypassword" - schedule => { cron => "* * * * * UTC"} - } ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Http_poller Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|no -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-urls>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -|======================================================================= - -Also see 
<<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * There is no default value for this setting. - -Username to use with HTTP authentication for ALL requests. Note that you can also set this per-URL. -If you set this you must also set the `password` option. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password to be used in conjunction with the username for HTTP authentication. - -[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] -===== `automatic_retries` - - * Value type is <> - * Default value is `1` - -How many times should the client retry a failing URL. We highly recommend NOT setting this value -to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! -Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. - -[id="{version}-plugins-{type}s-{plugin}-cacert"] -===== `cacert` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom X.509 CA (.pem certs) specify the path to that here - -[id="{version}-plugins-{type}s-{plugin}-client_cert"] -===== `client_cert` - - * Value type is <> - * There is no default value for this setting. - -If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here - -[id="{version}-plugins-{type}s-{plugin}-client_key"] -===== `client_key` - - * Value type is <> - * There is no default value for this setting. - -If you're using a client certificate specify the path to the encryption key here - -[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] -===== `connect_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for a connection to be established. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-cookies"] -===== `cookies` - - * Value type is <> - * Default value is `true` - -Enable cookie support. With this enabled the client will persist cookies -across requests as a normal web browser would. Enabled by default - -[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] -===== `follow_redirects` - - * Value type is <> - * Default value is `true` - -Should redirects be followed? Defaults to `true` - -[id="{version}-plugins-{type}s-{plugin}-keepalive"] -===== `keepalive` - - * Value type is <> - * Default value is `true` - -Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least -one with this to fix interactions with broken keepalive implementations. - -[id="{version}-plugins-{type}s-{plugin}-keystore"] -===== `keystore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! - -[id="{version}-plugins-{type}s-{plugin}-keystore_password"] -===== `keystore_password` - - * Value type is <> - * There is no default value for this setting. - -Specify the keystore password here. -Note, most .jks files created with keytool require a password! - -[id="{version}-plugins-{type}s-{plugin}-keystore_type"] -===== `keystore_type` - - * Value type is <> - * Default value is `"JKS"` - -Specify the keystore type here. One of `JKS` or `PKCS12`. 
Default is `JKS` - -[id="{version}-plugins-{type}s-{plugin}-metadata_target"] -===== `metadata_target` - - * Value type is <> - * Default value is `"@metadata"` - -If you'd like to work with the request/response metadata. -Set this value to the name of the field you'd like to store a nested -hash of metadata. - -[id="{version}-plugins-{type}s-{plugin}-pool_max"] -===== `pool_max` - - * Value type is <> - * Default value is `50` - -Max number of concurrent connections. Defaults to `50` - -[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] -===== `pool_max_per_route` - - * Value type is <> - * Default value is `25` - -Max number of concurrent connections to a single host. Defaults to `25` - -[id="{version}-plugins-{type}s-{plugin}-proxy"] -===== `proxy` - - * Value type is <> - * There is no default value for this setting. - -If you'd like to use an HTTP proxy . This supports multiple configuration syntaxes: - -1. Proxy host in form: `http://proxy.org:1234` -2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}` -3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}` - -[id="{version}-plugins-{type}s-{plugin}-request_timeout"] -===== `request_timeout` - - * Value type is <> - * Default value is `60` - -This module makes it easy to add a very fully configured HTTP client to logstash -based on [Manticore](https://github.com/cheald/manticore). -For an example of its usage see https://github.com/logstash-plugins/logstash-input-http_poller -Timeout (in seconds) for the entire request - -[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"] -===== `retry_non_idempotent` - - * Value type is <> - * Default value is `false` - -If `automatic_retries` is enabled this will cause non-idempotent HTTP verbs (such as POST) to be retried. - -[id="{version}-plugins-{type}s-{plugin}-schedule"] -===== `schedule` - - * Value type is <> - * There is no default value for this setting. - -Schedule of when to periodically poll from the urls -Format: A hash with - + key: "cron" | "every" | "in" | "at" - + value: string -Examples: - a) { "every" => "1h" } - b) { "cron" => "* * * * * UTC" } -See: rufus/scheduler for details about different schedule options and value string format - -[id="{version}-plugins-{type}s-{plugin}-socket_timeout"] -===== `socket_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for data on the socket. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -Define the target field for placing the received data. If this setting is omitted, the data will be stored at the root (top level) of the event. - -[id="{version}-plugins-{type}s-{plugin}-truststore"] -===== `truststore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs! - -[id="{version}-plugins-{type}s-{plugin}-truststore_password"] -===== `truststore_password` - - * Value type is <> - * There is no default value for this setting. - -Specify the truststore password here. -Note, most .jks files created with keytool require a password! - -[id="{version}-plugins-{type}s-{plugin}-truststore_type"] -===== `truststore_type` - - * Value type is <> - * Default value is `"JKS"` - -Specify the truststore type here. One of `JKS` or `PKCS12`. 
Default is `JKS` - -[id="{version}-plugins-{type}s-{plugin}-urls"] -===== `urls` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -A Hash of urls in this format : `"name" => "url"`. -The name and the url will be passed in the outputed event - -[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] -===== `validate_after_inactivity` - - * Value type is <> - * Default value is `200` - -How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. -# You may want to set this lower, possibly to 0 if you get connection errors regularly -Quoting the Apache commons docs (this client is based Apache Commmons): -'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.' -See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info] - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/http_poller-v4.0.4.asciidoc b/docs/versioned-plugins/inputs/http_poller-v4.0.4.asciidoc deleted file mode 100644 index 8edcf588a..000000000 --- a/docs/versioned-plugins/inputs/http_poller-v4.0.4.asciidoc +++ /dev/null @@ -1,379 +0,0 @@ -:plugin: http_poller -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.0.4 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-http_poller/blob/v4.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Http_poller input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This Logstash input plugin allows you to call an HTTP API, decode the output of it into event(s), and -send them on their merry way. The idea behind this plugins came from a need to read springboot -metrics endpoint, instead of configuring jmx to monitor my java application memory/gc/ etc. - -==== Example -Reads from a list of urls and decodes the body of the response with a codec. -The config should look like this: - -[source,ruby] ----------------------------------- -input { - http_poller { - urls => { - test1 => "http://localhost:9200" - test2 => { - # Supports all options supported by ruby's Manticore HTTP client - method => get - user => "AzureDiamond" - password => "hunter2" - url => "http://localhost:9200/_cluster/health" - headers => { - Accept => "application/json" - } - } - } - request_timeout => 60 - # Supports "cron", "every", "at" and "in" schedules by rufus scheduler - schedule => { cron => "* * * * * UTC"} - codec => "json" - # A hash of request metadata info (timing, response headers, etc.) 
will be sent here - metadata_target => "http_poller_metadata" - } -} - -output { - stdout { - codec => rubydebug - } -} ----------------------------------- - -Using the HTTP poller with custom a custom CA or self signed cert. - -If you have a self signed cert you will need to convert your server's certificate to a valid# `.jks` or `.p12` file. An easy way to do it is to run the following one-liner, substituting your server's URL for the placeholder `MYURL` and `MYPORT`. - -[source,ruby] ----------------------------------- -openssl s_client -showcerts -connect MYURL:MYPORT /dev/null|openssl x509 -outform PEM > downloaded_cert.pem; keytool -import -alias test -file downloaded_cert.pem -keystore downloaded_truststore.jks ----------------------------------- - -The above snippet will create two files `downloaded_cert.pem` and `downloaded_truststore.jks`. You will be prompted to set a password for the `jks` file during this process. To configure logstash use a config like the one that follows. - - -[source,ruby] ----------------------------------- - http_poller { - urls => { - myurl => "https://myhostname:1234" - } - truststore => "/path/to/downloaded_truststore.jks" - truststore_password => "mypassword" - schedule => { cron => "* * * * * UTC"} - } ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Http_poller Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|no -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-target>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-urls>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -|======================================================================= - -Also see 
<<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * There is no default value for this setting. - -Username to use with HTTP authentication for ALL requests. Note that you can also set this per-URL. -If you set this you must also set the `password` option. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password to be used in conjunction with the username for HTTP authentication. - -[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] -===== `automatic_retries` - - * Value type is <> - * Default value is `1` - -How many times should the client retry a failing URL. We highly recommend NOT setting this value -to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! -Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. - -[id="{version}-plugins-{type}s-{plugin}-cacert"] -===== `cacert` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom X.509 CA (.pem certs) specify the path to that here - -[id="{version}-plugins-{type}s-{plugin}-client_cert"] -===== `client_cert` - - * Value type is <> - * There is no default value for this setting. - -If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here - -[id="{version}-plugins-{type}s-{plugin}-client_key"] -===== `client_key` - - * Value type is <> - * There is no default value for this setting. - -If you're using a client certificate specify the path to the encryption key here - -[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] -===== `connect_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for a connection to be established. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-cookies"] -===== `cookies` - - * Value type is <> - * Default value is `true` - -Enable cookie support. With this enabled the client will persist cookies -across requests as a normal web browser would. Enabled by default - -[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] -===== `follow_redirects` - - * Value type is <> - * Default value is `true` - -Should redirects be followed? Defaults to `true` - -[id="{version}-plugins-{type}s-{plugin}-keepalive"] -===== `keepalive` - - * Value type is <> - * Default value is `true` - -Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least -one with this to fix interactions with broken keepalive implementations. - -[id="{version}-plugins-{type}s-{plugin}-keystore"] -===== `keystore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! - -[id="{version}-plugins-{type}s-{plugin}-keystore_password"] -===== `keystore_password` - - * Value type is <> - * There is no default value for this setting. - -Specify the keystore password here. -Note, most .jks files created with keytool require a password! - -[id="{version}-plugins-{type}s-{plugin}-keystore_type"] -===== `keystore_type` - - * Value type is <> - * Default value is `"JKS"` - -Specify the keystore type here. One of `JKS` or `PKCS12`. 
Default is `JKS` - -[id="{version}-plugins-{type}s-{plugin}-metadata_target"] -===== `metadata_target` - - * Value type is <> - * Default value is `"@metadata"` - -If you'd like to work with the request/response metadata. -Set this value to the name of the field you'd like to store a nested -hash of metadata. - -[id="{version}-plugins-{type}s-{plugin}-pool_max"] -===== `pool_max` - - * Value type is <> - * Default value is `50` - -Max number of concurrent connections. Defaults to `50` - -[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] -===== `pool_max_per_route` - - * Value type is <> - * Default value is `25` - -Max number of concurrent connections to a single host. Defaults to `25` - -[id="{version}-plugins-{type}s-{plugin}-proxy"] -===== `proxy` - - * Value type is <> - * There is no default value for this setting. - -If you'd like to use an HTTP proxy . This supports multiple configuration syntaxes: - -1. Proxy host in form: `http://proxy.org:1234` -2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}` -3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}` - -[id="{version}-plugins-{type}s-{plugin}-request_timeout"] -===== `request_timeout` - - * Value type is <> - * Default value is `60` - -This module makes it easy to add a very fully configured HTTP client to logstash -based on [Manticore](https://github.com/cheald/manticore). -For an example of its usage see https://github.com/logstash-plugins/logstash-input-http_poller -Timeout (in seconds) for the entire request - -[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"] -===== `retry_non_idempotent` - - * Value type is <> - * Default value is `false` - -If `automatic_retries` is enabled this will cause non-idempotent HTTP verbs (such as POST) to be retried. - -[id="{version}-plugins-{type}s-{plugin}-schedule"] -===== `schedule` - - * Value type is <> - * There is no default value for this setting. - -Schedule of when to periodically poll from the urls -Format: A hash with - + key: "cron" | "every" | "in" | "at" - + value: string -Examples: - a) { "every" => "1h" } - b) { "cron" => "* * * * * UTC" } -See: rufus/scheduler for details about different schedule options and value string format - -[id="{version}-plugins-{type}s-{plugin}-socket_timeout"] -===== `socket_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for data on the socket. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-target"] -===== `target` - - * Value type is <> - * There is no default value for this setting. - -Define the target field for placing the received data. If this setting is omitted, the data will be stored at the root (top level) of the event. - -[id="{version}-plugins-{type}s-{plugin}-truststore"] -===== `truststore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs! - -[id="{version}-plugins-{type}s-{plugin}-truststore_password"] -===== `truststore_password` - - * Value type is <> - * There is no default value for this setting. - -Specify the truststore password here. -Note, most .jks files created with keytool require a password! - -[id="{version}-plugins-{type}s-{plugin}-truststore_type"] -===== `truststore_type` - - * Value type is <> - * Default value is `"JKS"` - -Specify the truststore type here. One of `JKS` or `PKCS12`. 
Default is `JKS` - -[id="{version}-plugins-{type}s-{plugin}-urls"] -===== `urls` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -A Hash of urls in this format : `"name" => "url"`. -The name and the url will be passed in the outputed event - -[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] -===== `validate_after_inactivity` - - * Value type is <> - * Default value is `200` - -How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. -# You may want to set this lower, possibly to 0 if you get connection errors regularly -Quoting the Apache commons docs (this client is based Apache Commmons): -'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.' -See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info] - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/imap-index.asciidoc b/docs/versioned-plugins/inputs/imap-index.asciidoc deleted file mode 100644 index a0487c8a0..000000000 --- a/docs/versioned-plugins/inputs/imap-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: imap -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::imap-v3.0.5.asciidoc[] -include::imap-v3.0.4.asciidoc[] -include::imap-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/imap-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/imap-v3.0.3.asciidoc deleted file mode 100644 index 3f9c0d935..000000000 --- a/docs/versioned-plugins/inputs/imap-v3.0.3.asciidoc +++ /dev/null @@ -1,176 +0,0 @@ -:plugin: imap -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-imap/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Imap input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read mails from IMAP server - -Periodically scan an IMAP folder (`INBOX` by default) and move any read messages -to the trash. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Imap Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-check_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-content_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-delete>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-expunge>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fetch_count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-folder>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-lowercase_headers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-secure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-strip_attachments>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-verify_cert>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-check_interval"] -===== `check_interval` - - * Value type is <> - * Default value is `300` - - - -[id="{version}-plugins-{type}s-{plugin}-content_type"] -===== `content_type` - - * Value type is <> - * Default value is `"text/plain"` - -For multipart messages, use the first part that has this -content-type as the event message. - -[id="{version}-plugins-{type}s-{plugin}-delete"] -===== `delete` - - * Value type is <> - * Default value is `false` - - - -[id="{version}-plugins-{type}s-{plugin}-expunge"] -===== `expunge` - - * Value type is <> - * Default value is `false` - - - -[id="{version}-plugins-{type}s-{plugin}-fetch_count"] -===== `fetch_count` - - * Value type is <> - * Default value is `50` - - - -[id="{version}-plugins-{type}s-{plugin}-folder"] -===== `folder` - - * Value type is <> - * Default value is `"INBOX"` - - - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-lowercase_headers"] -===== `lowercase_headers` - - * Value type is <> - * Default value is `true` - - - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-secure"] -===== `secure` - - * Value type is <> - * Default value is `true` - - - -[id="{version}-plugins-{type}s-{plugin}-strip_attachments"] -===== `strip_attachments` - - * Value type is <> - * Default value is `false` - - - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. 
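-
-Putting the required settings together, a minimal configuration might look like this sketch (the host and credentials are placeholders):
-
-[source,ruby]
-----------------------------------
-input {
-  imap {
-    host => "mail.example.com"
-    user => "logstash@example.com"
-    password => "s3cret"
-    # Poll every 300 seconds (the default):
-    check_interval => 300
-  }
-}
-----------------------------------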
- - - -[id="{version}-plugins-{type}s-{plugin}-verify_cert"] -===== `verify_cert` - - * Value type is <> - * Default value is `true` - - - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/imap-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/imap-v3.0.4.asciidoc deleted file mode 100644 index 58778284b..000000000 --- a/docs/versioned-plugins/inputs/imap-v3.0.4.asciidoc +++ /dev/null @@ -1,176 +0,0 @@ -:plugin: imap -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-input-imap/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Imap input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read mails from IMAP server - -Periodically scan an IMAP folder (`INBOX` by default) and move any read messages -to the trash. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Imap Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-check_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-content_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-delete>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-expunge>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fetch_count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-folder>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-lowercase_headers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-secure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-strip_attachments>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-verify_cert>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-check_interval"] -===== `check_interval` - - * Value type is <> - * Default value is `300` - - - -[id="{version}-plugins-{type}s-{plugin}-content_type"] -===== `content_type` - - * Value type is <> - * Default value is `"text/plain"` - -For multipart messages, use the first part that has this -content-type as the event message. 
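-
-For example, to prefer the HTML part of multipart mail over the default plain-text part, a sketch (connection details are placeholders):
-
-[source,ruby]
-----------------------------------
-input {
-  imap {
-    host => "mail.example.com"
-    user => "logstash@example.com"
-    password => "s3cret"
-    # Use the HTML part of multipart messages as the event message:
-    content_type => "text/html"
-  }
-}
-----------------------------------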
- -[id="{version}-plugins-{type}s-{plugin}-delete"] -===== `delete` - - * Value type is <> - * Default value is `false` - - - -[id="{version}-plugins-{type}s-{plugin}-expunge"] -===== `expunge` - - * Value type is <> - * Default value is `false` - - - -[id="{version}-plugins-{type}s-{plugin}-fetch_count"] -===== `fetch_count` - - * Value type is <> - * Default value is `50` - - - -[id="{version}-plugins-{type}s-{plugin}-folder"] -===== `folder` - - * Value type is <> - * Default value is `"INBOX"` - - - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-lowercase_headers"] -===== `lowercase_headers` - - * Value type is <> - * Default value is `true` - - - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-secure"] -===== `secure` - - * Value type is <> - * Default value is `true` - - - -[id="{version}-plugins-{type}s-{plugin}-strip_attachments"] -===== `strip_attachments` - - * Value type is <> - * Default value is `false` - - - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-verify_cert"] -===== `verify_cert` - - * Value type is <> - * Default value is `true` - - - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/imap-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/imap-v3.0.5.asciidoc deleted file mode 100644 index d032c9279..000000000 --- a/docs/versioned-plugins/inputs/imap-v3.0.5.asciidoc +++ /dev/null @@ -1,176 +0,0 @@ -:plugin: imap -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-imap/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Imap input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read mails from IMAP server - -Periodically scan an IMAP folder (`INBOX` by default) and move any read messages -to the trash. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Imap Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-check_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-content_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-delete>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-expunge>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fetch_count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-folder>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-lowercase_headers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-secure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-strip_attachments>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-verify_cert>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-check_interval"] -===== `check_interval` - - * Value type is <> - * Default value is `300` - - - -[id="{version}-plugins-{type}s-{plugin}-content_type"] -===== `content_type` - - * Value type is <> - * Default value is `"text/plain"` - -For multipart messages, use the first part that has this -content-type as the event message. - -[id="{version}-plugins-{type}s-{plugin}-delete"] -===== `delete` - - * Value type is <> - * Default value is `false` - - - -[id="{version}-plugins-{type}s-{plugin}-expunge"] -===== `expunge` - - * Value type is <> - * Default value is `false` - - - -[id="{version}-plugins-{type}s-{plugin}-fetch_count"] -===== `fetch_count` - - * Value type is <> - * Default value is `50` - - - -[id="{version}-plugins-{type}s-{plugin}-folder"] -===== `folder` - - * Value type is <> - * Default value is `"INBOX"` - - - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-lowercase_headers"] -===== `lowercase_headers` - - * Value type is <> - * Default value is `true` - - - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-secure"] -===== `secure` - - * Value type is <> - * Default value is `true` - - - -[id="{version}-plugins-{type}s-{plugin}-strip_attachments"] -===== `strip_attachments` - - * Value type is <> - * Default value is `false` - - - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. 
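-
-As an illustrative sketch, the options above can be combined so that mail is removed once it has been read (connection details are placeholders; the comments state assumed semantics):
-
-[source,ruby]
-----------------------------------
-input {
-  imap {
-    host => "mail.example.com"
-    user => "logstash@example.com"
-    password => "s3cret"
-    fetch_count => 50   # messages fetched per check
-    delete => true      # mark fetched messages as deleted
-    expunge => true     # expunge deleted messages from the folder
-  }
-}
-----------------------------------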
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-verify_cert"]
-===== `verify_cert`
-
- * Value type is <>
- * Default value is `true`
-
-
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/irc-index.asciidoc b/docs/versioned-plugins/inputs/irc-index.asciidoc
deleted file mode 100644
index 0f778357c..000000000
--- a/docs/versioned-plugins/inputs/irc-index.asciidoc
+++ /dev/null
@@ -1,18 +0,0 @@
-:plugin: irc
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-12-07
-| <> | 2017-11-14
-| <> | 2017-08-15
-| <> | 2017-06-23
-|=======================================================================
-
-include::irc-v3.0.6.asciidoc[]
-include::irc-v3.0.5.asciidoc[]
-include::irc-v3.0.4.asciidoc[]
-include::irc-v3.0.3.asciidoc[]
-
diff --git a/docs/versioned-plugins/inputs/irc-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/irc-v3.0.3.asciidoc
deleted file mode 100644
index 7cd60f64b..000000000
--- a/docs/versioned-plugins/inputs/irc-v3.0.3.asciidoc
+++ /dev/null
@@ -1,152 +0,0 @@
-:plugin: irc
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.3
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-input-irc/blob/v3.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Irc input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Read events from an IRC Server.
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Irc Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-catch_all>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-channels>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-get_stats>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-nick>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-real>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-secure>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-stats_interval>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-catch_all"]
-===== `catch_all`
-
- * Value type is <>
- * Default value is `false`
-
-Catch all IRC channel/user events, not just channel messages
-
-[id="{version}-plugins-{type}s-{plugin}-channels"]
-===== `channels`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Channels to join and read messages from.
- -These should be full channel names including the '#' symbol, such as -"#logstash". - -For passworded channels, add a space and the channel password, such as -"#logstash password". - - -[id="{version}-plugins-{type}s-{plugin}-get_stats"] -===== `get_stats` - - * Value type is <> - * Default value is `false` - -Gather and send user counts for channels - this requires catch_all and will force it - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Host of the IRC Server to connect to. - -[id="{version}-plugins-{type}s-{plugin}-nick"] -===== `nick` - - * Value type is <> - * Default value is `"logstash"` - -IRC Nickname - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -IRC Server password - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `6667` - -Port for the IRC Server - -[id="{version}-plugins-{type}s-{plugin}-real"] -===== `real` - - * Value type is <> - * Default value is `"logstash"` - -IRC Real name - -[id="{version}-plugins-{type}s-{plugin}-secure"] -===== `secure` - - * Value type is <> - * Default value is `false` - -Set this to true to enable SSL. - -[id="{version}-plugins-{type}s-{plugin}-stats_interval"] -===== `stats_interval` - - * Value type is <> - * Default value is `5` - -How often in minutes to get the user count stats - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * Default value is `"logstash"` - -IRC Username - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/irc-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/irc-v3.0.4.asciidoc deleted file mode 100644 index 7146b93d4..000000000 --- a/docs/versioned-plugins/inputs/irc-v3.0.4.asciidoc +++ /dev/null @@ -1,152 +0,0 @@ -:plugin: irc -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-input-irc/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Irc input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read events from an IRC Server. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Irc Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-catch_all>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-channels>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-get_stats>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-nick>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-real>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-secure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-stats_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-catch_all"] -===== `catch_all` - - * Value type is <> - * Default value is `false` - -Catch all IRC channel/user events not just channel messages - -[id="{version}-plugins-{type}s-{plugin}-channels"] -===== `channels` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Channels to join and read messages from. - -These should be full channel names including the '#' symbol, such as -"#logstash". - -For passworded channels, add a space and the channel password, such as -"#logstash password". - - -[id="{version}-plugins-{type}s-{plugin}-get_stats"] -===== `get_stats` - - * Value type is <> - * Default value is `false` - -Gather and send user counts for channels - this requires catch_all and will force it - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Host of the IRC Server to connect to. - -[id="{version}-plugins-{type}s-{plugin}-nick"] -===== `nick` - - * Value type is <> - * Default value is `"logstash"` - -IRC Nickname - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -IRC Server password - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `6667` - -Port for the IRC Server - -[id="{version}-plugins-{type}s-{plugin}-real"] -===== `real` - - * Value type is <> - * Default value is `"logstash"` - -IRC Real name - -[id="{version}-plugins-{type}s-{plugin}-secure"] -===== `secure` - - * Value type is <> - * Default value is `false` - -Set this to true to enable SSL. - -[id="{version}-plugins-{type}s-{plugin}-stats_interval"] -===== `stats_interval` - - * Value type is <> - * Default value is `5` - -How often in minutes to get the user count stats - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * Default value is `"logstash"` - -IRC Username - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/irc-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/irc-v3.0.5.asciidoc deleted file mode 100644 index 49a8e3929..000000000 --- a/docs/versioned-plugins/inputs/irc-v3.0.5.asciidoc +++ /dev/null @@ -1,152 +0,0 @@ -:plugin: irc -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-11-14 -:changelog_url: https://github.com/logstash-plugins/logstash-input-irc/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Irc input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read events from an IRC Server. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Irc Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-catch_all>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-channels>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-get_stats>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-nick>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-real>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-secure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-stats_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-catch_all"] -===== `catch_all` - - * Value type is <> - * Default value is `false` - -Catch all IRC channel/user events not just channel messages - -[id="{version}-plugins-{type}s-{plugin}-channels"] -===== `channels` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Channels to join and read messages from. - -These should be full channel names including the '#' symbol, such as -"#logstash". - -For passworded channels, add a space and the channel password, such as -"#logstash password". - - -[id="{version}-plugins-{type}s-{plugin}-get_stats"] -===== `get_stats` - - * Value type is <> - * Default value is `false` - -Gather and send user counts for channels - this requires catch_all and will force it - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Host of the IRC Server to connect to. - -[id="{version}-plugins-{type}s-{plugin}-nick"] -===== `nick` - - * Value type is <> - * Default value is `"logstash"` - -IRC Nickname - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -IRC Server password - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `6667` - -Port for the IRC Server - -[id="{version}-plugins-{type}s-{plugin}-real"] -===== `real` - - * Value type is <> - * Default value is `"logstash"` - -IRC Real name - -[id="{version}-plugins-{type}s-{plugin}-secure"] -===== `secure` - - * Value type is <> - * Default value is `false` - -Set this to true to enable SSL. 
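-For example, a minimal sketch of this input connecting over SSL (the server and
-channel values are placeholders, not defaults):
-
-[source,ruby]
-----------------------------------
-input {
-  irc {
-    host => "irc.example.org"   # placeholder IRC server
-    channels => ["#logstash"]
-    secure => true              # enable SSL
-  }
-}
-----------------------------------
-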
- -[id="{version}-plugins-{type}s-{plugin}-stats_interval"] -===== `stats_interval` - - * Value type is <> - * Default value is `5` - -How often in minutes to get the user count stats - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * Default value is `"logstash"` - -IRC Username - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/irc-v3.0.6.asciidoc b/docs/versioned-plugins/inputs/irc-v3.0.6.asciidoc deleted file mode 100644 index e5f3a11f9..000000000 --- a/docs/versioned-plugins/inputs/irc-v3.0.6.asciidoc +++ /dev/null @@ -1,152 +0,0 @@ -:plugin: irc -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.6 -:release_date: 2017-12-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-irc/blob/v3.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Irc input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read events from an IRC Server. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Irc Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-catch_all>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-channels>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-get_stats>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-nick>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-real>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-secure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-stats_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-catch_all"] -===== `catch_all` - - * Value type is <> - * Default value is `false` - -Catch all IRC channel/user events not just channel messages - -[id="{version}-plugins-{type}s-{plugin}-channels"] -===== `channels` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Channels to join and read messages from. - -These should be full channel names including the '#' symbol, such as -"#logstash". - -For passworded channels, add a space and the channel password, such as -"#logstash password". - - -[id="{version}-plugins-{type}s-{plugin}-get_stats"] -===== `get_stats` - - * Value type is <> - * Default value is `false` - -Gather and send user counts for channels - this requires catch_all and will force it - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. 
- * Value type is <>
- * There is no default value for this setting.
-
-Host of the IRC Server to connect to.
-
-[id="{version}-plugins-{type}s-{plugin}-nick"]
-===== `nick`
-
- * Value type is <>
- * Default value is `"logstash"`
-
-IRC Nickname
-
-[id="{version}-plugins-{type}s-{plugin}-password"]
-===== `password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-IRC Server password
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <>
- * Default value is `6667`
-
-Port for the IRC Server
-
-[id="{version}-plugins-{type}s-{plugin}-real"]
-===== `real`
-
- * Value type is <>
- * Default value is `"logstash"`
-
-IRC Real name
-
-[id="{version}-plugins-{type}s-{plugin}-secure"]
-===== `secure`
-
- * Value type is <>
- * Default value is `false`
-
-Set this to true to enable SSL.
-
-[id="{version}-plugins-{type}s-{plugin}-stats_interval"]
-===== `stats_interval`
-
- * Value type is <>
- * Default value is `5`
-
-How often in minutes to get the user count stats
-
-[id="{version}-plugins-{type}s-{plugin}-user"]
-===== `user`
-
- * Value type is <>
- * Default value is `"logstash"`
-
-IRC Username
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/jdbc-index.asciidoc b/docs/versioned-plugins/inputs/jdbc-index.asciidoc
deleted file mode 100644
index 743f4c280..000000000
--- a/docs/versioned-plugins/inputs/jdbc-index.asciidoc
+++ /dev/null
@@ -1,26 +0,0 @@
-:plugin: jdbc
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-12-14
-| <> | 2017-12-07
-| <> | 2017-11-07
-| <> | 2017-10-27
-| <> | 2017-08-21
-| <> | 2017-08-15
-| <> | 2017-07-25
-| <> | 2017-06-23
-|=======================================================================
-
-include::jdbc-v4.3.3.asciidoc[]
-include::jdbc-v4.3.2.asciidoc[]
-include::jdbc-v4.3.1.asciidoc[]
-include::jdbc-v4.3.0.asciidoc[]
-include::jdbc-v4.2.4.asciidoc[]
-include::jdbc-v4.2.3.asciidoc[]
-include::jdbc-v4.2.2.asciidoc[]
-include::jdbc-v4.2.1.asciidoc[]
-
diff --git a/docs/versioned-plugins/inputs/jdbc-v4.2.1.asciidoc b/docs/versioned-plugins/inputs/jdbc-v4.2.1.asciidoc
deleted file mode 100644
index c71364058..000000000
--- a/docs/versioned-plugins/inputs/jdbc-v4.2.1.asciidoc
+++ /dev/null
@@ -1,486 +0,0 @@
-:plugin: jdbc
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.2.1
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-input-jdbc/blob/v4.2.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Jdbc input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This plugin was created as a way to ingest data from any database
-with a JDBC interface into Logstash. You can periodically schedule ingestion
-using a cron syntax (see `schedule` setting) or run the query one time to load
-data into Logstash. Each row in the resultset becomes a single event.
-Columns in the resultset are converted into fields in the event.
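-As a quick illustration of the one-time mode, here is a sketch of an unscheduled
-load (the scheduled mode is shown under Usage below; the driver jar, credentials,
-and table are placeholders, not defaults):
-
-[source,ruby]
-------------------------------------------------------------------------------
-input {
-  jdbc {
-    jdbc_driver_library => "mysql-connector-java-5.1.36-bin.jar"  # placeholder path
-    jdbc_driver_class => "com.mysql.jdbc.Driver"
-    jdbc_connection_string => "jdbc:mysql://localhost:3306/mydb"  # placeholder database
-    jdbc_user => "mysql"
-    # no `schedule` given, so the statement runs exactly once
-    statement => "SELECT * from songs"
-  }
-}
-------------------------------------------------------------------------------
-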
-
-==== Drivers
-
-This plugin does not come packaged with JDBC driver libraries. The desired
-JDBC driver library must be explicitly passed in to the plugin using the
-`jdbc_driver_library` configuration option.
-
-==== Scheduling
-
-Input from this plugin can be scheduled to run periodically according to a specific
-schedule. This scheduling syntax is powered by https://github.com/jmettraux/rufus-scheduler[rufus-scheduler].
-The syntax is cron-like with some extensions specific to Rufus (e.g. timezone support).
-
-Examples:
-
-|==========================================================
-| `* 5 * 1-3 *` | will execute every minute of 5am every day of January through March.
-| `0 * * * *` | will execute on the 0th minute of every hour every day.
-| `0 6 * * * America/Chicago` | will execute at 6:00am (UTC/GMT -5) every day.
-|==========================================================
-
-
-Further documentation describing this syntax can be found https://github.com/jmettraux/rufus-scheduler#parsing-cronlines-and-time-strings[here].
-
-==== State
-
-The plugin will persist the `sql_last_value` parameter in the form of a
-metadata file stored in the configured `last_run_metadata_path`. Upon query execution,
-this file will be updated with the current value of `sql_last_value`. Next time
-the pipeline starts up, this value will be updated by reading from the file. If
-`clean_run` is set to true, this value will be ignored and `sql_last_value` will be
-set to Jan 1, 1970, or 0 if `use_column_value` is true, as if no query has ever been executed.
-
-==== Dealing With Large Result-sets
-
-Many JDBC drivers use the `fetch_size` parameter to limit how many
-results are pre-fetched at a time from the cursor into the client's cache
-before retrieving more results from the result-set. This is configured in
-this plugin using the `jdbc_fetch_size` configuration option. No fetch size
-is set by default in this plugin, so the specific driver's default size will
-be used.
-
-==== Usage
-
-Here is an example of setting up the plugin to fetch data from a MySQL database.
-First, we place the appropriate JDBC driver library in our current
-path (this can be placed anywhere on your filesystem). In this example, we connect to
-the 'mydb' database using the user 'mysql' and wish to input all rows in the 'songs'
-table that match a specific artist. The following example demonstrates a possible
-Logstash configuration for this. The `schedule` option in this example will
-instruct the plugin to execute this input statement on the minute, every minute.
-
-[source,ruby]
-------------------------------------------------------------------------------
-input {
-  jdbc {
-    jdbc_driver_library => "mysql-connector-java-5.1.36-bin.jar"
-    jdbc_driver_class => "com.mysql.jdbc.Driver"
-    jdbc_connection_string => "jdbc:mysql://localhost:3306/mydb"
-    jdbc_user => "mysql"
-    parameters => { "favorite_artist" => "Beethoven" }
-    schedule => "* * * * *"
-    statement => "SELECT * from songs where artist = :favorite_artist"
-  }
-}
-------------------------------------------------------------------------------
-
-==== Configuring SQL statement
-
-A SQL statement is required for this input. It can be passed in via the
-`statement` option in the form of a string, or read from a file (`statement_filepath`). The file
-option is typically used when the SQL statement is large or cumbersome to supply in the config.
-The file option only supports one SQL statement. The plugin will only accept one of the options.
-It cannot read a statement from a file as well as from the `statement` configuration parameter. - -==== Configuring multiple SQL statements - -Configuring multiple SQL statements is useful when there is a need to query and ingest data -from different database tables or views. It is possible to define separate Logstash -configuration files for each statement or to define multiple statements in a single configuration -file. When using multiple statements in a single Logstash configuration file, each statement -has to be defined as a separate jdbc input (including jdbc driver, connection string and other -required parameters). - -Please note that if any of the statements use the `sql_last_value` parameter (e.g. for -ingesting only data changed since last run), each input should define its own -`last_run_metadata_path` parameter. Failure to do so will result in undesired behaviour, as -all inputs will store their state to the same (default) metadata file, effectively -overwriting each other's `sql_last_value`. - -==== Predefined Parameters - -Some parameters are built-in and can be used from within your queries. -Here is the list: - -|========================================================== -|sql_last_value | The value used to calculate which rows to query. Before any query is run, -this is set to Thursday, 1 January 1970, or 0 if `use_column_value` is true and -`tracking_column` is set. It is updated accordingly after subsequent queries are run. -|========================================================== - -Example: -[source,ruby] ---------------------------------------------------------------------------------------------------- -input { - jdbc { - statement => "SELECT id, mycolumn1, mycolumn2 FROM my_table WHERE id > :sql_last_value" - use_column_value => true - tracking_column => "id" - # ... other configuration bits - } -} ---------------------------------------------------------------------------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Jdbc Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-clean_run>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-columns_charset>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_connection_string>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_default_timezone>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_class>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_library>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_fetch_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_page_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_password_filepath>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_user>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_validate_connection>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-last_run_metadata_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-lowercase_column_names>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-record_last_run>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sequel_opts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sql_log_level>> |<>, one of `["fatal", "error", "warn", "info", "debug"]`|No -| <<{version}-plugins-{type}s-{plugin}-statement>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-statement_filepath>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-tracking_column>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tracking_column_type>> |<>, one of `["numeric", "timestamp"]`|No -| <<{version}-plugins-{type}s-{plugin}-use_column_value>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-clean_run"] -===== `clean_run` - - * Value type is <> - * Default value is `false` - -Whether the previous run state should be preserved - -[id="{version}-plugins-{type}s-{plugin}-columns_charset"] -===== `columns_charset` - - * Value type is <> - * Default value is `{}` - -The character encoding for specific columns. This option will override the `:charset` option -for the specified columns. - -Example: -[source,ruby] -------------------------------------------------------- -input { - jdbc { - ... - columns_charset => { "column0" => "ISO-8859-1" } - ... - } -} -------------------------------------------------------- -this will only convert column0 that has ISO-8859-1 as an original encoding. 
-
-[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts"]
-===== `connection_retry_attempts`
-
- * Value type is <>
- * Default value is `1`
-
-Maximum number of times to try connecting to the database
-
-[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time"]
-===== `connection_retry_attempts_wait_time`
-
- * Value type is <>
- * Default value is `0.5`
-
-Number of seconds to sleep between connection attempts
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_connection_string"]
-===== `jdbc_connection_string`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC connection string
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_default_timezone"]
-===== `jdbc_default_timezone`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Timezone conversion.
-SQL does not allow for timezone data in timestamp fields. This plugin will automatically
-convert your SQL timestamp fields to Logstash timestamps, in relative UTC time in ISO8601 format.
-
-Using this setting will manually assign a specified timezone offset, instead
-of using the timezone setting of the local machine. You must use a canonical
-timezone, *America/Denver*, for example.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_class"]
-===== `jdbc_driver_class`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC driver class to load, for example, "org.apache.derby.jdbc.ClientDriver"
-NB per https://github.com/logstash-plugins/logstash-input-jdbc/issues/43, if you are using
-the Oracle JDBC driver (ojdbc6.jar), the correct `jdbc_driver_class` is `"Java::oracle.jdbc.driver.OracleDriver"`
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_library"]
-===== `jdbc_driver_library`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to the third-party JDBC driver library. In case of multiple libraries being
-required, you can pass them separated by a comma.
-
-If not provided, the plugin will look for the driver class in the Logstash Java classpath.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_fetch_size"]
-===== `jdbc_fetch_size`
-
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC fetch size. If not provided, the respective driver's default will be used
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_page_size"]
-===== `jdbc_page_size`
-
- * Value type is <>
- * Default value is `100000`
-
-JDBC page size
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled"]
-===== `jdbc_paging_enabled`
-
- * Value type is <>
- * Default value is `false`
-
-JDBC enable paging
-
-This will cause a SQL statement to be broken up into multiple queries.
-Each query will use limits and offsets to collectively retrieve the full
-result-set. The limit size is set with `jdbc_page_size`.
-
-Be aware that ordering is not guaranteed between queries.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_password"]
-===== `jdbc_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC password
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_password_filepath"]
-===== `jdbc_password_filepath`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to a file containing the JDBC password
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout"]
-===== `jdbc_pool_timeout`
-
- * Value type is <>
- * Default value is `5`
-
-Connection pool configuration.
-The number of seconds to wait to acquire a connection before raising a `PoolTimeoutError` (default 5)
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_user"]
-===== `jdbc_user`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC user
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_validate_connection"]
-===== `jdbc_validate_connection`
-
- * Value type is <>
- * Default value is `false`
-
-Connection pool configuration.
-Validate connection before use.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout"]
-===== `jdbc_validation_timeout`
-
- * Value type is <>
- * Default value is `3600`
-
-Connection pool configuration.
-How often to validate a connection (in seconds)
-
-[id="{version}-plugins-{type}s-{plugin}-last_run_metadata_path"]
-===== `last_run_metadata_path`
-
- * Value type is <>
- * Default value is `"/home/ph/.logstash_jdbc_last_run"`
-
-Path to file with last run time
-
-[id="{version}-plugins-{type}s-{plugin}-lowercase_column_names"]
-===== `lowercase_column_names`
-
- * Value type is <>
- * Default value is `true`
-
-Whether to force the lowercasing of identifier fields
-
-[id="{version}-plugins-{type}s-{plugin}-parameters"]
-===== `parameters`
-
- * Value type is <>
- * Default value is `{}`
-
-Hash of query parameters, for example `{ "target_id" => "321" }`
-
-[id="{version}-plugins-{type}s-{plugin}-record_last_run"]
-===== `record_last_run`
-
- * Value type is <>
- * Default value is `true`
-
-Whether to save state in `last_run_metadata_path`
-
-[id="{version}-plugins-{type}s-{plugin}-schedule"]
-===== `schedule`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Schedule for periodically running the statement, in cron format,
-for example: "* * * * *" (execute the query every minute, on the minute)
-
-There is no schedule by default. If no schedule is given, then the statement is run
-exactly once.
-
-[id="{version}-plugins-{type}s-{plugin}-sequel_opts"]
-===== `sequel_opts`
-
- * Value type is <>
- * Default value is `{}`
-
-General/Vendor-specific Sequel configuration options.
-
-An example of an optional connection pool configuration is
-`max_connections` - the maximum number of connections in the connection pool.
-
-Examples of vendor-specific options can be found on this
-documentation page: https://github.com/jeremyevans/sequel/blob/master/doc/opening_databases.rdoc
-
-[id="{version}-plugins-{type}s-{plugin}-sql_log_level"]
-===== `sql_log_level`
-
- * Value can be any of: `fatal`, `error`, `warn`, `info`, `debug`
- * Default value is `"info"`
-
-Log level at which to log SQL queries. The accepted values are the common ones: `fatal`, `error`,
-`warn`, `info`, and `debug`. The default value is `info`.
-
-[id="{version}-plugins-{type}s-{plugin}-statement"]
-===== `statement`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Statement to execute. If undefined, Logstash will complain, even if the codec is unused.
-
-To use parameters, use named parameter syntax.
-For example:
-
-[source, ruby]
-----------------------------------------------
-"SELECT * FROM MYTABLE WHERE id = :target_id"
-----------------------------------------------
-
-Here, `:target_id` is a named parameter. You can configure named parameters
-with the `parameters` setting.
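-Putting `statement` and `parameters` together, here is a sketch of a parameterized
-query (the table and parameter names are hypothetical):
-
-[source,ruby]
-------------------------------------------------------
-input {
-  jdbc {
-    # ...connection settings omitted...
-    parameters => { "target_id" => "321" }
-    statement => "SELECT * FROM MYTABLE WHERE id = :target_id"
-  }
-}
-------------------------------------------------------
-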
-
-[id="{version}-plugins-{type}s-{plugin}-statement_filepath"]
-===== `statement_filepath`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path of file containing statement to execute
-
-[id="{version}-plugins-{type}s-{plugin}-tracking_column"]
-===== `tracking_column`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If tracking column value rather than timestamp, the column whose value is to be tracked
-
-[id="{version}-plugins-{type}s-{plugin}-tracking_column_type"]
-===== `tracking_column_type`
-
- * Value can be any of: `numeric`, `timestamp`
- * Default value is `"numeric"`
-
-Type of tracking column. Currently only "numeric" and "timestamp" are supported
-
-[id="{version}-plugins-{type}s-{plugin}-use_column_value"]
-===== `use_column_value`
-
- * Value type is <>
- * Default value is `false`
-
-Use an incremental column value rather than a timestamp
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/jdbc-v4.2.2.asciidoc b/docs/versioned-plugins/inputs/jdbc-v4.2.2.asciidoc
deleted file mode 100644
index 597a8b55e..000000000
--- a/docs/versioned-plugins/inputs/jdbc-v4.2.2.asciidoc
+++ /dev/null
@@ -1,486 +0,0 @@
-:plugin: jdbc
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.2.2
-:release_date: 2017-07-25
-:changelog_url: https://github.com/logstash-plugins/logstash-input-jdbc/blob/v4.2.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Jdbc input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This plugin was created as a way to ingest data from any database
-with a JDBC interface into Logstash. You can periodically schedule ingestion
-using a cron syntax (see `schedule` setting) or run the query one time to load
-data into Logstash. Each row in the resultset becomes a single event.
-Columns in the resultset are converted into fields in the event.
-
-==== Drivers
-
-This plugin does not come packaged with JDBC driver libraries. The desired
-JDBC driver library must be explicitly passed in to the plugin using the
-`jdbc_driver_library` configuration option.
-
-==== Scheduling
-
-Input from this plugin can be scheduled to run periodically according to a specific
-schedule. This scheduling syntax is powered by https://github.com/jmettraux/rufus-scheduler[rufus-scheduler].
-The syntax is cron-like with some extensions specific to Rufus (e.g. timezone support).
-
-Examples:
-
-|==========================================================
-| `* 5 * 1-3 *` | will execute every minute of 5am every day of January through March.
-| `0 * * * *` | will execute on the 0th minute of every hour every day.
-| `0 6 * * * America/Chicago` | will execute at 6:00am (UTC/GMT -5) every day.
-|==========================================================
-
-
-Further documentation describing this syntax can be found https://github.com/jmettraux/rufus-scheduler#parsing-cronlines-and-time-strings[here].
-
-==== State
-
-The plugin will persist the `sql_last_value` parameter in the form of a
-metadata file stored in the configured `last_run_metadata_path`. Upon query execution,
-this file will be updated with the current value of `sql_last_value`. Next time
-the pipeline starts up, this value will be updated by reading from the file. If
-`clean_run` is set to true, this value will be ignored and `sql_last_value` will be
-set to Jan 1, 1970, or 0 if `use_column_value` is true, as if no query has ever been executed.
-
-==== Dealing With Large Result-sets
-
-Many JDBC drivers use the `fetch_size` parameter to limit how many
-results are pre-fetched at a time from the cursor into the client's cache
-before retrieving more results from the result-set. This is configured in
-this plugin using the `jdbc_fetch_size` configuration option. No fetch size
-is set by default in this plugin, so the specific driver's default size will
-be used.
-
-==== Usage
-
-Here is an example of setting up the plugin to fetch data from a MySQL database.
-First, we place the appropriate JDBC driver library in our current
-path (this can be placed anywhere on your filesystem). In this example, we connect to
-the 'mydb' database using the user 'mysql' and wish to input all rows in the 'songs'
-table that match a specific artist. The following example demonstrates a possible
-Logstash configuration for this. The `schedule` option in this example will
-instruct the plugin to execute this input statement on the minute, every minute.
-
-[source,ruby]
-------------------------------------------------------------------------------
-input {
-  jdbc {
-    jdbc_driver_library => "mysql-connector-java-5.1.36-bin.jar"
-    jdbc_driver_class => "com.mysql.jdbc.Driver"
-    jdbc_connection_string => "jdbc:mysql://localhost:3306/mydb"
-    jdbc_user => "mysql"
-    parameters => { "favorite_artist" => "Beethoven" }
-    schedule => "* * * * *"
-    statement => "SELECT * from songs where artist = :favorite_artist"
-  }
-}
-------------------------------------------------------------------------------
-
-==== Configuring SQL statement
-
-A SQL statement is required for this input. It can be passed in via the
-`statement` option in the form of a string, or read from a file (`statement_filepath`). The file
-option is typically used when the SQL statement is large or cumbersome to supply in the config.
-The file option only supports one SQL statement. The plugin will only accept one of the options.
-It cannot read a statement from a file as well as from the `statement` configuration parameter.
-
-==== Configuring multiple SQL statements
-
-Configuring multiple SQL statements is useful when there is a need to query and ingest data
-from different database tables or views. It is possible to define separate Logstash
-configuration files for each statement or to define multiple statements in a single configuration
-file. When using multiple statements in a single Logstash configuration file, each statement
-has to be defined as a separate jdbc input (including jdbc driver, connection string and other
-required parameters).
-
-Please note that if any of the statements use the `sql_last_value` parameter (e.g. for
-ingesting only data changed since last run), each input should define its own
-`last_run_metadata_path` parameter. Failure to do so will result in undesired behaviour, as
-all inputs will store their state to the same (default) metadata file, effectively
-overwriting each other's `sql_last_value`.
-
-==== Predefined Parameters
-
-Some parameters are built-in and can be used from within your queries.
-Here is the list: - -|========================================================== -|sql_last_value | The value used to calculate which rows to query. Before any query is run, -this is set to Thursday, 1 January 1970, or 0 if `use_column_value` is true and -`tracking_column` is set. It is updated accordingly after subsequent queries are run. -|========================================================== - -Example: -[source,ruby] ---------------------------------------------------------------------------------------------------- -input { - jdbc { - statement => "SELECT id, mycolumn1, mycolumn2 FROM my_table WHERE id > :sql_last_value" - use_column_value => true - tracking_column => "id" - # ... other configuration bits - } -} ---------------------------------------------------------------------------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Jdbc Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-clean_run>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-columns_charset>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_connection_string>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_default_timezone>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_class>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_library>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_fetch_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_page_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_password_filepath>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_user>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_validate_connection>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-last_run_metadata_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-lowercase_column_names>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-record_last_run>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sequel_opts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sql_log_level>> |<>, one of `["fatal", "error", "warn", "info", "debug"]`|No -| <<{version}-plugins-{type}s-{plugin}-statement>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-statement_filepath>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-tracking_column>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tracking_column_type>> |<>, one of `["numeric", "timestamp"]`|No -| <<{version}-plugins-{type}s-{plugin}-use_column_value>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. 
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-clean_run"]
-===== `clean_run`
-
- * Value type is <>
- * Default value is `false`
-
-Whether the previous run state should be preserved
-
-[id="{version}-plugins-{type}s-{plugin}-columns_charset"]
-===== `columns_charset`
-
- * Value type is <>
- * Default value is `{}`
-
-The character encoding for specific columns. This option will override the `:charset` option
-for the specified columns.
-
-Example:
-[source,ruby]
--------------------------------------------------------
-input {
-  jdbc {
-    ...
-    columns_charset => { "column0" => "ISO-8859-1" }
-    ...
-  }
-}
--------------------------------------------------------
-This will only convert column0 that has ISO-8859-1 as an original encoding.
-
-[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts"]
-===== `connection_retry_attempts`
-
- * Value type is <>
- * Default value is `1`
-
-Maximum number of times to try connecting to the database
-
-[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time"]
-===== `connection_retry_attempts_wait_time`
-
- * Value type is <>
- * Default value is `0.5`
-
-Number of seconds to sleep between connection attempts
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_connection_string"]
-===== `jdbc_connection_string`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC connection string
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_default_timezone"]
-===== `jdbc_default_timezone`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Timezone conversion.
-SQL does not allow for timezone data in timestamp fields. This plugin will automatically
-convert your SQL timestamp fields to Logstash timestamps, in relative UTC time in ISO8601 format.
-
-Using this setting will manually assign a specified timezone offset, instead
-of using the timezone setting of the local machine. You must use a canonical
-timezone, *America/Denver*, for example.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_class"]
-===== `jdbc_driver_class`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC driver class to load, for example, "org.apache.derby.jdbc.ClientDriver"
-NB per https://github.com/logstash-plugins/logstash-input-jdbc/issues/43, if you are using
-the Oracle JDBC driver (ojdbc6.jar), the correct `jdbc_driver_class` is `"Java::oracle.jdbc.driver.OracleDriver"`
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_library"]
-===== `jdbc_driver_library`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to the third-party JDBC driver library. In case of multiple libraries being
-required, you can pass them separated by a comma.
-
-If not provided, the plugin will look for the driver class in the Logstash Java classpath.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_fetch_size"]
-===== `jdbc_fetch_size`
-
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC fetch size. If not provided, the respective driver's default will be used
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_page_size"]
-===== `jdbc_page_size`
-
- * Value type is <>
- * Default value is `100000`
-
-JDBC page size
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled"]
-===== `jdbc_paging_enabled`
-
- * Value type is <>
- * Default value is `false`
-
-JDBC enable paging
-
-This will cause a SQL statement to be broken up into multiple queries.
-Each query will use limits and offsets to collectively retrieve the full
-result-set. The limit size is set with `jdbc_page_size`.
-
-Be aware that ordering is not guaranteed between queries.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_password"]
-===== `jdbc_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC password
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_password_filepath"]
-===== `jdbc_password_filepath`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to a file containing the JDBC password
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout"]
-===== `jdbc_pool_timeout`
-
- * Value type is <>
- * Default value is `5`
-
-Connection pool configuration.
-The number of seconds to wait to acquire a connection before raising a `PoolTimeoutError` (default 5)
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_user"]
-===== `jdbc_user`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC user
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_validate_connection"]
-===== `jdbc_validate_connection`
-
- * Value type is <>
- * Default value is `false`
-
-Connection pool configuration.
-Validate connection before use.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout"]
-===== `jdbc_validation_timeout`
-
- * Value type is <>
- * Default value is `3600`
-
-Connection pool configuration.
-How often to validate a connection (in seconds)
-
-[id="{version}-plugins-{type}s-{plugin}-last_run_metadata_path"]
-===== `last_run_metadata_path`
-
- * Value type is <>
- * Default value is `"/home/ph/.logstash_jdbc_last_run"`
-
-Path to file with last run time
-
-[id="{version}-plugins-{type}s-{plugin}-lowercase_column_names"]
-===== `lowercase_column_names`
-
- * Value type is <>
- * Default value is `true`
-
-Whether to force the lowercasing of identifier fields
-
-[id="{version}-plugins-{type}s-{plugin}-parameters"]
-===== `parameters`
-
- * Value type is <>
- * Default value is `{}`
-
-Hash of query parameters, for example `{ "target_id" => "321" }`
-
-[id="{version}-plugins-{type}s-{plugin}-record_last_run"]
-===== `record_last_run`
-
- * Value type is <>
- * Default value is `true`
-
-Whether to save state in `last_run_metadata_path`
-
-[id="{version}-plugins-{type}s-{plugin}-schedule"]
-===== `schedule`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Schedule for periodically running the statement, in cron format,
-for example: "* * * * *" (execute the query every minute, on the minute)
-
-There is no schedule by default. If no schedule is given, then the statement is run
-exactly once.
-
-[id="{version}-plugins-{type}s-{plugin}-sequel_opts"]
-===== `sequel_opts`
-
- * Value type is <>
- * Default value is `{}`
-
-General/Vendor-specific Sequel configuration options.
-
-An example of an optional connection pool configuration is
-`max_connections` - the maximum number of connections in the connection pool.
-
-Examples of vendor-specific options can be found on this
-documentation page: https://github.com/jeremyevans/sequel/blob/master/doc/opening_databases.rdoc
-
-[id="{version}-plugins-{type}s-{plugin}-sql_log_level"]
-===== `sql_log_level`
-
- * Value can be any of: `fatal`, `error`, `warn`, `info`, `debug`
- * Default value is `"info"`
-
-Log level at which to log SQL queries. The accepted values are the common ones: `fatal`, `error`,
-`warn`, `info`, and `debug`. The default value is `info`.
-
-[id="{version}-plugins-{type}s-{plugin}-statement"]
-===== `statement`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Statement to execute. If undefined, Logstash will complain, even if the codec is unused.
-
-To use parameters, use named parameter syntax.
-For example:
-
-[source, ruby]
-----------------------------------------------
-"SELECT * FROM MYTABLE WHERE id = :target_id"
-----------------------------------------------
-
-Here, `:target_id` is a named parameter. You can configure named parameters
-with the `parameters` setting.
-
-[id="{version}-plugins-{type}s-{plugin}-statement_filepath"]
-===== `statement_filepath`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path of file containing statement to execute
-
-[id="{version}-plugins-{type}s-{plugin}-tracking_column"]
-===== `tracking_column`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If tracking column value rather than timestamp, the column whose value is to be tracked
-
-[id="{version}-plugins-{type}s-{plugin}-tracking_column_type"]
-===== `tracking_column_type`
-
- * Value can be any of: `numeric`, `timestamp`
- * Default value is `"numeric"`
-
-Type of tracking column. Currently only "numeric" and "timestamp" are supported
-
-[id="{version}-plugins-{type}s-{plugin}-use_column_value"]
-===== `use_column_value`
-
- * Value type is <>
- * Default value is `false`
-
-Use an incremental column value rather than a timestamp
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/jdbc-v4.2.3.asciidoc b/docs/versioned-plugins/inputs/jdbc-v4.2.3.asciidoc
deleted file mode 100644
index 867104753..000000000
--- a/docs/versioned-plugins/inputs/jdbc-v4.2.3.asciidoc
+++ /dev/null
@@ -1,486 +0,0 @@
-:plugin: jdbc
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.2.3
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-input-jdbc/blob/v4.2.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Jdbc input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This plugin was created as a way to ingest data from any database
-with a JDBC interface into Logstash. You can periodically schedule ingestion
-using a cron syntax (see `schedule` setting) or run the query one time to load
-data into Logstash. Each row in the resultset becomes a single event.
-Columns in the resultset are converted into fields in the event.
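-To make the row-to-event mapping concrete, here is a sketch of a hypothetical
-result-set row and the event fields it would yield (column names are illustrative;
-Logstash also adds metadata fields such as `@timestamp` and `@version`):
-
-[source,ruby]
-------------------------------------------------------
-# Hypothetical row from a `songs` table:
-#   id | title       | artist
-#    1 | "Moonlight" | "Beethoven"
-#
-# becomes one event whose fields mirror the columns:
-{
-        "id" => 1,
-     "title" => "Moonlight",
-    "artist" => "Beethoven"
-}
-------------------------------------------------------
-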
-
-==== Drivers
-
-This plugin does not come packaged with JDBC driver libraries. The desired
-JDBC driver library must be explicitly passed in to the plugin using the
-`jdbc_driver_library` configuration option.
-
-==== Scheduling
-
-Input from this plugin can be scheduled to run periodically according to a specific
-schedule. This scheduling syntax is powered by https://github.com/jmettraux/rufus-scheduler[rufus-scheduler].
-The syntax is cron-like with some extensions specific to Rufus (e.g. timezone support).
-
-Examples:
-
-|==========================================================
-| `* 5 * 1-3 *` | will execute every minute of 5am every day of January through March.
-| `0 * * * *` | will execute on the 0th minute of every hour every day.
-| `0 6 * * * America/Chicago` | will execute at 6:00am (UTC/GMT -5) every day.
-|==========================================================
-
-
-Further documentation describing this syntax can be found https://github.com/jmettraux/rufus-scheduler#parsing-cronlines-and-time-strings[here].
-
-==== State
-
-The plugin will persist the `sql_last_value` parameter in the form of a
-metadata file stored in the configured `last_run_metadata_path`. Upon query execution,
-this file will be updated with the current value of `sql_last_value`. Next time
-the pipeline starts up, this value will be updated by reading from the file. If
-`clean_run` is set to true, this value will be ignored and `sql_last_value` will be
-set to Jan 1, 1970, or 0 if `use_column_value` is true, as if no query has ever been executed.
-
-==== Dealing With Large Result-sets
-
-Many JDBC drivers use the `fetch_size` parameter to limit how many
-results are pre-fetched at a time from the cursor into the client's cache
-before retrieving more results from the result-set. This is configured in
-this plugin using the `jdbc_fetch_size` configuration option. No fetch size
-is set by default in this plugin, so the specific driver's default size will
-be used.
-
-==== Usage
-
-Here is an example of setting up the plugin to fetch data from a MySQL database.
-First, we place the appropriate JDBC driver library in our current
-path (this can be placed anywhere on your filesystem). In this example, we connect to
-the 'mydb' database using the user 'mysql' and wish to input all rows in the 'songs'
-table that match a specific artist. The following example demonstrates a possible
-Logstash configuration for this. The `schedule` option in this example will
-instruct the plugin to execute this input statement on the minute, every minute.
-
-[source,ruby]
-------------------------------------------------------------------------------
-input {
-  jdbc {
-    jdbc_driver_library => "mysql-connector-java-5.1.36-bin.jar"
-    jdbc_driver_class => "com.mysql.jdbc.Driver"
-    jdbc_connection_string => "jdbc:mysql://localhost:3306/mydb"
-    jdbc_user => "mysql"
-    parameters => { "favorite_artist" => "Beethoven" }
-    schedule => "* * * * *"
-    statement => "SELECT * from songs where artist = :favorite_artist"
-  }
-}
-------------------------------------------------------------------------------
-
-==== Configuring SQL statement
-
-A SQL statement is required for this input. It can be passed in via the
-`statement` option in the form of a string, or read from a file (`statement_filepath`). The file
-option is typically used when the SQL statement is large or cumbersome to supply in the config.
-The file option only supports one SQL statement. The plugin will only accept one of the options.
-It cannot read a statement from a file as well as from the `statement` configuration parameter. - -==== Configuring multiple SQL statements - -Configuring multiple SQL statements is useful when there is a need to query and ingest data -from different database tables or views. It is possible to define separate Logstash -configuration files for each statement or to define multiple statements in a single configuration -file. When using multiple statements in a single Logstash configuration file, each statement -has to be defined as a separate jdbc input (including jdbc driver, connection string and other -required parameters). - -Please note that if any of the statements use the `sql_last_value` parameter (e.g. for -ingesting only data changed since last run), each input should define its own -`last_run_metadata_path` parameter. Failure to do so will result in undesired behaviour, as -all inputs will store their state to the same (default) metadata file, effectively -overwriting each other's `sql_last_value`. - -==== Predefined Parameters - -Some parameters are built-in and can be used from within your queries. -Here is the list: - -|========================================================== -|sql_last_value | The value used to calculate which rows to query. Before any query is run, -this is set to Thursday, 1 January 1970, or 0 if `use_column_value` is true and -`tracking_column` is set. It is updated accordingly after subsequent queries are run. -|========================================================== - -Example: -[source,ruby] ---------------------------------------------------------------------------------------------------- -input { - jdbc { - statement => "SELECT id, mycolumn1, mycolumn2 FROM my_table WHERE id > :sql_last_value" - use_column_value => true - tracking_column => "id" - # ... other configuration bits - } -} ---------------------------------------------------------------------------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Jdbc Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-clean_run>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-columns_charset>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_connection_string>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_default_timezone>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_class>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_library>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_fetch_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_page_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_password_filepath>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_user>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_validate_connection>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-last_run_metadata_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-lowercase_column_names>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-record_last_run>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sequel_opts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sql_log_level>> |<>, one of `["fatal", "error", "warn", "info", "debug"]`|No -| <<{version}-plugins-{type}s-{plugin}-statement>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-statement_filepath>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-tracking_column>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tracking_column_type>> |<>, one of `["numeric", "timestamp"]`|No -| <<{version}-plugins-{type}s-{plugin}-use_column_value>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-clean_run"] -===== `clean_run` - - * Value type is <> - * Default value is `false` - -Whether the previous run state should be preserved - -[id="{version}-plugins-{type}s-{plugin}-columns_charset"] -===== `columns_charset` - - * Value type is <> - * Default value is `{}` - -The character encoding for specific columns. This option will override the `:charset` option -for the specified columns. - -Example: -[source,ruby] -------------------------------------------------------- -input { - jdbc { - ... - columns_charset => { "column0" => "ISO-8859-1" } - ... - } -} -------------------------------------------------------- -this will only convert column0 that has ISO-8859-1 as an original encoding. 
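-
-Multiple columns can be listed in the same hash. A minimal sketch (the second
-column name and its encoding are illustrative only):
-
-[source,ruby]
-------------------------------------------------------
-input {
-  jdbc {
-    ...
-    columns_charset => { "column0" => "ISO-8859-1", "column1" => "Windows-1252" }
-    ...
-  }
-}
-------------------------------------------------------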
-
-[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts"]
-===== `connection_retry_attempts`
-
- * Value type is <>
- * Default value is `1`
-
-Maximum number of times to try connecting to the database
-
-[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time"]
-===== `connection_retry_attempts_wait_time`
-
- * Value type is <>
- * Default value is `0.5`
-
-Number of seconds to sleep between connection attempts
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_connection_string"]
-===== `jdbc_connection_string`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC connection string
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_default_timezone"]
-===== `jdbc_default_timezone`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Timezone conversion.
-SQL does not allow for timezone data in timestamp fields. This plugin will automatically
-convert your SQL timestamp fields to Logstash timestamps, in relative UTC time in ISO8601 format.
-
-Using this setting will manually assign a specified timezone offset, instead
-of using the timezone setting of the local machine. You must use a canonical
-timezone, *America/Denver*, for example.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_class"]
-===== `jdbc_driver_class`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC driver class to load, for example, "org.apache.derby.jdbc.ClientDriver".
-NB: per https://github.com/logstash-plugins/logstash-input-jdbc/issues/43, if you are using
-the Oracle JDBC driver (ojdbc6.jar) the correct `jdbc_driver_class` is `"Java::oracle.jdbc.driver.OracleDriver"`
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_library"]
-===== `jdbc_driver_library`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to the third-party JDBC driver library. If multiple libraries are
-required, you can pass them separated by a comma.
-
-If not provided, the plugin will look for the driver class in the Logstash Java classpath.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_fetch_size"]
-===== `jdbc_fetch_size`
-
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC fetch size. If not provided, the respective driver's default will be used.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_page_size"]
-===== `jdbc_page_size`
-
- * Value type is <>
- * Default value is `100000`
-
-JDBC page size
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled"]
-===== `jdbc_paging_enabled`
-
- * Value type is <>
- * Default value is `false`
-
-JDBC enable paging
-
-This will cause a SQL statement to be broken up into multiple queries.
-Each query will use limits and offsets to collectively retrieve the full
-result-set. The limit size is set with `jdbc_page_size`.
-
-Be aware that ordering is not guaranteed between queries.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_password"]
-===== `jdbc_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC password
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_password_filepath"]
-===== `jdbc_password_filepath`
-
- * Value type is <>
- * There is no default value for this setting.
- -JDBC password filename - -[id="{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout"] -===== `jdbc_pool_timeout` - - * Value type is <> - * Default value is `5` - -Connection pool configuration. -The amount of seconds to wait to acquire a connection before raising a PoolTimeoutError (default 5) - -[id="{version}-plugins-{type}s-{plugin}-jdbc_user"] -===== `jdbc_user` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -JDBC user - -[id="{version}-plugins-{type}s-{plugin}-jdbc_validate_connection"] -===== `jdbc_validate_connection` - - * Value type is <> - * Default value is `false` - -Connection pool configuration. -Validate connection before use. - -[id="{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout"] -===== `jdbc_validation_timeout` - - * Value type is <> - * Default value is `3600` - -Connection pool configuration. -How often to validate a connection (in seconds) - -[id="{version}-plugins-{type}s-{plugin}-last_run_metadata_path"] -===== `last_run_metadata_path` - - * Value type is <> - * Default value is `"/home/ph/.logstash_jdbc_last_run"` - -Path to file with last run time - -[id="{version}-plugins-{type}s-{plugin}-lowercase_column_names"] -===== `lowercase_column_names` - - * Value type is <> - * Default value is `true` - -Whether to force the lowercasing of identifier fields - -[id="{version}-plugins-{type}s-{plugin}-parameters"] -===== `parameters` - - * Value type is <> - * Default value is `{}` - -Hash of query parameter, for example `{ "target_id" => "321" }` - -[id="{version}-plugins-{type}s-{plugin}-record_last_run"] -===== `record_last_run` - - * Value type is <> - * Default value is `true` - -Whether to save state or not in last_run_metadata_path - -[id="{version}-plugins-{type}s-{plugin}-schedule"] -===== `schedule` - - * Value type is <> - * There is no default value for this setting. - -Schedule of when to periodically run statement, in Cron format -for example: "* * * * *" (execute query every minute, on the minute) - -There is no schedule by default. If no schedule is given, then the statement is run -exactly once. - -[id="{version}-plugins-{type}s-{plugin}-sequel_opts"] -===== `sequel_opts` - - * Value type is <> - * Default value is `{}` - -General/Vendor-specific Sequel configuration options. - -An example of an optional connection pool configuration - max_connections - The maximum number of connections the connection pool - -examples of vendor-specific options can be found in this -documentation page: https://github.com/jeremyevans/sequel/blob/master/doc/opening_databases.rdoc - -[id="{version}-plugins-{type}s-{plugin}-sql_log_level"] -===== `sql_log_level` - - * Value can be any of: `fatal`, `error`, `warn`, `info`, `debug` - * Default value is `"info"` - -Log level at which to log SQL queries, the accepted values are the common ones fatal, error, warn, -info and debug. The default value is info. - -[id="{version}-plugins-{type}s-{plugin}-statement"] -===== `statement` - - * Value type is <> - * There is no default value for this setting. - -If undefined, Logstash will complain, even if codec is unused. -Statement to execute - -To use parameters, use named parameter syntax. -For example: - -[source, ruby] ------------------------------------------------ -"SELECT * FROM MYTABLE WHERE id = :target_id" ------------------------------------------------ - -here, ":target_id" is a named parameter. You can configure named parameters -with the `parameters` setting. 
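-
-Putting the two settings together (a minimal sketch; the other required jdbc
-options are elided, as in the examples above):
-
-[source,ruby]
-------------------------------------------------------
-input {
-  jdbc {
-    ...
-    statement => "SELECT * FROM MYTABLE WHERE id = :target_id"
-    parameters => { "target_id" => "321" }
-    ...
-  }
-}
-------------------------------------------------------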
- -[id="{version}-plugins-{type}s-{plugin}-statement_filepath"] -===== `statement_filepath` - - * Value type is <> - * There is no default value for this setting. - -Path of file containing statement to execute - -[id="{version}-plugins-{type}s-{plugin}-tracking_column"] -===== `tracking_column` - - * Value type is <> - * There is no default value for this setting. - -If tracking column value rather than timestamp, the column whose value is to be tracked - -[id="{version}-plugins-{type}s-{plugin}-tracking_column_type"] -===== `tracking_column_type` - - * Value can be any of: `numeric`, `timestamp` - * Default value is `"numeric"` - -Type of tracking column. Currently only "numeric" and "timestamp" - -[id="{version}-plugins-{type}s-{plugin}-use_column_value"] -===== `use_column_value` - - * Value type is <> - * Default value is `false` - -Use an incremental column value rather than a timestamp - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/jdbc-v4.2.4.asciidoc b/docs/versioned-plugins/inputs/jdbc-v4.2.4.asciidoc deleted file mode 100644 index b6545a6dc..000000000 --- a/docs/versioned-plugins/inputs/jdbc-v4.2.4.asciidoc +++ /dev/null @@ -1,486 +0,0 @@ -:plugin: jdbc -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.2.4 -:release_date: 2017-08-21 -:changelog_url: https://github.com/logstash-plugins/logstash-input-jdbc/blob/v4.2.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Jdbc input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This plugin was created as a way to ingest data in any database -with a JDBC interface into Logstash. You can periodically schedule ingestion -using a cron syntax (see `schedule` setting) or run the query one time to load -data into Logstash. Each row in the resultset becomes a single event. -Columns in the resultset are converted into fields in the event. - -==== Drivers - -This plugin does not come packaged with JDBC driver libraries. The desired -jdbc driver library must be explicitly passed in to the plugin using the -`jdbc_driver_library` configuration option. - -==== Scheduling - -Input from this plugin can be scheduled to run periodically according to a specific -schedule. This scheduling syntax is powered by https://github.com/jmettraux/rufus-scheduler[rufus-scheduler]. -The syntax is cron-like with some extensions specific to Rufus (e.g. timezone support ). - -Examples: - -|========================================================== -| `* 5 * 1-3 *` | will execute every minute of 5am every day of January through March. -| `0 * * * *` | will execute on the 0th minute of every hour every day. -| `0 6 * * * America/Chicago` | will execute at 6:00am (UTC/GMT -5) every day. -|========================================================== - - -Further documentation describing this syntax can be found https://github.com/jmettraux/rufus-scheduler#parsing-cronlines-and-time-strings[here]. - -==== State - -The plugin will persist the `sql_last_value` parameter in the form of a -metadata file stored in the configured `last_run_metadata_path`. 
Upon query execution, -this file will be updated with the current value of `sql_last_value`. Next time -the pipeline starts up, this value will be updated by reading from the file. If -`clean_run` is set to true, this value will be ignored and `sql_last_value` will be -set to Jan 1, 1970, or 0 if `use_column_value` is true, as if no query has ever been executed. - -==== Dealing With Large Result-sets - -Many JDBC drivers use the `fetch_size` parameter to limit how many -results are pre-fetched at a time from the cursor into the client's cache -before retrieving more results from the result-set. This is configured in -this plugin using the `jdbc_fetch_size` configuration option. No fetch size -is set by default in this plugin, so the specific driver's default size will -be used. - -==== Usage: - -Here is an example of setting up the plugin to fetch data from a MySQL database. -First, we place the appropriate JDBC driver library in our current -path (this can be placed anywhere on your filesystem). In this example, we connect to -the 'mydb' database using the user: 'mysql' and wish to input all rows in the 'songs' -table that match a specific artist. The following examples demonstrates a possible -Logstash configuration for this. The `schedule` option in this example will -instruct the plugin to execute this input statement on the minute, every minute. - -[source,ruby] ------------------------------------------------------------------------------- -input { - jdbc { - jdbc_driver_library => "mysql-connector-java-5.1.36-bin.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://localhost:3306/mydb" - jdbc_user => "mysql" - parameters => { "favorite_artist" => "Beethoven" } - schedule => "* * * * *" - statement => "SELECT * from songs where artist = :favorite_artist" - } -} ------------------------------------------------------------------------------- - -==== Configuring SQL statement - -A sql statement is required for this input. This can be passed-in via a -statement option in the form of a string, or read from a file (`statement_filepath`). File -option is typically used when the SQL statement is large or cumbersome to supply in the config. -The file option only supports one SQL statement. The plugin will only accept one of the options. -It cannot read a statement from a file as well as from the `statement` configuration parameter. - -==== Configuring multiple SQL statements - -Configuring multiple SQL statements is useful when there is a need to query and ingest data -from different database tables or views. It is possible to define separate Logstash -configuration files for each statement or to define multiple statements in a single configuration -file. When using multiple statements in a single Logstash configuration file, each statement -has to be defined as a separate jdbc input (including jdbc driver, connection string and other -required parameters). - -Please note that if any of the statements use the `sql_last_value` parameter (e.g. for -ingesting only data changed since last run), each input should define its own -`last_run_metadata_path` parameter. Failure to do so will result in undesired behaviour, as -all inputs will store their state to the same (default) metadata file, effectively -overwriting each other's `sql_last_value`. - -==== Predefined Parameters - -Some parameters are built-in and can be used from within your queries. 
-Here is the list: - -|========================================================== -|sql_last_value | The value used to calculate which rows to query. Before any query is run, -this is set to Thursday, 1 January 1970, or 0 if `use_column_value` is true and -`tracking_column` is set. It is updated accordingly after subsequent queries are run. -|========================================================== - -Example: -[source,ruby] ---------------------------------------------------------------------------------------------------- -input { - jdbc { - statement => "SELECT id, mycolumn1, mycolumn2 FROM my_table WHERE id > :sql_last_value" - use_column_value => true - tracking_column => "id" - # ... other configuration bits - } -} ---------------------------------------------------------------------------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Jdbc Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-clean_run>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-columns_charset>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_connection_string>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_default_timezone>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_class>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_library>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_fetch_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_page_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_password_filepath>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_user>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_validate_connection>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-last_run_metadata_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-lowercase_column_names>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-record_last_run>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sequel_opts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sql_log_level>> |<>, one of `["fatal", "error", "warn", "info", "debug"]`|No -| <<{version}-plugins-{type}s-{plugin}-statement>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-statement_filepath>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-tracking_column>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tracking_column_type>> |<>, one of `["numeric", "timestamp"]`|No -| <<{version}-plugins-{type}s-{plugin}-use_column_value>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. 
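-
-For example, the paging options listed above work as a pair (a minimal sketch;
-the page size is illustrative and other options are elided):
-
-[source,ruby]
-------------------------------------------------------
-input {
-  jdbc {
-    ...
-    # break the statement into multiple LIMIT/OFFSET queries
-    jdbc_paging_enabled => true
-    jdbc_page_size => 50000
-    ...
-  }
-}
-------------------------------------------------------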
- -  - -[id="{version}-plugins-{type}s-{plugin}-clean_run"] -===== `clean_run` - - * Value type is <> - * Default value is `false` - -Whether the previous run state should be preserved - -[id="{version}-plugins-{type}s-{plugin}-columns_charset"] -===== `columns_charset` - - * Value type is <> - * Default value is `{}` - -The character encoding for specific columns. This option will override the `:charset` option -for the specified columns. - -Example: -[source,ruby] -------------------------------------------------------- -input { - jdbc { - ... - columns_charset => { "column0" => "ISO-8859-1" } - ... - } -} -------------------------------------------------------- -this will only convert column0 that has ISO-8859-1 as an original encoding. - -[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts"] -===== `connection_retry_attempts` - - * Value type is <> - * Default value is `1` - -Maximum number of times to try connecting to database - -[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time"] -===== `connection_retry_attempts_wait_time` - - * Value type is <> - * Default value is `0.5` - -Number of seconds to sleep between connection attempts - -[id="{version}-plugins-{type}s-{plugin}-jdbc_connection_string"] -===== `jdbc_connection_string` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -JDBC connection string - -[id="{version}-plugins-{type}s-{plugin}-jdbc_default_timezone"] -===== `jdbc_default_timezone` - - * Value type is <> - * There is no default value for this setting. - -Timezone conversion. -SQL does not allow for timezone data in timestamp fields. This plugin will automatically -convert your SQL timestamp fields to Logstash timestamps, in relative UTC time in ISO8601 format. - -Using this setting will manually assign a specified timezone offset, instead -of using the timezone setting of the local machine. You must use a canonical -timezone, *America/Denver*, for example. - -[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_class"] -===== `jdbc_driver_class` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -JDBC driver class to load, for exmaple, "org.apache.derby.jdbc.ClientDriver" -NB per https://github.com/logstash-plugins/logstash-input-jdbc/issues/43 if you are using -the Oracle JDBC driver (ojdbc6.jar) the correct `jdbc_driver_class` is `"Java::oracle.jdbc.driver.OracleDriver"` - -[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_library"] -===== `jdbc_driver_library` - - * Value type is <> - * There is no default value for this setting. - -Tentative of abstracting JDBC logic to a mixin -for potential reuse in other plugins (input/output) -This method is called when someone includes this module -Add these methods to the 'base' given. -JDBC driver library path to third party driver library. In case of multiple libraries being -required you can pass them separated by a comma. - -If not provided, Plugin will look for the driver class in the Logstash Java classpath. - -[id="{version}-plugins-{type}s-{plugin}-jdbc_fetch_size"] -===== `jdbc_fetch_size` - - * Value type is <> - * There is no default value for this setting. - -JDBC fetch size. 
if not provided, respective driver's default will be used - -[id="{version}-plugins-{type}s-{plugin}-jdbc_page_size"] -===== `jdbc_page_size` - - * Value type is <> - * Default value is `100000` - -JDBC page size - -[id="{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled"] -===== `jdbc_paging_enabled` - - * Value type is <> - * Default value is `false` - -JDBC enable paging - -This will cause a sql statement to be broken up into multiple queries. -Each query will use limits and offsets to collectively retrieve the full -result-set. The limit size is set with `jdbc_page_size`. - -Be aware that ordering is not guaranteed between queries. - -[id="{version}-plugins-{type}s-{plugin}-jdbc_password"] -===== `jdbc_password` - - * Value type is <> - * There is no default value for this setting. - -JDBC password - -[id="{version}-plugins-{type}s-{plugin}-jdbc_password_filepath"] -===== `jdbc_password_filepath` - - * Value type is <> - * There is no default value for this setting. - -JDBC password filename - -[id="{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout"] -===== `jdbc_pool_timeout` - - * Value type is <> - * Default value is `5` - -Connection pool configuration. -The amount of seconds to wait to acquire a connection before raising a PoolTimeoutError (default 5) - -[id="{version}-plugins-{type}s-{plugin}-jdbc_user"] -===== `jdbc_user` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -JDBC user - -[id="{version}-plugins-{type}s-{plugin}-jdbc_validate_connection"] -===== `jdbc_validate_connection` - - * Value type is <> - * Default value is `false` - -Connection pool configuration. -Validate connection before use. - -[id="{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout"] -===== `jdbc_validation_timeout` - - * Value type is <> - * Default value is `3600` - -Connection pool configuration. -How often to validate a connection (in seconds) - -[id="{version}-plugins-{type}s-{plugin}-last_run_metadata_path"] -===== `last_run_metadata_path` - - * Value type is <> - * Default value is `"/home/ph/.logstash_jdbc_last_run"` - -Path to file with last run time - -[id="{version}-plugins-{type}s-{plugin}-lowercase_column_names"] -===== `lowercase_column_names` - - * Value type is <> - * Default value is `true` - -Whether to force the lowercasing of identifier fields - -[id="{version}-plugins-{type}s-{plugin}-parameters"] -===== `parameters` - - * Value type is <> - * Default value is `{}` - -Hash of query parameter, for example `{ "target_id" => "321" }` - -[id="{version}-plugins-{type}s-{plugin}-record_last_run"] -===== `record_last_run` - - * Value type is <> - * Default value is `true` - -Whether to save state or not in last_run_metadata_path - -[id="{version}-plugins-{type}s-{plugin}-schedule"] -===== `schedule` - - * Value type is <> - * There is no default value for this setting. - -Schedule of when to periodically run statement, in Cron format -for example: "* * * * *" (execute query every minute, on the minute) - -There is no schedule by default. If no schedule is given, then the statement is run -exactly once. - -[id="{version}-plugins-{type}s-{plugin}-sequel_opts"] -===== `sequel_opts` - - * Value type is <> - * Default value is `{}` - -General/Vendor-specific Sequel configuration options. 
- -An example of an optional connection pool configuration - max_connections - The maximum number of connections the connection pool - -examples of vendor-specific options can be found in this -documentation page: https://github.com/jeremyevans/sequel/blob/master/doc/opening_databases.rdoc - -[id="{version}-plugins-{type}s-{plugin}-sql_log_level"] -===== `sql_log_level` - - * Value can be any of: `fatal`, `error`, `warn`, `info`, `debug` - * Default value is `"info"` - -Log level at which to log SQL queries, the accepted values are the common ones fatal, error, warn, -info and debug. The default value is info. - -[id="{version}-plugins-{type}s-{plugin}-statement"] -===== `statement` - - * Value type is <> - * There is no default value for this setting. - -If undefined, Logstash will complain, even if codec is unused. -Statement to execute - -To use parameters, use named parameter syntax. -For example: - -[source, ruby] ------------------------------------------------ -"SELECT * FROM MYTABLE WHERE id = :target_id" ------------------------------------------------ - -here, ":target_id" is a named parameter. You can configure named parameters -with the `parameters` setting. - -[id="{version}-plugins-{type}s-{plugin}-statement_filepath"] -===== `statement_filepath` - - * Value type is <> - * There is no default value for this setting. - -Path of file containing statement to execute - -[id="{version}-plugins-{type}s-{plugin}-tracking_column"] -===== `tracking_column` - - * Value type is <> - * There is no default value for this setting. - -If tracking column value rather than timestamp, the column whose value is to be tracked - -[id="{version}-plugins-{type}s-{plugin}-tracking_column_type"] -===== `tracking_column_type` - - * Value can be any of: `numeric`, `timestamp` - * Default value is `"numeric"` - -Type of tracking column. Currently only "numeric" and "timestamp" - -[id="{version}-plugins-{type}s-{plugin}-use_column_value"] -===== `use_column_value` - - * Value type is <> - * Default value is `false` - -Use an incremental column value rather than a timestamp - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/jdbc-v4.3.0.asciidoc b/docs/versioned-plugins/inputs/jdbc-v4.3.0.asciidoc deleted file mode 100644 index 1e1eed5aa..000000000 --- a/docs/versioned-plugins/inputs/jdbc-v4.3.0.asciidoc +++ /dev/null @@ -1,486 +0,0 @@ -:plugin: jdbc -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.3.0 -:release_date: 2017-10-27 -:changelog_url: https://github.com/logstash-plugins/logstash-input-jdbc/blob/v4.3.0/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Jdbc input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This plugin was created as a way to ingest data in any database -with a JDBC interface into Logstash. You can periodically schedule ingestion -using a cron syntax (see `schedule` setting) or run the query one time to load -data into Logstash. Each row in the resultset becomes a single event. -Columns in the resultset are converted into fields in the event. 
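-
-Because the statement runs exactly once when no `schedule` is given, a one-shot
-load can be configured with just the connection settings and a statement (a
-minimal sketch reusing the connection details from the Usage example below):
-
-[source,ruby]
-------------------------------------------------------
-input {
-  jdbc {
-    jdbc_driver_library => "mysql-connector-java-5.1.36-bin.jar"
-    jdbc_driver_class => "com.mysql.jdbc.Driver"
-    jdbc_connection_string => "jdbc:mysql://localhost:3306/mydb"
-    jdbc_user => "mysql"
-    statement => "SELECT * from songs"
-    # no schedule => the query is executed a single time
-  }
-}
-------------------------------------------------------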
- -==== Drivers - -This plugin does not come packaged with JDBC driver libraries. The desired -jdbc driver library must be explicitly passed in to the plugin using the -`jdbc_driver_library` configuration option. - -==== Scheduling - -Input from this plugin can be scheduled to run periodically according to a specific -schedule. This scheduling syntax is powered by https://github.com/jmettraux/rufus-scheduler[rufus-scheduler]. -The syntax is cron-like with some extensions specific to Rufus (e.g. timezone support ). - -Examples: - -|========================================================== -| `* 5 * 1-3 *` | will execute every minute of 5am every day of January through March. -| `0 * * * *` | will execute on the 0th minute of every hour every day. -| `0 6 * * * America/Chicago` | will execute at 6:00am (UTC/GMT -5) every day. -|========================================================== - - -Further documentation describing this syntax can be found https://github.com/jmettraux/rufus-scheduler#parsing-cronlines-and-time-strings[here]. - -==== State - -The plugin will persist the `sql_last_value` parameter in the form of a -metadata file stored in the configured `last_run_metadata_path`. Upon query execution, -this file will be updated with the current value of `sql_last_value`. Next time -the pipeline starts up, this value will be updated by reading from the file. If -`clean_run` is set to true, this value will be ignored and `sql_last_value` will be -set to Jan 1, 1970, or 0 if `use_column_value` is true, as if no query has ever been executed. - -==== Dealing With Large Result-sets - -Many JDBC drivers use the `fetch_size` parameter to limit how many -results are pre-fetched at a time from the cursor into the client's cache -before retrieving more results from the result-set. This is configured in -this plugin using the `jdbc_fetch_size` configuration option. No fetch size -is set by default in this plugin, so the specific driver's default size will -be used. - -==== Usage: - -Here is an example of setting up the plugin to fetch data from a MySQL database. -First, we place the appropriate JDBC driver library in our current -path (this can be placed anywhere on your filesystem). In this example, we connect to -the 'mydb' database using the user: 'mysql' and wish to input all rows in the 'songs' -table that match a specific artist. The following examples demonstrates a possible -Logstash configuration for this. The `schedule` option in this example will -instruct the plugin to execute this input statement on the minute, every minute. - -[source,ruby] ------------------------------------------------------------------------------- -input { - jdbc { - jdbc_driver_library => "mysql-connector-java-5.1.36-bin.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://localhost:3306/mydb" - jdbc_user => "mysql" - parameters => { "favorite_artist" => "Beethoven" } - schedule => "* * * * *" - statement => "SELECT * from songs where artist = :favorite_artist" - } -} ------------------------------------------------------------------------------- - -==== Configuring SQL statement - -A sql statement is required for this input. This can be passed-in via a -statement option in the form of a string, or read from a file (`statement_filepath`). File -option is typically used when the SQL statement is large or cumbersome to supply in the config. -The file option only supports one SQL statement. The plugin will only accept one of the options. 
-It cannot read a statement from a file as well as from the `statement` configuration parameter. - -==== Configuring multiple SQL statements - -Configuring multiple SQL statements is useful when there is a need to query and ingest data -from different database tables or views. It is possible to define separate Logstash -configuration files for each statement or to define multiple statements in a single configuration -file. When using multiple statements in a single Logstash configuration file, each statement -has to be defined as a separate jdbc input (including jdbc driver, connection string and other -required parameters). - -Please note that if any of the statements use the `sql_last_value` parameter (e.g. for -ingesting only data changed since last run), each input should define its own -`last_run_metadata_path` parameter. Failure to do so will result in undesired behaviour, as -all inputs will store their state to the same (default) metadata file, effectively -overwriting each other's `sql_last_value`. - -==== Predefined Parameters - -Some parameters are built-in and can be used from within your queries. -Here is the list: - -|========================================================== -|sql_last_value | The value used to calculate which rows to query. Before any query is run, -this is set to Thursday, 1 January 1970, or 0 if `use_column_value` is true and -`tracking_column` is set. It is updated accordingly after subsequent queries are run. -|========================================================== - -Example: -[source,ruby] ---------------------------------------------------------------------------------------------------- -input { - jdbc { - statement => "SELECT id, mycolumn1, mycolumn2 FROM my_table WHERE id > :sql_last_value" - use_column_value => true - tracking_column => "id" - # ... other configuration bits - } -} ---------------------------------------------------------------------------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Jdbc Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-clean_run>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-columns_charset>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_connection_string>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_default_timezone>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_class>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_library>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_fetch_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_page_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_password_filepath>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_user>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_validate_connection>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-last_run_metadata_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-lowercase_column_names>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-record_last_run>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sequel_opts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sql_log_level>> |<>, one of `["fatal", "error", "warn", "info", "debug"]`|No -| <<{version}-plugins-{type}s-{plugin}-statement>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-statement_filepath>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-tracking_column>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tracking_column_type>> |<>, one of `["numeric", "timestamp"]`|No -| <<{version}-plugins-{type}s-{plugin}-use_column_value>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-clean_run"] -===== `clean_run` - - * Value type is <> - * Default value is `false` - -Whether the previous run state should be preserved - -[id="{version}-plugins-{type}s-{plugin}-columns_charset"] -===== `columns_charset` - - * Value type is <> - * Default value is `{}` - -The character encoding for specific columns. This option will override the `:charset` option -for the specified columns. - -Example: -[source,ruby] -------------------------------------------------------- -input { - jdbc { - ... - columns_charset => { "column0" => "ISO-8859-1" } - ... - } -} -------------------------------------------------------- -this will only convert column0 that has ISO-8859-1 as an original encoding. 
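-
-More than one column can be overridden in the same hash. A minimal sketch (the
-second column name and its encoding are illustrative only):
-
-[source,ruby]
-------------------------------------------------------
-input {
-  jdbc {
-    ...
-    columns_charset => { "column0" => "ISO-8859-1", "column1" => "Windows-1252" }
-    ...
-  }
-}
-------------------------------------------------------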
- -[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts"] -===== `connection_retry_attempts` - - * Value type is <> - * Default value is `1` - -Maximum number of times to try connecting to database - -[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time"] -===== `connection_retry_attempts_wait_time` - - * Value type is <> - * Default value is `0.5` - -Number of seconds to sleep between connection attempts - -[id="{version}-plugins-{type}s-{plugin}-jdbc_connection_string"] -===== `jdbc_connection_string` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -JDBC connection string - -[id="{version}-plugins-{type}s-{plugin}-jdbc_default_timezone"] -===== `jdbc_default_timezone` - - * Value type is <> - * There is no default value for this setting. - -Timezone conversion. -SQL does not allow for timezone data in timestamp fields. This plugin will automatically -convert your SQL timestamp fields to Logstash timestamps, in relative UTC time in ISO8601 format. - -Using this setting will manually assign a specified timezone offset, instead -of using the timezone setting of the local machine. You must use a canonical -timezone, *America/Denver*, for example. - -[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_class"] -===== `jdbc_driver_class` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -JDBC driver class to load, for exmaple, "org.apache.derby.jdbc.ClientDriver" -NB per https://github.com/logstash-plugins/logstash-input-jdbc/issues/43 if you are using -the Oracle JDBC driver (ojdbc6.jar) the correct `jdbc_driver_class` is `"Java::oracle.jdbc.driver.OracleDriver"` - -[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_library"] -===== `jdbc_driver_library` - - * Value type is <> - * There is no default value for this setting. - -Tentative of abstracting JDBC logic to a mixin -for potential reuse in other plugins (input/output) -This method is called when someone includes this module -Add these methods to the 'base' given. -JDBC driver library path to third party driver library. In case of multiple libraries being -required you can pass them separated by a comma. - -If not provided, Plugin will look for the driver class in the Logstash Java classpath. - -[id="{version}-plugins-{type}s-{plugin}-jdbc_fetch_size"] -===== `jdbc_fetch_size` - - * Value type is <> - * There is no default value for this setting. - -JDBC fetch size. if not provided, respective driver's default will be used - -[id="{version}-plugins-{type}s-{plugin}-jdbc_page_size"] -===== `jdbc_page_size` - - * Value type is <> - * Default value is `100000` - -JDBC page size - -[id="{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled"] -===== `jdbc_paging_enabled` - - * Value type is <> - * Default value is `false` - -JDBC enable paging - -This will cause a sql statement to be broken up into multiple queries. -Each query will use limits and offsets to collectively retrieve the full -result-set. The limit size is set with `jdbc_page_size`. - -Be aware that ordering is not guaranteed between queries. - -[id="{version}-plugins-{type}s-{plugin}-jdbc_password"] -===== `jdbc_password` - - * Value type is <> - * There is no default value for this setting. - -JDBC password - -[id="{version}-plugins-{type}s-{plugin}-jdbc_password_filepath"] -===== `jdbc_password_filepath` - - * Value type is <> - * There is no default value for this setting. 
- -JDBC password filename - -[id="{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout"] -===== `jdbc_pool_timeout` - - * Value type is <> - * Default value is `5` - -Connection pool configuration. -The amount of seconds to wait to acquire a connection before raising a PoolTimeoutError (default 5) - -[id="{version}-plugins-{type}s-{plugin}-jdbc_user"] -===== `jdbc_user` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -JDBC user - -[id="{version}-plugins-{type}s-{plugin}-jdbc_validate_connection"] -===== `jdbc_validate_connection` - - * Value type is <> - * Default value is `false` - -Connection pool configuration. -Validate connection before use. - -[id="{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout"] -===== `jdbc_validation_timeout` - - * Value type is <> - * Default value is `3600` - -Connection pool configuration. -How often to validate a connection (in seconds) - -[id="{version}-plugins-{type}s-{plugin}-last_run_metadata_path"] -===== `last_run_metadata_path` - - * Value type is <> - * Default value is `"/home/ph/.logstash_jdbc_last_run"` - -Path to file with last run time - -[id="{version}-plugins-{type}s-{plugin}-lowercase_column_names"] -===== `lowercase_column_names` - - * Value type is <> - * Default value is `true` - -Whether to force the lowercasing of identifier fields - -[id="{version}-plugins-{type}s-{plugin}-parameters"] -===== `parameters` - - * Value type is <> - * Default value is `{}` - -Hash of query parameter, for example `{ "target_id" => "321" }` - -[id="{version}-plugins-{type}s-{plugin}-record_last_run"] -===== `record_last_run` - - * Value type is <> - * Default value is `true` - -Whether to save state or not in last_run_metadata_path - -[id="{version}-plugins-{type}s-{plugin}-schedule"] -===== `schedule` - - * Value type is <> - * There is no default value for this setting. - -Schedule of when to periodically run statement, in Cron format -for example: "* * * * *" (execute query every minute, on the minute) - -There is no schedule by default. If no schedule is given, then the statement is run -exactly once. - -[id="{version}-plugins-{type}s-{plugin}-sequel_opts"] -===== `sequel_opts` - - * Value type is <> - * Default value is `{}` - -General/Vendor-specific Sequel configuration options. - -An example of an optional connection pool configuration - max_connections - The maximum number of connections the connection pool - -examples of vendor-specific options can be found in this -documentation page: https://github.com/jeremyevans/sequel/blob/master/doc/opening_databases.rdoc - -[id="{version}-plugins-{type}s-{plugin}-sql_log_level"] -===== `sql_log_level` - - * Value can be any of: `fatal`, `error`, `warn`, `info`, `debug` - * Default value is `"info"` - -Log level at which to log SQL queries, the accepted values are the common ones fatal, error, warn, -info and debug. The default value is info. - -[id="{version}-plugins-{type}s-{plugin}-statement"] -===== `statement` - - * Value type is <> - * There is no default value for this setting. - -If undefined, Logstash will complain, even if codec is unused. -Statement to execute - -To use parameters, use named parameter syntax. -For example: - -[source, ruby] ------------------------------------------------ -"SELECT * FROM MYTABLE WHERE id = :target_id" ------------------------------------------------ - -here, ":target_id" is a named parameter. You can configure named parameters -with the `parameters` setting. 
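-
-As in the Usage example above, the named parameter can be supplied through the
-`parameters` setting (a minimal sketch; other required options are elided):
-
-[source,ruby]
-------------------------------------------------------
-input {
-  jdbc {
-    ...
-    statement => "SELECT * from songs where artist = :favorite_artist"
-    parameters => { "favorite_artist" => "Beethoven" }
-    ...
-  }
-}
-------------------------------------------------------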
- -[id="{version}-plugins-{type}s-{plugin}-statement_filepath"] -===== `statement_filepath` - - * Value type is <> - * There is no default value for this setting. - -Path of file containing statement to execute - -[id="{version}-plugins-{type}s-{plugin}-tracking_column"] -===== `tracking_column` - - * Value type is <> - * There is no default value for this setting. - -If tracking column value rather than timestamp, the column whose value is to be tracked - -[id="{version}-plugins-{type}s-{plugin}-tracking_column_type"] -===== `tracking_column_type` - - * Value can be any of: `numeric`, `timestamp` - * Default value is `"numeric"` - -Type of tracking column. Currently only "numeric" and "timestamp" - -[id="{version}-plugins-{type}s-{plugin}-use_column_value"] -===== `use_column_value` - - * Value type is <> - * Default value is `false` - -Use an incremental column value rather than a timestamp - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/jdbc-v4.3.1.asciidoc b/docs/versioned-plugins/inputs/jdbc-v4.3.1.asciidoc deleted file mode 100644 index d04ebd886..000000000 --- a/docs/versioned-plugins/inputs/jdbc-v4.3.1.asciidoc +++ /dev/null @@ -1,486 +0,0 @@ -:plugin: jdbc -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.3.1 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-jdbc/blob/v4.3.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Jdbc input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This plugin was created as a way to ingest data in any database -with a JDBC interface into Logstash. You can periodically schedule ingestion -using a cron syntax (see `schedule` setting) or run the query one time to load -data into Logstash. Each row in the resultset becomes a single event. -Columns in the resultset are converted into fields in the event. - -==== Drivers - -This plugin does not come packaged with JDBC driver libraries. The desired -jdbc driver library must be explicitly passed in to the plugin using the -`jdbc_driver_library` configuration option. - -==== Scheduling - -Input from this plugin can be scheduled to run periodically according to a specific -schedule. This scheduling syntax is powered by https://github.com/jmettraux/rufus-scheduler[rufus-scheduler]. -The syntax is cron-like with some extensions specific to Rufus (e.g. timezone support ). - -Examples: - -|========================================================== -| `* 5 * 1-3 *` | will execute every minute of 5am every day of January through March. -| `0 * * * *` | will execute on the 0th minute of every hour every day. -| `0 6 * * * America/Chicago` | will execute at 6:00am (UTC/GMT -5) every day. -|========================================================== - - -Further documentation describing this syntax can be found https://github.com/jmettraux/rufus-scheduler#parsing-cronlines-and-time-strings[here]. - -==== State - -The plugin will persist the `sql_last_value` parameter in the form of a -metadata file stored in the configured `last_run_metadata_path`. 
Upon query execution, -this file will be updated with the current value of `sql_last_value`. Next time -the pipeline starts up, this value will be updated by reading from the file. If -`clean_run` is set to true, this value will be ignored and `sql_last_value` will be -set to Jan 1, 1970, or 0 if `use_column_value` is true, as if no query has ever been executed. - -==== Dealing With Large Result-sets - -Many JDBC drivers use the `fetch_size` parameter to limit how many -results are pre-fetched at a time from the cursor into the client's cache -before retrieving more results from the result-set. This is configured in -this plugin using the `jdbc_fetch_size` configuration option. No fetch size -is set by default in this plugin, so the specific driver's default size will -be used. - -==== Usage: - -Here is an example of setting up the plugin to fetch data from a MySQL database. -First, we place the appropriate JDBC driver library in our current -path (this can be placed anywhere on your filesystem). In this example, we connect to -the 'mydb' database using the user: 'mysql' and wish to input all rows in the 'songs' -table that match a specific artist. The following examples demonstrates a possible -Logstash configuration for this. The `schedule` option in this example will -instruct the plugin to execute this input statement on the minute, every minute. - -[source,ruby] ------------------------------------------------------------------------------- -input { - jdbc { - jdbc_driver_library => "mysql-connector-java-5.1.36-bin.jar" - jdbc_driver_class => "com.mysql.jdbc.Driver" - jdbc_connection_string => "jdbc:mysql://localhost:3306/mydb" - jdbc_user => "mysql" - parameters => { "favorite_artist" => "Beethoven" } - schedule => "* * * * *" - statement => "SELECT * from songs where artist = :favorite_artist" - } -} ------------------------------------------------------------------------------- - -==== Configuring SQL statement - -A sql statement is required for this input. This can be passed-in via a -statement option in the form of a string, or read from a file (`statement_filepath`). File -option is typically used when the SQL statement is large or cumbersome to supply in the config. -The file option only supports one SQL statement. The plugin will only accept one of the options. -It cannot read a statement from a file as well as from the `statement` configuration parameter. - -==== Configuring multiple SQL statements - -Configuring multiple SQL statements is useful when there is a need to query and ingest data -from different database tables or views. It is possible to define separate Logstash -configuration files for each statement or to define multiple statements in a single configuration -file. When using multiple statements in a single Logstash configuration file, each statement -has to be defined as a separate jdbc input (including jdbc driver, connection string and other -required parameters). - -Please note that if any of the statements use the `sql_last_value` parameter (e.g. for -ingesting only data changed since last run), each input should define its own -`last_run_metadata_path` parameter. Failure to do so will result in undesired behaviour, as -all inputs will store their state to the same (default) metadata file, effectively -overwriting each other's `sql_last_value`. - -==== Predefined Parameters - -Some parameters are built-in and can be used from within your queries. 
-Here is the list: - -|========================================================== -|sql_last_value | The value used to calculate which rows to query. Before any query is run, -this is set to Thursday, 1 January 1970, or 0 if `use_column_value` is true and -`tracking_column` is set. It is updated accordingly after subsequent queries are run. -|========================================================== - -Example: -[source,ruby] ---------------------------------------------------------------------------------------------------- -input { - jdbc { - statement => "SELECT id, mycolumn1, mycolumn2 FROM my_table WHERE id > :sql_last_value" - use_column_value => true - tracking_column => "id" - # ... other configuration bits - } -} ---------------------------------------------------------------------------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Jdbc Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-clean_run>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-columns_charset>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_connection_string>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_default_timezone>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_class>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_library>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_fetch_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_page_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_password_filepath>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_user>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_validate_connection>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-last_run_metadata_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-lowercase_column_names>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-record_last_run>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sequel_opts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sql_log_level>> |<>, one of `["fatal", "error", "warn", "info", "debug"]`|No -| <<{version}-plugins-{type}s-{plugin}-statement>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-statement_filepath>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-tracking_column>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tracking_column_type>> |<>, one of `["numeric", "timestamp"]`|No -| <<{version}-plugins-{type}s-{plugin}-use_column_value>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. 
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-clean_run"]
-===== `clean_run`
-
- * Value type is <>
- * Default value is `false`
-
-Whether the previous run state should be preserved
-
-[id="{version}-plugins-{type}s-{plugin}-columns_charset"]
-===== `columns_charset`
-
- * Value type is <>
- * Default value is `{}`
-
-The character encoding for specific columns. This option will override the `:charset` option
-for the specified columns.
-
-Example:
-[source,ruby]
--------------------------------------------------------
-input {
-  jdbc {
-    ...
-    columns_charset => { "column0" => "ISO-8859-1" }
-    ...
-  }
-}
--------------------------------------------------------
-This will convert only `column0`, whose original encoding is ISO-8859-1.
-
-[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts"]
-===== `connection_retry_attempts`
-
- * Value type is <>
- * Default value is `1`
-
-Maximum number of times to try connecting to the database
-
-[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time"]
-===== `connection_retry_attempts_wait_time`
-
- * Value type is <>
- * Default value is `0.5`
-
-Number of seconds to sleep between connection attempts
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_connection_string"]
-===== `jdbc_connection_string`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC connection string
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_default_timezone"]
-===== `jdbc_default_timezone`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Timezone conversion.
-SQL does not allow for timezone data in timestamp fields. This plugin will automatically
-convert your SQL timestamp fields to Logstash timestamps, in relative UTC time in ISO8601 format.
-
-Using this setting will manually assign a specified timezone offset, instead
-of using the timezone setting of the local machine. You must use a canonical
-timezone, *America/Denver*, for example.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_class"]
-===== `jdbc_driver_class`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC driver class to load, for example, "org.apache.derby.jdbc.ClientDriver".
-NB: per https://github.com/logstash-plugins/logstash-input-jdbc/issues/43, if you are using
-the Oracle JDBC driver (ojdbc6.jar), the correct `jdbc_driver_class` is `"Java::oracle.jdbc.driver.OracleDriver"`.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_library"]
-===== `jdbc_driver_library`
-
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC driver library path to the third-party driver library. If multiple libraries are
-required, you can pass them separated by a comma.
-
-If not provided, the plugin will look for the driver class in the Logstash Java classpath.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_fetch_size"]
-===== `jdbc_fetch_size`
-
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC fetch size. If not provided, the respective driver's default will be used.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_page_size"]
-===== `jdbc_page_size`
-
- * Value type is <>
- * Default value is `100000`
-
-JDBC page size
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled"]
-===== `jdbc_paging_enabled`
-
- * Value type is <>
- * Default value is `false`
-
-JDBC enable paging
-
-This will cause an SQL statement to be broken up into multiple queries.
-Each query will use limits and offsets to collectively retrieve the full
-result-set. The limit size is set with `jdbc_page_size`.
-
-Be aware that ordering is not guaranteed between queries.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_password"]
-===== `jdbc_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC password
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_password_filepath"]
-===== `jdbc_password_filepath`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to a file containing the JDBC password
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout"]
-===== `jdbc_pool_timeout`
-
- * Value type is <>
- * Default value is `5`
-
-Connection pool configuration.
-The number of seconds to wait to acquire a connection before raising a PoolTimeoutError (default 5)
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_user"]
-===== `jdbc_user`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC user
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_validate_connection"]
-===== `jdbc_validate_connection`
-
- * Value type is <>
- * Default value is `false`
-
-Connection pool configuration.
-Validate connection before use.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout"]
-===== `jdbc_validation_timeout`
-
- * Value type is <>
- * Default value is `3600`
-
-Connection pool configuration.
-How often to validate a connection (in seconds)
-
-[id="{version}-plugins-{type}s-{plugin}-last_run_metadata_path"]
-===== `last_run_metadata_path`
-
- * Value type is <>
- * Default value is `"$HOME/.logstash_jdbc_last_run"`
-
-Path to the file with the last run time
-
-[id="{version}-plugins-{type}s-{plugin}-lowercase_column_names"]
-===== `lowercase_column_names`
-
- * Value type is <>
- * Default value is `true`
-
-Whether to force the lowercasing of identifier fields
-
-[id="{version}-plugins-{type}s-{plugin}-parameters"]
-===== `parameters`
-
- * Value type is <>
- * Default value is `{}`
-
-Hash of query parameters, for example `{ "target_id" => "321" }`
-
-[id="{version}-plugins-{type}s-{plugin}-record_last_run"]
-===== `record_last_run`
-
- * Value type is <>
- * Default value is `true`
-
-Whether to save state or not in `last_run_metadata_path`
-
-[id="{version}-plugins-{type}s-{plugin}-schedule"]
-===== `schedule`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Schedule for periodically running the statement, in cron format,
-for example: "* * * * *" (execute query every minute, on the minute)
-
-There is no schedule by default. If no schedule is given, then the statement is run
-exactly once.
-
-[id="{version}-plugins-{type}s-{plugin}-sequel_opts"]
-===== `sequel_opts`
-
- * Value type is <>
- * Default value is `{}`
-
-General/Vendor-specific Sequel configuration options.
-
-An example of an optional connection pool configuration is `max_connections` -
-the maximum number of connections the connection pool may hold.
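-
-As a minimal sketch of passing a pool option through `sequel_opts` (the
-`max_connections` value below is illustrative, not a recommendation):
-
-[source,ruby]
--------------------------------------------------------
-input {
-  jdbc {
-    # ... connection and statement settings ...
-    # Hypothetical pool cap, passed straight through to Sequel
-    sequel_opts => { "max_connections" => 4 }
-  }
-}
--------------------------------------------------------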
-
-Examples of vendor-specific options can be found in this
-documentation page: https://github.com/jeremyevans/sequel/blob/master/doc/opening_databases.rdoc
-
-[id="{version}-plugins-{type}s-{plugin}-sql_log_level"]
-===== `sql_log_level`
-
- * Value can be any of: `fatal`, `error`, `warn`, `info`, `debug`
- * Default value is `"info"`
-
-Log level at which to log SQL queries. The accepted values are the common ones:
-`fatal`, `error`, `warn`, `info`, and `debug`.
-
-[id="{version}-plugins-{type}s-{plugin}-statement"]
-===== `statement`
-
- * Value type is <>
- * There is no default value for this setting.
-
-SQL statement to execute.
-
-To use parameters, use named parameter syntax.
-For example:
-
-[source,ruby]
-----------------------------------------------
-"SELECT * FROM MYTABLE WHERE id = :target_id"
-----------------------------------------------
-
-Here, `:target_id` is a named parameter. You can configure named parameters
-with the `parameters` setting.
-
-[id="{version}-plugins-{type}s-{plugin}-statement_filepath"]
-===== `statement_filepath`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path of the file containing the statement to execute
-
-[id="{version}-plugins-{type}s-{plugin}-tracking_column"]
-===== `tracking_column`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If tracking a column value rather than a timestamp, the column whose value is to be tracked
-
-[id="{version}-plugins-{type}s-{plugin}-tracking_column_type"]
-===== `tracking_column_type`
-
- * Value can be any of: `numeric`, `timestamp`
- * Default value is `"numeric"`
-
-Type of tracking column. Currently only "numeric" and "timestamp" are supported.
-
-[id="{version}-plugins-{type}s-{plugin}-use_column_value"]
-===== `use_column_value`
-
- * Value type is <>
- * Default value is `false`
-
-Use an incremental column value rather than a timestamp
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/jdbc-v4.3.2.asciidoc b/docs/versioned-plugins/inputs/jdbc-v4.3.2.asciidoc
deleted file mode 100644
index 2c96afd5c..000000000
--- a/docs/versioned-plugins/inputs/jdbc-v4.3.2.asciidoc
+++ /dev/null
@@ -1,486 +0,0 @@
-:plugin: jdbc
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.3.2
-:release_date: 2017-12-07
-:changelog_url: https://github.com/logstash-plugins/logstash-input-jdbc/blob/v4.3.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Jdbc input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This plugin was created as a way to ingest data from any database
-with a JDBC interface into Logstash. You can periodically schedule ingestion
-using cron syntax (see the `schedule` setting) or run the query one time to load
-data into Logstash. Each row in the resultset becomes a single event.
-Columns in the resultset are converted into fields in the event.
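-
-As a quick illustration of the one-time mode mentioned above: with no `schedule`,
-the statement runs exactly once when the pipeline starts. A minimal sketch, reusing
-the placeholder connection details from the Usage example below:
-
-[source,ruby]
-------------------------------------------------------------------------------
-input {
-  jdbc {
-    jdbc_driver_library => "mysql-connector-java-5.1.36-bin.jar"
-    jdbc_driver_class => "com.mysql.jdbc.Driver"
-    jdbc_connection_string => "jdbc:mysql://localhost:3306/mydb"
-    jdbc_user => "mysql"
-    # No `schedule` option, so this query is executed once at startup
-    statement => "SELECT * from songs"
-  }
-}
-------------------------------------------------------------------------------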
-
-==== Drivers
-
-This plugin does not come packaged with JDBC driver libraries. The desired
-JDBC driver library must be explicitly passed in to the plugin using the
-`jdbc_driver_library` configuration option.
-
-==== Scheduling
-
-Input from this plugin can be scheduled to run periodically according to a specific
-schedule. This scheduling syntax is powered by https://github.com/jmettraux/rufus-scheduler[rufus-scheduler].
-The syntax is cron-like with some extensions specific to Rufus (e.g. timezone support).
-
-Examples:
-
-|==========================================================
-| `* 5 * 1-3 *` | will execute every minute of 5am every day of January through March.
-| `0 * * * *` | will execute on the 0th minute of every hour every day.
-| `0 6 * * * America/Chicago` | will execute at 6:00am (UTC/GMT -5) every day.
-|==========================================================
-
-
-Further documentation describing this syntax can be found https://github.com/jmettraux/rufus-scheduler#parsing-cronlines-and-time-strings[here].
-
-==== State
-
-The plugin will persist the `sql_last_value` parameter in the form of a
-metadata file stored in the configured `last_run_metadata_path`. Upon query execution,
-this file will be updated with the current value of `sql_last_value`. Next time
-the pipeline starts up, this value will be updated by reading from the file. If
-`clean_run` is set to true, this value will be ignored and `sql_last_value` will be
-set to Jan 1, 1970, or 0 if `use_column_value` is true, as if no query has ever been executed.
-
-==== Dealing With Large Result-sets
-
-Many JDBC drivers use the `fetch_size` parameter to limit how many
-results are pre-fetched at a time from the cursor into the client's cache
-before retrieving more results from the result-set. This is configured in
-this plugin using the `jdbc_fetch_size` configuration option. No fetch size
-is set by default in this plugin, so the specific driver's default size will
-be used.
-
-==== Usage
-
-Here is an example of setting up the plugin to fetch data from a MySQL database.
-First, we place the appropriate JDBC driver library in our current
-path (this can be placed anywhere on your filesystem). In this example, we connect to
-the 'mydb' database as the user 'mysql' and wish to input all rows in the 'songs'
-table that match a specific artist. The following example demonstrates a possible
-Logstash configuration for this. The `schedule` option in this example will
-instruct the plugin to execute this input statement on the minute, every minute.
-
-[source,ruby]
-------------------------------------------------------------------------------
-input {
-  jdbc {
-    jdbc_driver_library => "mysql-connector-java-5.1.36-bin.jar"
-    jdbc_driver_class => "com.mysql.jdbc.Driver"
-    jdbc_connection_string => "jdbc:mysql://localhost:3306/mydb"
-    jdbc_user => "mysql"
-    parameters => { "favorite_artist" => "Beethoven" }
-    schedule => "* * * * *"
-    statement => "SELECT * from songs where artist = :favorite_artist"
-  }
-}
-------------------------------------------------------------------------------
-
-==== Configuring SQL statement
-
-An SQL statement is required for this input. It can be passed in as a string via the
-`statement` option, or read from a file (`statement_filepath`). The file
-option is typically used when the SQL statement is large or cumbersome to supply in the config.
-The file option only supports one SQL statement. The plugin will only accept one of the options.
-It cannot read a statement from a file as well as from the `statement` configuration parameter. - -==== Configuring multiple SQL statements - -Configuring multiple SQL statements is useful when there is a need to query and ingest data -from different database tables or views. It is possible to define separate Logstash -configuration files for each statement or to define multiple statements in a single configuration -file. When using multiple statements in a single Logstash configuration file, each statement -has to be defined as a separate jdbc input (including jdbc driver, connection string and other -required parameters). - -Please note that if any of the statements use the `sql_last_value` parameter (e.g. for -ingesting only data changed since last run), each input should define its own -`last_run_metadata_path` parameter. Failure to do so will result in undesired behaviour, as -all inputs will store their state to the same (default) metadata file, effectively -overwriting each other's `sql_last_value`. - -==== Predefined Parameters - -Some parameters are built-in and can be used from within your queries. -Here is the list: - -|========================================================== -|sql_last_value | The value used to calculate which rows to query. Before any query is run, -this is set to Thursday, 1 January 1970, or 0 if `use_column_value` is true and -`tracking_column` is set. It is updated accordingly after subsequent queries are run. -|========================================================== - -Example: -[source,ruby] ---------------------------------------------------------------------------------------------------- -input { - jdbc { - statement => "SELECT id, mycolumn1, mycolumn2 FROM my_table WHERE id > :sql_last_value" - use_column_value => true - tracking_column => "id" - # ... other configuration bits - } -} ---------------------------------------------------------------------------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Jdbc Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-clean_run>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-columns_charset>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_connection_string>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_default_timezone>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_class>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_library>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_fetch_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_page_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_password_filepath>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_user>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_validate_connection>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-last_run_metadata_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-lowercase_column_names>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-record_last_run>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sequel_opts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sql_log_level>> |<>, one of `["fatal", "error", "warn", "info", "debug"]`|No -| <<{version}-plugins-{type}s-{plugin}-statement>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-statement_filepath>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-tracking_column>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tracking_column_type>> |<>, one of `["numeric", "timestamp"]`|No -| <<{version}-plugins-{type}s-{plugin}-use_column_value>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-clean_run"] -===== `clean_run` - - * Value type is <> - * Default value is `false` - -Whether the previous run state should be preserved - -[id="{version}-plugins-{type}s-{plugin}-columns_charset"] -===== `columns_charset` - - * Value type is <> - * Default value is `{}` - -The character encoding for specific columns. This option will override the `:charset` option -for the specified columns. - -Example: -[source,ruby] -------------------------------------------------------- -input { - jdbc { - ... - columns_charset => { "column0" => "ISO-8859-1" } - ... - } -} -------------------------------------------------------- -this will only convert column0 that has ISO-8859-1 as an original encoding. 
-
-[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts"]
-===== `connection_retry_attempts`
-
- * Value type is <>
- * Default value is `1`
-
-Maximum number of times to try connecting to the database
-
-[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time"]
-===== `connection_retry_attempts_wait_time`
-
- * Value type is <>
- * Default value is `0.5`
-
-Number of seconds to sleep between connection attempts
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_connection_string"]
-===== `jdbc_connection_string`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC connection string
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_default_timezone"]
-===== `jdbc_default_timezone`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Timezone conversion.
-SQL does not allow for timezone data in timestamp fields. This plugin will automatically
-convert your SQL timestamp fields to Logstash timestamps, in relative UTC time in ISO8601 format.
-
-Using this setting will manually assign a specified timezone offset, instead
-of using the timezone setting of the local machine. You must use a canonical
-timezone, *America/Denver*, for example.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_class"]
-===== `jdbc_driver_class`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC driver class to load, for example, "org.apache.derby.jdbc.ClientDriver".
-NB: per https://github.com/logstash-plugins/logstash-input-jdbc/issues/43, if you are using
-the Oracle JDBC driver (ojdbc6.jar), the correct `jdbc_driver_class` is `"Java::oracle.jdbc.driver.OracleDriver"`.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_library"]
-===== `jdbc_driver_library`
-
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC driver library path to the third-party driver library. If multiple libraries are
-required, you can pass them separated by a comma.
-
-If not provided, the plugin will look for the driver class in the Logstash Java classpath.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_fetch_size"]
-===== `jdbc_fetch_size`
-
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC fetch size. If not provided, the respective driver's default will be used.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_page_size"]
-===== `jdbc_page_size`
-
- * Value type is <>
- * Default value is `100000`
-
-JDBC page size
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled"]
-===== `jdbc_paging_enabled`
-
- * Value type is <>
- * Default value is `false`
-
-JDBC enable paging
-
-This will cause an SQL statement to be broken up into multiple queries.
-Each query will use limits and offsets to collectively retrieve the full
-result-set. The limit size is set with `jdbc_page_size`.
-
-Be aware that ordering is not guaranteed between queries.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_password"]
-===== `jdbc_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC password
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_password_filepath"]
-===== `jdbc_password_filepath`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to a file containing the JDBC password
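-
-For example, to keep the password out of the pipeline configuration itself, you
-can point this setting at a file that holds only the password. A minimal sketch
-(the path below is hypothetical):
-
-[source,ruby]
--------------------------------------------------------
-input {
-  jdbc {
-    # ... connection and statement settings ...
-    # Hypothetical path; the file contains only the JDBC password
-    jdbc_password_filepath => "/etc/logstash/jdbc_password"
-  }
-}
--------------------------------------------------------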
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout"]
-===== `jdbc_pool_timeout`
-
- * Value type is <>
- * Default value is `5`
-
-Connection pool configuration.
-The number of seconds to wait to acquire a connection before raising a PoolTimeoutError (default 5)
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_user"]
-===== `jdbc_user`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC user
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_validate_connection"]
-===== `jdbc_validate_connection`
-
- * Value type is <>
- * Default value is `false`
-
-Connection pool configuration.
-Validate connection before use.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout"]
-===== `jdbc_validation_timeout`
-
- * Value type is <>
- * Default value is `3600`
-
-Connection pool configuration.
-How often to validate a connection (in seconds)
-
-[id="{version}-plugins-{type}s-{plugin}-last_run_metadata_path"]
-===== `last_run_metadata_path`
-
- * Value type is <>
- * Default value is `"$HOME/.logstash_jdbc_last_run"`
-
-Path to the file with the last run time
-
-[id="{version}-plugins-{type}s-{plugin}-lowercase_column_names"]
-===== `lowercase_column_names`
-
- * Value type is <>
- * Default value is `true`
-
-Whether to force the lowercasing of identifier fields
-
-[id="{version}-plugins-{type}s-{plugin}-parameters"]
-===== `parameters`
-
- * Value type is <>
- * Default value is `{}`
-
-Hash of query parameters, for example `{ "target_id" => "321" }`
-
-[id="{version}-plugins-{type}s-{plugin}-record_last_run"]
-===== `record_last_run`
-
- * Value type is <>
- * Default value is `true`
-
-Whether to save state or not in `last_run_metadata_path`
-
-[id="{version}-plugins-{type}s-{plugin}-schedule"]
-===== `schedule`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Schedule for periodically running the statement, in cron format,
-for example: "* * * * *" (execute query every minute, on the minute)
-
-There is no schedule by default. If no schedule is given, then the statement is run
-exactly once.
-
-[id="{version}-plugins-{type}s-{plugin}-sequel_opts"]
-===== `sequel_opts`
-
- * Value type is <>
- * Default value is `{}`
-
-General/Vendor-specific Sequel configuration options.
-
-An example of an optional connection pool configuration is `max_connections` -
-the maximum number of connections the connection pool may hold.
-
-Examples of vendor-specific options can be found in this
-documentation page: https://github.com/jeremyevans/sequel/blob/master/doc/opening_databases.rdoc
-
-[id="{version}-plugins-{type}s-{plugin}-sql_log_level"]
-===== `sql_log_level`
-
- * Value can be any of: `fatal`, `error`, `warn`, `info`, `debug`
- * Default value is `"info"`
-
-Log level at which to log SQL queries. The accepted values are the common ones:
-`fatal`, `error`, `warn`, `info`, and `debug`.
-
-[id="{version}-plugins-{type}s-{plugin}-statement"]
-===== `statement`
-
- * Value type is <>
- * There is no default value for this setting.
-
-SQL statement to execute.
-
-To use parameters, use named parameter syntax.
-For example:
-
-[source,ruby]
-----------------------------------------------
-"SELECT * FROM MYTABLE WHERE id = :target_id"
-----------------------------------------------
-
-Here, `:target_id` is a named parameter. You can configure named parameters
-with the `parameters` setting.
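-
-Putting `statement` and `parameters` together, a minimal sketch (the table and
-parameter values are the illustrative ones used elsewhere on this page):
-
-[source,ruby]
--------------------------------------------------------
-input {
-  jdbc {
-    # ... connection settings ...
-    # `:target_id` in the statement is filled in from the `parameters` hash
-    parameters => { "target_id" => "321" }
-    statement => "SELECT * FROM MYTABLE WHERE id = :target_id"
-  }
-}
--------------------------------------------------------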
-
-[id="{version}-plugins-{type}s-{plugin}-statement_filepath"]
-===== `statement_filepath`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path of the file containing the statement to execute
-
-[id="{version}-plugins-{type}s-{plugin}-tracking_column"]
-===== `tracking_column`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If tracking a column value rather than a timestamp, the column whose value is to be tracked
-
-[id="{version}-plugins-{type}s-{plugin}-tracking_column_type"]
-===== `tracking_column_type`
-
- * Value can be any of: `numeric`, `timestamp`
- * Default value is `"numeric"`
-
-Type of tracking column. Currently only "numeric" and "timestamp" are supported.
-
-[id="{version}-plugins-{type}s-{plugin}-use_column_value"]
-===== `use_column_value`
-
- * Value type is <>
- * Default value is `false`
-
-Use an incremental column value rather than a timestamp
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/jdbc-v4.3.3.asciidoc b/docs/versioned-plugins/inputs/jdbc-v4.3.3.asciidoc
deleted file mode 100644
index 78dafe00a..000000000
--- a/docs/versioned-plugins/inputs/jdbc-v4.3.3.asciidoc
+++ /dev/null
@@ -1,486 +0,0 @@
-:plugin: jdbc
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.3.3
-:release_date: 2017-12-14
-:changelog_url: https://github.com/logstash-plugins/logstash-input-jdbc/blob/v4.3.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Jdbc input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This plugin was created as a way to ingest data from any database
-with a JDBC interface into Logstash. You can periodically schedule ingestion
-using cron syntax (see the `schedule` setting) or run the query one time to load
-data into Logstash. Each row in the resultset becomes a single event.
-Columns in the resultset are converted into fields in the event.
-
-==== Drivers
-
-This plugin does not come packaged with JDBC driver libraries. The desired
-JDBC driver library must be explicitly passed in to the plugin using the
-`jdbc_driver_library` configuration option.
-
-==== Scheduling
-
-Input from this plugin can be scheduled to run periodically according to a specific
-schedule. This scheduling syntax is powered by https://github.com/jmettraux/rufus-scheduler[rufus-scheduler].
-The syntax is cron-like with some extensions specific to Rufus (e.g. timezone support).
-
-Examples:
-
-|==========================================================
-| `* 5 * 1-3 *` | will execute every minute of 5am every day of January through March.
-| `0 * * * *` | will execute on the 0th minute of every hour every day.
-| `0 6 * * * America/Chicago` | will execute at 6:00am (UTC/GMT -5) every day.
-|==========================================================
-
-
-Further documentation describing this syntax can be found https://github.com/jmettraux/rufus-scheduler#parsing-cronlines-and-time-strings[here].
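-
-For instance, a schedule entry from the table above dropped into the plugin
-configuration, as a minimal sketch (only the relevant option is shown):
-
-[source,ruby]
--------------------------------------------------------
-input {
-  jdbc {
-    # ... connection and statement settings ...
-    # Run at 6:00am America/Chicago every day (rufus-scheduler cron syntax)
-    schedule => "0 6 * * * America/Chicago"
-  }
-}
--------------------------------------------------------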
-
-==== State
-
-The plugin will persist the `sql_last_value` parameter in the form of a
-metadata file stored in the configured `last_run_metadata_path`. Upon query execution,
-this file will be updated with the current value of `sql_last_value`. Next time
-the pipeline starts up, this value will be updated by reading from the file. If
-`clean_run` is set to true, this value will be ignored and `sql_last_value` will be
-set to Jan 1, 1970, or 0 if `use_column_value` is true, as if no query has ever been executed.
-
-==== Dealing With Large Result-sets
-
-Many JDBC drivers use the `fetch_size` parameter to limit how many
-results are pre-fetched at a time from the cursor into the client's cache
-before retrieving more results from the result-set. This is configured in
-this plugin using the `jdbc_fetch_size` configuration option. No fetch size
-is set by default in this plugin, so the specific driver's default size will
-be used.
-
-==== Usage
-
-Here is an example of setting up the plugin to fetch data from a MySQL database.
-First, we place the appropriate JDBC driver library in our current
-path (this can be placed anywhere on your filesystem). In this example, we connect to
-the 'mydb' database as the user 'mysql' and wish to input all rows in the 'songs'
-table that match a specific artist. The following example demonstrates a possible
-Logstash configuration for this. The `schedule` option in this example will
-instruct the plugin to execute this input statement on the minute, every minute.
-
-[source,ruby]
-------------------------------------------------------------------------------
-input {
-  jdbc {
-    jdbc_driver_library => "mysql-connector-java-5.1.36-bin.jar"
-    jdbc_driver_class => "com.mysql.jdbc.Driver"
-    jdbc_connection_string => "jdbc:mysql://localhost:3306/mydb"
-    jdbc_user => "mysql"
-    parameters => { "favorite_artist" => "Beethoven" }
-    schedule => "* * * * *"
-    statement => "SELECT * from songs where artist = :favorite_artist"
-  }
-}
-------------------------------------------------------------------------------
-
-==== Configuring SQL statement
-
-An SQL statement is required for this input. It can be passed in as a string via the
-`statement` option, or read from a file (`statement_filepath`). The file
-option is typically used when the SQL statement is large or cumbersome to supply in the config.
-The file option only supports one SQL statement. The plugin will only accept one of the options.
-It cannot read a statement from a file as well as from the `statement` configuration parameter.
-
-==== Configuring multiple SQL statements
-
-Configuring multiple SQL statements is useful when there is a need to query and ingest data
-from different database tables or views. It is possible to define separate Logstash
-configuration files for each statement or to define multiple statements in a single configuration
-file. When using multiple statements in a single Logstash configuration file, each statement
-has to be defined as a separate jdbc input (including jdbc driver, connection string and other
-required parameters).
-
-Please note that if any of the statements use the `sql_last_value` parameter (e.g. for
-ingesting only data changed since last run), each input should define its own
-`last_run_metadata_path` parameter. Failure to do so will result in undesired behaviour, as
-all inputs will store their state to the same (default) metadata file, effectively
-overwriting each other's `sql_last_value`.
-
-==== Predefined Parameters
-
-Some parameters are built-in and can be used from within your queries.
-Here is the list: - -|========================================================== -|sql_last_value | The value used to calculate which rows to query. Before any query is run, -this is set to Thursday, 1 January 1970, or 0 if `use_column_value` is true and -`tracking_column` is set. It is updated accordingly after subsequent queries are run. -|========================================================== - -Example: -[source,ruby] ---------------------------------------------------------------------------------------------------- -input { - jdbc { - statement => "SELECT id, mycolumn1, mycolumn2 FROM my_table WHERE id > :sql_last_value" - use_column_value => true - tracking_column => "id" - # ... other configuration bits - } -} ---------------------------------------------------------------------------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Jdbc Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-clean_run>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-columns_charset>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_connection_string>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_default_timezone>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_class>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_driver_library>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_fetch_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_page_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_password_filepath>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_user>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-jdbc_validate_connection>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-last_run_metadata_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-lowercase_column_names>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-record_last_run>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sequel_opts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sql_log_level>> |<>, one of `["fatal", "error", "warn", "info", "debug"]`|No -| <<{version}-plugins-{type}s-{plugin}-statement>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-statement_filepath>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-tracking_column>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tracking_column_type>> |<>, one of `["numeric", "timestamp"]`|No -| <<{version}-plugins-{type}s-{plugin}-use_column_value>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. 
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-clean_run"]
-===== `clean_run`
-
- * Value type is <>
- * Default value is `false`
-
-Whether the previous run state should be preserved
-
-[id="{version}-plugins-{type}s-{plugin}-columns_charset"]
-===== `columns_charset`
-
- * Value type is <>
- * Default value is `{}`
-
-The character encoding for specific columns. This option will override the `:charset` option
-for the specified columns.
-
-Example:
-[source,ruby]
--------------------------------------------------------
-input {
-  jdbc {
-    ...
-    columns_charset => { "column0" => "ISO-8859-1" }
-    ...
-  }
-}
--------------------------------------------------------
-This will convert only `column0`, whose original encoding is ISO-8859-1.
-
-[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts"]
-===== `connection_retry_attempts`
-
- * Value type is <>
- * Default value is `1`
-
-Maximum number of times to try connecting to the database
-
-[id="{version}-plugins-{type}s-{plugin}-connection_retry_attempts_wait_time"]
-===== `connection_retry_attempts_wait_time`
-
- * Value type is <>
- * Default value is `0.5`
-
-Number of seconds to sleep between connection attempts
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_connection_string"]
-===== `jdbc_connection_string`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC connection string
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_default_timezone"]
-===== `jdbc_default_timezone`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Timezone conversion.
-SQL does not allow for timezone data in timestamp fields. This plugin will automatically
-convert your SQL timestamp fields to Logstash timestamps, in relative UTC time in ISO8601 format.
-
-Using this setting will manually assign a specified timezone offset, instead
-of using the timezone setting of the local machine. You must use a canonical
-timezone, *America/Denver*, for example.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_class"]
-===== `jdbc_driver_class`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC driver class to load, for example, "org.apache.derby.jdbc.ClientDriver".
-NB: per https://github.com/logstash-plugins/logstash-input-jdbc/issues/43, if you are using
-the Oracle JDBC driver (ojdbc6.jar), the correct `jdbc_driver_class` is `"Java::oracle.jdbc.driver.OracleDriver"`.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_driver_library"]
-===== `jdbc_driver_library`
-
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC driver library path to the third-party driver library. If multiple libraries are
-required, you can pass them separated by a comma.
-
-If not provided, the plugin will look for the driver class in the Logstash Java classpath.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_fetch_size"]
-===== `jdbc_fetch_size`
-
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC fetch size. If not provided, the respective driver's default will be used.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_page_size"]
-===== `jdbc_page_size`
-
- * Value type is <>
- * Default value is `100000`
-
-JDBC page size
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_paging_enabled"]
-===== `jdbc_paging_enabled`
-
- * Value type is <>
- * Default value is `false`
-
-JDBC enable paging
-
-This will cause an SQL statement to be broken up into multiple queries.
-Each query will use limits and offsets to collectively retrieve the full
-result-set. The limit size is set with `jdbc_page_size`.
-
-Be aware that ordering is not guaranteed between queries.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_password"]
-===== `jdbc_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC password
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_password_filepath"]
-===== `jdbc_password_filepath`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to a file containing the JDBC password
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_pool_timeout"]
-===== `jdbc_pool_timeout`
-
- * Value type is <>
- * Default value is `5`
-
-Connection pool configuration.
-The number of seconds to wait to acquire a connection before raising a PoolTimeoutError (default 5)
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_user"]
-===== `jdbc_user`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-JDBC user
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_validate_connection"]
-===== `jdbc_validate_connection`
-
- * Value type is <>
- * Default value is `false`
-
-Connection pool configuration.
-Validate connection before use.
-
-[id="{version}-plugins-{type}s-{plugin}-jdbc_validation_timeout"]
-===== `jdbc_validation_timeout`
-
- * Value type is <>
- * Default value is `3600`
-
-Connection pool configuration.
-How often to validate a connection (in seconds)
-
-[id="{version}-plugins-{type}s-{plugin}-last_run_metadata_path"]
-===== `last_run_metadata_path`
-
- * Value type is <>
- * Default value is `"$HOME/.logstash_jdbc_last_run"`
-
-Path to the file with the last run time
-
-[id="{version}-plugins-{type}s-{plugin}-lowercase_column_names"]
-===== `lowercase_column_names`
-
- * Value type is <>
- * Default value is `true`
-
-Whether to force the lowercasing of identifier fields
-
-[id="{version}-plugins-{type}s-{plugin}-parameters"]
-===== `parameters`
-
- * Value type is <>
- * Default value is `{}`
-
-Hash of query parameters, for example `{ "target_id" => "321" }`
-
-[id="{version}-plugins-{type}s-{plugin}-record_last_run"]
-===== `record_last_run`
-
- * Value type is <>
- * Default value is `true`
-
-Whether to save state or not in `last_run_metadata_path`
-
-[id="{version}-plugins-{type}s-{plugin}-schedule"]
-===== `schedule`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Schedule for periodically running the statement, in cron format,
-for example: "* * * * *" (execute query every minute, on the minute)
-
-There is no schedule by default. If no schedule is given, then the statement is run
-exactly once.
-
-[id="{version}-plugins-{type}s-{plugin}-sequel_opts"]
-===== `sequel_opts`
-
- * Value type is <>
- * Default value is `{}`
-
-General/Vendor-specific Sequel configuration options.
-
-An example of an optional connection pool configuration is `max_connections` -
-the maximum number of connections the connection pool may hold.
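-
-As a minimal sketch of passing a pool option through `sequel_opts` (the
-`max_connections` value below is illustrative, not a recommendation):
-
-[source,ruby]
--------------------------------------------------------
-input {
-  jdbc {
-    # ... connection and statement settings ...
-    # Hypothetical pool cap, passed straight through to Sequel
-    sequel_opts => { "max_connections" => 4 }
-  }
-}
--------------------------------------------------------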
-
-Examples of vendor-specific options can be found in this
-documentation page: https://github.com/jeremyevans/sequel/blob/master/doc/opening_databases.rdoc
-
-[id="{version}-plugins-{type}s-{plugin}-sql_log_level"]
-===== `sql_log_level`
-
- * Value can be any of: `fatal`, `error`, `warn`, `info`, `debug`
- * Default value is `"info"`
-
-Log level at which to log SQL queries. The accepted values are the common ones:
-`fatal`, `error`, `warn`, `info`, and `debug`.
-
-[id="{version}-plugins-{type}s-{plugin}-statement"]
-===== `statement`
-
- * Value type is <>
- * There is no default value for this setting.
-
-SQL statement to execute.
-
-To use parameters, use named parameter syntax.
-For example:
-
-[source,ruby]
-----------------------------------------------
-"SELECT * FROM MYTABLE WHERE id = :target_id"
-----------------------------------------------
-
-Here, `:target_id` is a named parameter. You can configure named parameters
-with the `parameters` setting.
-
-[id="{version}-plugins-{type}s-{plugin}-statement_filepath"]
-===== `statement_filepath`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path of the file containing the statement to execute
-
-[id="{version}-plugins-{type}s-{plugin}-tracking_column"]
-===== `tracking_column`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If tracking a column value rather than a timestamp, the column whose value is to be tracked
-
-[id="{version}-plugins-{type}s-{plugin}-tracking_column_type"]
-===== `tracking_column_type`
-
- * Value can be any of: `numeric`, `timestamp`
- * Default value is `"numeric"`
-
-Type of tracking column. Currently only "numeric" and "timestamp" are supported.
-
-[id="{version}-plugins-{type}s-{plugin}-use_column_value"]
-===== `use_column_value`
-
- * Value type is <>
- * Default value is `false`
-
-Use an incremental column value rather than a timestamp
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/jms-index.asciidoc b/docs/versioned-plugins/inputs/jms-index.asciidoc
deleted file mode 100644
index d968410fa..000000000
--- a/docs/versioned-plugins/inputs/jms-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: jms
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-15
-| <> | 2017-06-23
-|=======================================================================
-
-include::jms-v3.0.4.asciidoc[]
-include::jms-v3.0.3.asciidoc[]
-include::jms-v3.0.2.asciidoc[]
-
diff --git a/docs/versioned-plugins/inputs/jms-v3.0.2.asciidoc b/docs/versioned-plugins/inputs/jms-v3.0.2.asciidoc
deleted file mode 100644
index f373a3951..000000000
--- a/docs/versioned-plugins/inputs/jms-v3.0.2.asciidoc
+++ /dev/null
@@ -1,259 +0,0 @@
-:plugin: jms
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.2
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-input-jms/blob/v3.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Jms input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Read events from a JMS broker. Supports both JMS queues and topics.
-
-For more information about JMS, see the Java Message Service documentation.
-For more information about the underlying Ruby gem, see the
-https://github.com/reidmorrison/jruby-jms[jruby-jms] project.
-
-Here is a config example to pull from a queue:
-
-[source,ruby]
-----------------------------------------------
-  jms {
-    include_header => false
-    include_properties => false
-    include_body => true
-    use_jms_timestamp => false
-    interval => 10
-    destination => "myqueue"
-    pub_sub => false
-    yaml_file => "~/jms.yml"
-    yaml_section => "mybroker"
-  }
-----------------------------------------------
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Jms Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-broker_url>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-factory>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-include_body>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-include_header>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-include_properties>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-jndi_context>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-jndi_name>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-pub_sub>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-require_jars>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-runner>> |<>, one of `["consumer", "async", "thread"]`|No
-| <<{version}-plugins-{type}s-{plugin}-selector>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-use_jms_timestamp>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-username>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-yaml_file>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-yaml_section>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-broker_url"]
-===== `broker_url`
-
- * Value type is <>
- * There is no default value for this setting.
-
-URL to use when connecting to the JMS provider
-
-[id="{version}-plugins-{type}s-{plugin}-destination"]
-===== `destination`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Name of the destination queue or topic to use.
-
-[id="{version}-plugins-{type}s-{plugin}-factory"]
-===== `factory`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Name of the JMS provider factory class
-
-[id="{version}-plugins-{type}s-{plugin}-include_body"]
-===== `include_body`
-
- * Value type is <>
- * Default value is `true`
-
-Include the JMS message body in the event.
-TextMessage, MapMessage, and BytesMessage are supported.
-If the JMS message is a TextMessage or BytesMessage, the value will be in the "message" field of the event.
-If the JMS message is a MapMessage, all of its key/value pairs will be added to the event.
-StreamMessage and ObjectMessage are not supported.
-
-[id="{version}-plugins-{type}s-{plugin}-include_header"]
-===== `include_header`
-
- * Value type is <>
- * Default value is `true`
-
-A JMS message has three parts:
-
-* Message Headers (required)
-* Message Properties (optional)
-* Message Bodies (optional)
-
-You can tell the input plugin which parts should be included in the event produced by Logstash.
-
-Include JMS message header field values in the event
-
-[id="{version}-plugins-{type}s-{plugin}-include_properties"]
-===== `include_properties`
-
- * Value type is <>
- * Default value is `true`
-
-Include JMS message properties field values in the event
-
-[id="{version}-plugins-{type}s-{plugin}-interval"]
-===== `interval`
-
- * Value type is <>
- * Default value is `10`
-
-Polling interval in seconds.
-This is the time to sleep between polls of a consumed queue.
-This parameter has no influence when subscribed to a topic.
-
-[id="{version}-plugins-{type}s-{plugin}-jndi_context"]
-===== `jndi_context`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Mandatory if JNDI lookup is being used;
-contains details on how to connect to the JNDI server.
-
-[id="{version}-plugins-{type}s-{plugin}-jndi_name"]
-===== `jndi_name`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Name of the JNDI entry at which the factory can be found
-
-[id="{version}-plugins-{type}s-{plugin}-password"]
-===== `password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Password to use when connecting to the JMS provider
-
-[id="{version}-plugins-{type}s-{plugin}-pub_sub"]
-===== `pub_sub`
-
- * Value type is <>
- * Default value is `false`
-
-Whether pub-sub (topic) style should be used.
-
-[id="{version}-plugins-{type}s-{plugin}-require_jars"]
-===== `require_jars`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If you do not use a YAML configuration, use either `factory` or `jndi_name`.
-An optional array of JAR file names to load for the specified
-JMS provider. By using this option it is not necessary
-to put all the JMS provider-specific JAR files into the
-Java CLASSPATH prior to starting Logstash.
-
-[id="{version}-plugins-{type}s-{plugin}-runner"]
-===== `runner`
-
- * Value can be any of: `consumer`, `async`, `thread`
- * Default value is `"consumer"`
-
-Choose an implementation of the run block. The value can be either `consumer`, `async`, or `thread`.
-
-[id="{version}-plugins-{type}s-{plugin}-selector"]
-===== `selector`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Set the selector to use to get messages off the queue or topic
-
-[id="{version}-plugins-{type}s-{plugin}-threads"]
-===== `threads`
-
- * Value type is <>
- * Default value is `1`
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-timeout"]
-===== `timeout`
-
- * Value type is <>
- * Default value is `60`
-
-Initial connection timeout in seconds.
-
-[id="{version}-plugins-{type}s-{plugin}-use_jms_timestamp"]
-===== `use_jms_timestamp`
-
- * Value type is <>
- * Default value is `false`
-
-Convert the JMSTimestamp header field to the @timestamp value of the event
-
-[id="{version}-plugins-{type}s-{plugin}-username"]
-===== `username`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Username to connect to the JMS provider with
-
-[id="{version}-plugins-{type}s-{plugin}-yaml_file"]
-===== `yaml_file`
-
- * Value type is <>
- * There is no default value for this setting.
-
-YAML config file
-
-[id="{version}-plugins-{type}s-{plugin}-yaml_section"]
-===== `yaml_section`
-
- * Value type is <>
- * There is no default value for this setting.
-
-YAML config file section name.
-For some known examples, see https://github.com/reidmorrison/jruby-jms/blob/master/examples/jms.yml[Example jms.yml].
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/jms-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/jms-v3.0.3.asciidoc
deleted file mode 100644
index 9a674a00f..000000000
--- a/docs/versioned-plugins/inputs/jms-v3.0.3.asciidoc
+++ /dev/null
@@ -1,259 +0,0 @@
-:plugin: jms
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.3
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-input-jms/blob/v3.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Jms input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Read events from a JMS broker. Supports both JMS queues and topics.
-
-For more information about JMS, see the Java Message Service documentation.
-For more information about the underlying Ruby gem, see the
-https://github.com/reidmorrison/jruby-jms[jruby-jms] project.
-
-Here is a config example to pull from a queue:
-
-[source,ruby]
-----------------------------------------------
-  jms {
-    include_header => false
-    include_properties => false
-    include_body => true
-    use_jms_timestamp => false
-    interval => 10
-    destination => "myqueue"
-    pub_sub => false
-    yaml_file => "~/jms.yml"
-    yaml_section => "mybroker"
-  }
-----------------------------------------------
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Jms Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-broker_url>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-factory>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_body>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_header>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_properties>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jndi_context>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jndi_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pub_sub>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-require_jars>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-runner>> |<>, one of `["consumer", "async", "thread"]`|No -| <<{version}-plugins-{type}s-{plugin}-selector>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-use_jms_timestamp>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-username>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-yaml_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-yaml_section>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-broker_url"] -===== `broker_url` - - * Value type is <> - * There is no default value for this setting. - -Url to use when connecting to the JMS provider - -[id="{version}-plugins-{type}s-{plugin}-destination"] -===== `destination` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Name of the destination queue or topic to use. - -[id="{version}-plugins-{type}s-{plugin}-factory"] -===== `factory` - - * Value type is <> - * There is no default value for this setting. - -Name of JMS Provider Factory class - -[id="{version}-plugins-{type}s-{plugin}-include_body"] -===== `include_body` - - * Value type is <> - * Default value is `true` - -Include JMS Message Body in the event -Supports TextMessage, MapMessage and ByteMessage -If the JMS Message is a TextMessage or ByteMessage, then the value will be in the "message" field of the event -If the JMS Message is a MapMessage, then all the key/value pairs will be added in the Hashmap of the event -StreamMessage and ObjectMessage are not supported - -[id="{version}-plugins-{type}s-{plugin}-include_header"] -===== `include_header` - - * Value type is <> - * Default value is `true` - -A JMS message has three parts : - Message Headers (required) - Message Properties (optional) - Message Bodies (optional) -You can tell the input plugin which parts should be included in the event produced by Logstash - -Include JMS Message Header Field values in the event - -[id="{version}-plugins-{type}s-{plugin}-include_properties"] -===== `include_properties` - - * Value type is <> - * Default value is `true` - -Include JMS Message Properties Field values in the event - -[id="{version}-plugins-{type}s-{plugin}-interval"] -===== `interval` - - * Value type is <> - * Default value is `10` - -Polling interval in seconds. -This is the time sleeping between asks to a consumed Queue. 
-This parameter has no influence in the case of a subscribed Topic. - -[id="{version}-plugins-{type}s-{plugin}-jndi_context"] -===== `jndi_context` - - * Value type is <> - * There is no default value for this setting. - -Mandatory if a JNDI lookup is being used; -contains details on how to connect to the JNDI server. - -[id="{version}-plugins-{type}s-{plugin}-jndi_name"] -===== `jndi_name` - - * Value type is <> - * There is no default value for this setting. - -Name of the JNDI entry at which the Factory can be found. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password to use when connecting to the JMS provider. - -[id="{version}-plugins-{type}s-{plugin}-pub_sub"] -===== `pub_sub` - - * Value type is <> - * Default value is `false` - -Whether pub-sub (topic) style should be used. - -[id="{version}-plugins-{type}s-{plugin}-require_jars"] -===== `require_jars` - - * Value type is <> - * There is no default value for this setting. - -If you do not use a yaml configuration, use either `factory` or `jndi_name`. -An optional array of jar file names to load for the specified -JMS provider. By using this option it is not necessary -to put all the JMS provider-specific jar files into the -Java CLASSPATH prior to starting Logstash. - -[id="{version}-plugins-{type}s-{plugin}-runner"] -===== `runner` - - * Value can be any of: `consumer`, `async`, `thread` - * Default value is `"consumer"` - -Choose an implementation of the run block. Value can be either `consumer`, `async` or `thread`. - -[id="{version}-plugins-{type}s-{plugin}-selector"] -===== `selector` - - * Value type is <> - * There is no default value for this setting. - -Set the selector to use to get messages off the queue or topic. - -[id="{version}-plugins-{type}s-{plugin}-threads"] -===== `threads` - - * Value type is <> - * Default value is `1` - - - -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * Default value is `60` - -Initial connection timeout in seconds. - -[id="{version}-plugins-{type}s-{plugin}-use_jms_timestamp"] -===== `use_jms_timestamp` - - * Value type is <> - * Default value is `false` - -Convert the JMSTimestamp header field to the @timestamp value of the event. - -[id="{version}-plugins-{type}s-{plugin}-username"] -===== `username` - - * Value type is <> - * There is no default value for this setting. - -Username to connect to the JMS provider with. - -[id="{version}-plugins-{type}s-{plugin}-yaml_file"] -===== `yaml_file` - - * Value type is <> - * There is no default value for this setting. - -Yaml config file. - -[id="{version}-plugins-{type}s-{plugin}-yaml_section"] -===== `yaml_section` - - * Value type is <> - * There is no default value for this setting. - -Yaml config file section name. -For some known examples, see the https://github.com/reidmorrison/jruby-jms/blob/master/examples/jms.yml[example jms.yml]. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/jms-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/jms-v3.0.4.asciidoc deleted file mode 100644 index 232dcdadc..000000000 --- a/docs/versioned-plugins/inputs/jms-v3.0.4.asciidoc +++ /dev/null @@ -1,259 +0,0 @@ -:plugin: jms -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT!
-/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-jms/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Jms input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read events from a JMS broker. Supports both JMS Queues and Topics. - -For more information about JMS, see -For more information about the Ruby Gem used, see -Here is a config example to pull from a queue: - jms { - include_header => false - include_properties => false - include_body => true - use_jms_timestamp => false - interval => 10 - destination => "myqueue" - pub_sub => false - yaml_file => "~/jms.yml" - yaml_section => "mybroker" - } - - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Jms Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-broker_url>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-factory>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_body>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_header>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_properties>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jndi_context>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jndi_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pub_sub>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-require_jars>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-runner>> |<>, one of `["consumer", "async", "thread"]`|No -| <<{version}-plugins-{type}s-{plugin}-selector>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-use_jms_timestamp>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-username>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-yaml_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-yaml_section>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-broker_url"] -===== `broker_url` - - * Value type is <> - * There is no default value for this setting. - -URL to use when connecting to the JMS provider. - -[id="{version}-plugins-{type}s-{plugin}-destination"] -===== `destination` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Name of the destination queue or topic to use. - -[id="{version}-plugins-{type}s-{plugin}-factory"] -===== `factory` - - * Value type is <> - * There is no default value for this setting.
- -Name of the JMS Provider Factory class. - -[id="{version}-plugins-{type}s-{plugin}-include_body"] -===== `include_body` - - * Value type is <> - * Default value is `true` - -Include the JMS Message Body in the event. -Supports TextMessage, MapMessage and ByteMessage. -If the JMS Message is a TextMessage or ByteMessage, the value will be in the "message" field of the event. -If the JMS Message is a MapMessage, all the key/value pairs will be added to the event. -StreamMessage and ObjectMessage are not supported. - -[id="{version}-plugins-{type}s-{plugin}-include_header"] -===== `include_header` - - * Value type is <> - * Default value is `true` - -A JMS message has three parts: - Message Headers (required) - Message Properties (optional) - Message Bodies (optional) -You can tell the input plugin which parts should be included in the event produced by Logstash. - -Include JMS Message Header field values in the event. - -[id="{version}-plugins-{type}s-{plugin}-include_properties"] -===== `include_properties` - - * Value type is <> - * Default value is `true` - -Include JMS Message Properties field values in the event. - -[id="{version}-plugins-{type}s-{plugin}-interval"] -===== `interval` - - * Value type is <> - * Default value is `10` - -Polling interval in seconds. -This is the time the plugin sleeps between polls of a consumed Queue. -This parameter has no influence in the case of a subscribed Topic. - -[id="{version}-plugins-{type}s-{plugin}-jndi_context"] -===== `jndi_context` - - * Value type is <> - * There is no default value for this setting. - -Mandatory if a JNDI lookup is being used; -contains details on how to connect to the JNDI server. - -[id="{version}-plugins-{type}s-{plugin}-jndi_name"] -===== `jndi_name` - - * Value type is <> - * There is no default value for this setting. - -Name of the JNDI entry at which the Factory can be found. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password to use when connecting to the JMS provider. - -[id="{version}-plugins-{type}s-{plugin}-pub_sub"] -===== `pub_sub` - - * Value type is <> - * Default value is `false` - -Whether pub-sub (topic) style should be used. - -[id="{version}-plugins-{type}s-{plugin}-require_jars"] -===== `require_jars` - - * Value type is <> - * There is no default value for this setting. - -If you do not use a yaml configuration, use either `factory` or `jndi_name`. -An optional array of jar file names to load for the specified -JMS provider. By using this option it is not necessary -to put all the JMS provider-specific jar files into the -Java CLASSPATH prior to starting Logstash. - -[id="{version}-plugins-{type}s-{plugin}-runner"] -===== `runner` - - * Value can be any of: `consumer`, `async`, `thread` - * Default value is `"consumer"` - -Choose an implementation of the run block. Value can be either `consumer`, `async` or `thread`. - -[id="{version}-plugins-{type}s-{plugin}-selector"] -===== `selector` - - * Value type is <> - * There is no default value for this setting. - -Set the selector to use to get messages off the queue or topic. - -[id="{version}-plugins-{type}s-{plugin}-threads"] -===== `threads` - - * Value type is <> - * Default value is `1` - - - -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * Default value is `60` - -Initial connection timeout in seconds.
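- -As a brief, hypothetical illustration of `selector` (the expression uses standard JMS message selector syntax; the yaml file and section values repeat the assumed example above): -[source,ruby] -    jms { -        destination => "myqueue" -        yaml_file => "~/jms.yml" -        yaml_section => "mybroker" -        selector => "JMSPriority > 4"   # only consume higher-priority messages -    } -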
- -[id="{version}-plugins-{type}s-{plugin}-use_jms_timestamp"] -===== `use_jms_timestamp` - - * Value type is <> - * Default value is `false` - -Convert the JMSTimestamp header field to the @timestamp value of the event - -[id="{version}-plugins-{type}s-{plugin}-username"] -===== `username` - - * Value type is <> - * There is no default value for this setting. - -Username to connect to JMS provider with - -[id="{version}-plugins-{type}s-{plugin}-yaml_file"] -===== `yaml_file` - - * Value type is <> - * There is no default value for this setting. - -Yaml config file - -[id="{version}-plugins-{type}s-{plugin}-yaml_section"] -===== `yaml_section` - - * Value type is <> - * There is no default value for this setting. - -Yaml config file section name -For some known examples, see: [Example jms.yml](https://github.com/reidmorrison/jruby-jms/blob/master/examples/jms.yml) - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/jmx-index.asciidoc b/docs/versioned-plugins/inputs/jmx-index.asciidoc deleted file mode 100644 index 9c7ee4697..000000000 --- a/docs/versioned-plugins/inputs/jmx-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: jmx -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-14 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::jmx-v3.0.4.asciidoc[] -include::jmx-v3.0.3.asciidoc[] -include::jmx-v3.0.2.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/jmx-pipe-index.asciidoc b/docs/versioned-plugins/inputs/jmx-pipe-index.asciidoc deleted file mode 100644 index 0bccdd2a5..000000000 --- a/docs/versioned-plugins/inputs/jmx-pipe-index.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -:plugin: jmx-pipe -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -|======================================================================= - - diff --git a/docs/versioned-plugins/inputs/jmx-v3.0.2.asciidoc b/docs/versioned-plugins/inputs/jmx-v3.0.2.asciidoc deleted file mode 100644 index 213198633..000000000 --- a/docs/versioned-plugins/inputs/jmx-v3.0.2.asciidoc +++ /dev/null @@ -1,157 +0,0 @@ -:plugin: jmx -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.2 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-jmx/blob/v3.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Jmx input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This input plugin permits to retrieve metrics from remote Java applications using JMX. -Every `polling_frequency`, it scans a folder containing json configuration -files describing JVMs to monitor with metrics to retrieve. -Then a pool of threads will retrieve metrics and create events. 
- -## The configuration: - -In the Logstash configuration, you must set the polling frequency, -the number of threads used to poll metrics, and the absolute path of a directory containing -json files with the per-JVM configuration of metrics to retrieve. -Logstash input configuration example: -[source,ruby] - jmx { - //Required - path => "/apps/logstash_conf/jmxconf" - //Optional, default 60s - polling_frequency => 15 - type => "jmx" - //Optional, default 4 - nb_thread => 4 - } - -Json JMX configuration example: -[source,js] - { - //Required, JMX listening host/ip - "host" : "192.168.1.2", - //Required, JMX listening port - "port" : 1335, - //Optional, the username to connect to JMX - "username" : "user", - //Optional, the password to connect to JMX - "password": "pass", - //Optional, use this alias as a prefix in the metric name. If not set use _ - "alias" : "test.homeserver.elasticsearch", - //Required, list of JMX metrics to retrieve - "queries" : [ - { - //Required, the object name of Mbean to request - "object_name" : "java.lang:type=Memory", - //Optional, use this alias in the metrics value instead of the object_name - "object_alias" : "Memory" - }, { - "object_name" : "java.lang:type=Runtime", - //Optional, set of attributes to retrieve. If not set retrieve - //all metrics available on the configured object_name. - "attributes" : [ "Uptime", "StartTime" ], - "object_alias" : "Runtime" - }, { - //object_name can be configured with * to retrieve all matching Mbeans - "object_name" : "java.lang:type=GarbageCollector,name=*", - "attributes" : [ "CollectionCount", "CollectionTime" ], - //object_alias can be based on specific value from the object_name thanks to ${}. - //In this case ${type} will be replaced by GarbageCollector... - "object_alias" : "${type}.${name}" - }, { - "object_name" : "java.nio:type=BufferPool,name=*", - "object_alias" : "${type}.${name}" - } ] - } - -Here are examples of generated events. When the returned metric value is a -number or boolean, it is stored in the `metric_value_number` event field; -otherwise it is stored in the `metric_value_string` event field. -[source,ruby] - { - "@version" => "1", - "@timestamp" => "2014-02-18T20:57:27.688Z", - "host" => "192.168.1.2", - "path" => "/apps/logstash_conf/jmxconf", - "type" => "jmx", - "metric_path" => "test.homeserver.elasticsearch.GarbageCollector.ParNew.CollectionCount", - "metric_value_number" => 2212 - } - -[source,ruby] - { - "@version" => "1", - "@timestamp" => "2014-02-18T20:58:06.376Z", - "host" => "localhost", - "path" => "/apps/logstash_conf/jmxconf", - "type" => "jmx", - "metric_path" => "test.homeserver.elasticsearch.BufferPool.mapped.ObjectName", - "metric_value_string" => "java.nio:type=BufferPool,name=mapped" - } - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Jmx Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-nb_thread>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-polling_frequency>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins.
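- -For orientation before the individual settings, a minimal end-to-end pipeline sketch (the directory path repeats the assumed example path above; the stdout output is only a convenient way to inspect the generated metric events): -[source,ruby] -    input { -      jmx { -        path => "/apps/logstash_conf/jmxconf"   # directory of json files, as above -        polling_frequency => 60 -        nb_thread => 4 -      } -    } -    output { -      stdout { codec => rubydebug } -    } -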
- -  - -[id="{version}-plugins-{type}s-{plugin}-nb_thread"] -===== `nb_thread` - - * Value type is <> - * Default value is `4` - -Indicates the number of threads launched to retrieve metrics. - -[id="{version}-plugins-{type}s-{plugin}-path"] -===== `path` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Path where the json conf files are stored. - -[id="{version}-plugins-{type}s-{plugin}-polling_frequency"] -===== `polling_frequency` - - * Value type is <> - * Default value is `60` - -Indicates the interval in seconds between two JMX metrics retrievals. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/jmx-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/jmx-v3.0.3.asciidoc deleted file mode 100644 index cad0a65b4..000000000 --- a/docs/versioned-plugins/inputs/jmx-v3.0.3.asciidoc +++ /dev/null @@ -1,157 +0,0 @@ -:plugin: jmx -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-input-jmx/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Jmx input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This input plugin permits you to retrieve metrics from remote Java applications using JMX. -Every `polling_frequency`, it scans a folder containing json configuration -files describing the JVMs to monitor and the metrics to retrieve. -Then a pool of threads will retrieve metrics and create events. - -## The configuration: - -In the Logstash configuration, you must set the polling frequency, -the number of threads used to poll metrics, and the absolute path of a directory containing -json files with the per-JVM configuration of metrics to retrieve. -Logstash input configuration example: -[source,ruby] - jmx { - //Required - path => "/apps/logstash_conf/jmxconf" - //Optional, default 60s - polling_frequency => 15 - type => "jmx" - //Optional, default 4 - nb_thread => 4 - } - -Json JMX configuration example: -[source,js] - { - //Required, JMX listening host/ip - "host" : "192.168.1.2", - //Required, JMX listening port - "port" : 1335, - //Optional, the username to connect to JMX - "username" : "user", - //Optional, the password to connect to JMX - "password": "pass", - //Optional, use this alias as a prefix in the metric name. If not set use _ - "alias" : "test.homeserver.elasticsearch", - //Required, list of JMX metrics to retrieve - "queries" : [ - { - //Required, the object name of Mbean to request - "object_name" : "java.lang:type=Memory", - //Optional, use this alias in the metrics value instead of the object_name - "object_alias" : "Memory" - }, { - "object_name" : "java.lang:type=Runtime", - //Optional, set of attributes to retrieve. If not set retrieve - //all metrics available on the configured object_name.
- "attributes" : [ "Uptime", "StartTime" ], - "object_alias" : "Runtime" - }, { - //object_name can be configured with * to retrieve all matching Mbeans - "object_name" : "java.lang:type=GarbageCollector,name=*", - "attributes" : [ "CollectionCount", "CollectionTime" ], - //object_alias can be based on specific value from the object_name thanks to ${}. - //In this case ${type} will be replaced by GarbageCollector... - "object_alias" : "${type}.${name}" - }, { - "object_name" : "java.nio:type=BufferPool,name=*", - "object_alias" : "${type}.${name}" - } ] - } - -Here are examples of generated events. When returned metrics value type is -number/boolean it is stored in `metric_value_number` event field -otherwise it is stored in `metric_value_string` event field. -[source,ruby] - { - "@version" => "1", - "@timestamp" => "2014-02-18T20:57:27.688Z", - "host" => "192.168.1.2", - "path" => "/apps/logstash_conf/jmxconf", - "type" => "jmx", - "metric_path" => "test.homeserver.elasticsearch.GarbageCollector.ParNew.CollectionCount", - "metric_value_number" => 2212 - } - -[source,ruby] - { - "@version" => "1", - "@timestamp" => "2014-02-18T20:58:06.376Z", - "host" => "localhost", - "path" => "/apps/logstash_conf/jmxconf", - "type" => "jmx", - "metric_path" => "test.homeserver.elasticsearch.BufferPool.mapped.ObjectName", - "metric_value_string" => "java.nio:type=BufferPool,name=mapped" - } - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Jmx Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-nb_thread>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-polling_frequency>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-nb_thread"] -===== `nb_thread` - - * Value type is <> - * Default value is `4` - -Indicate number of thread launched to retrieve metrics - -[id="{version}-plugins-{type}s-{plugin}-path"] -===== `path` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Path where json conf files are stored - -[id="{version}-plugins-{type}s-{plugin}-polling_frequency"] -===== `polling_frequency` - - * Value type is <> - * Default value is `60` - -Indicate interval between two jmx metrics retrieval -(in s) - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/jmx-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/jmx-v3.0.4.asciidoc deleted file mode 100644 index 5a8892425..000000000 --- a/docs/versioned-plugins/inputs/jmx-v3.0.4.asciidoc +++ /dev/null @@ -1,157 +0,0 @@ -:plugin: jmx -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-11-14 -:changelog_url: https://github.com/logstash-plugins/logstash-input-jmx/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Jmx input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This input plugin permits you to retrieve metrics from remote Java applications using JMX. -Every `polling_frequency`, it scans a folder containing json configuration -files describing the JVMs to monitor and the metrics to retrieve. -Then a pool of threads will retrieve metrics and create events. - -## The configuration: - -In the Logstash configuration, you must set the polling frequency, -the number of threads used to poll metrics, and the absolute path of a directory containing -json files with the per-JVM configuration of metrics to retrieve. -Logstash input configuration example: -[source,ruby] - jmx { - //Required - path => "/apps/logstash_conf/jmxconf" - //Optional, default 60s - polling_frequency => 15 - type => "jmx" - //Optional, default 4 - nb_thread => 4 - } - -Json JMX configuration example: -[source,js] - { - //Required, JMX listening host/ip - "host" : "192.168.1.2", - //Required, JMX listening port - "port" : 1335, - //Optional, the username to connect to JMX - "username" : "user", - //Optional, the password to connect to JMX - "password": "pass", - //Optional, use this alias as a prefix in the metric name. If not set use _ - "alias" : "test.homeserver.elasticsearch", - //Required, list of JMX metrics to retrieve - "queries" : [ - { - //Required, the object name of Mbean to request - "object_name" : "java.lang:type=Memory", - //Optional, use this alias in the metrics value instead of the object_name - "object_alias" : "Memory" - }, { - "object_name" : "java.lang:type=Runtime", - //Optional, set of attributes to retrieve. If not set retrieve - //all metrics available on the configured object_name. - "attributes" : [ "Uptime", "StartTime" ], - "object_alias" : "Runtime" - }, { - //object_name can be configured with * to retrieve all matching Mbeans - "object_name" : "java.lang:type=GarbageCollector,name=*", - "attributes" : [ "CollectionCount", "CollectionTime" ], - //object_alias can be based on specific value from the object_name thanks to ${}. - //In this case ${type} will be replaced by GarbageCollector... - "object_alias" : "${type}.${name}" - }, { - "object_name" : "java.nio:type=BufferPool,name=*", - "object_alias" : "${type}.${name}" - } ] - } - -Here are examples of generated events. When the returned metric value is a -number or boolean, it is stored in the `metric_value_number` event field; -otherwise it is stored in the `metric_value_string` event field.
-[source,ruby] - { - "@version" => "1", - "@timestamp" => "2014-02-18T20:57:27.688Z", - "host" => "192.168.1.2", - "path" => "/apps/logstash_conf/jmxconf", - "type" => "jmx", - "metric_path" => "test.homeserver.elasticsearch.GarbageCollector.ParNew.CollectionCount", - "metric_value_number" => 2212 - } - -[source,ruby] - { - "@version" => "1", - "@timestamp" => "2014-02-18T20:58:06.376Z", - "host" => "localhost", - "path" => "/apps/logstash_conf/jmxconf", - "type" => "jmx", - "metric_path" => "test.homeserver.elasticsearch.BufferPool.mapped.ObjectName", - "metric_value_string" => "java.nio:type=BufferPool,name=mapped" - } - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Jmx Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-nb_thread>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-polling_frequency>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-nb_thread"] -===== `nb_thread` - - * Value type is <> - * Default value is `4` - -Indicates the number of threads launched to retrieve metrics. - -[id="{version}-plugins-{type}s-{plugin}-path"] -===== `path` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Path where the json conf files are stored. - -[id="{version}-plugins-{type}s-{plugin}-polling_frequency"] -===== `polling_frequency` - - * Value type is <> - * Default value is `60` - -Indicates the interval in seconds between two JMX metrics retrievals. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/journald-index.asciidoc b/docs/versioned-plugins/inputs/journald-index.asciidoc deleted file mode 100644 index 80cd8207f..000000000 --- a/docs/versioned-plugins/inputs/journald-index.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -:plugin: journald -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-06-23 -|======================================================================= - -include::journald-v2.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/journald-v2.0.1.asciidoc b/docs/versioned-plugins/inputs/journald-v2.0.1.asciidoc deleted file mode 100644 index 957673266..000000000 --- a/docs/versioned-plugins/inputs/journald-v2.0.1.asciidoc +++ /dev/null @@ -1,152 +0,0 @@ -:plugin: journald -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-journald/blob/v2.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT!
-/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Journald input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Pull events from a local systemd journal. - -See the requirements at https://github.com/ledbettj/systemd-journal - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Journald Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-filter>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-flags>> |<>, one of `[0, 1, 2, 4]`|No -| <<{version}-plugins-{type}s-{plugin}-lowercase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-seekto>> |<>, one of `["head", "tail"]`|No -| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sincedb_write_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-thisboot>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-wait_timeout>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-filter"] -===== `filter` - - * Value type is <> - * Default value is `{}` - -Filter on events. Not heavily tested. - - -[id="{version}-plugins-{type}s-{plugin}-flags"] -===== `flags` - - * Value can be any of: `0`, `1`, `2`, `4` - * Default value is `0` - -System journal flags -0 = all available -1 = local only -2 = runtime only -4 = system only - - -[id="{version}-plugins-{type}s-{plugin}-lowercase"] -===== `lowercase` - - * Value type is <> - * Default value is `false` - -Lowercase annoying UPPERCASE fieldnames. (May clobber existing fields.) - - -[id="{version}-plugins-{type}s-{plugin}-path"] -===== `path` - - * Value type is <> - * Default value is `"/var/log/journal"` - -Path to read journal files from. - - -[id="{version}-plugins-{type}s-{plugin}-seekto"] -===== `seekto` - - * Value can be any of: `head`, `tail` - * Default value is `"tail"` - -Where in the journal to start capturing logs. -Options: head, tail - -[id="{version}-plugins-{type}s-{plugin}-sincedb_path"] -===== `sincedb_path` - - * Value type is <> - * There is no default value for this setting. - -Where to write the sincedb database (keeps track of the current -position of the journal). By default the sincedb file -is written to `$HOME/.sincedb_journal` - - -[id="{version}-plugins-{type}s-{plugin}-sincedb_write_interval"] -===== `sincedb_write_interval` - - * Value type is <> - * Default value is `15` - -How often (in seconds) to write the sincedb database with the current position of -the journal.
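- -As a purely illustrative sketch combining several of these options (the systemd unit name is an assumption, not a default; journal field names are UPPERCASE unless `lowercase` is enabled): -[source,ruby] -    input { -      journald { -        path => "/var/log/journal" -        seekto => "head" -        thisboot => true                                  # replay only the current boot -        filter => { "_SYSTEMD_UNIT" => "sshd.service" }   # assumed unit name -      } -    } -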
- - -[id="{version}-plugins-{type}s-{plugin}-thisboot"] -===== `thisboot` - - * Value type is <> - * Default value is `true` - -Filter logs since the system booted (only relevant with seekto => "head"). - - -[id="{version}-plugins-{type}s-{plugin}-threads"] -===== `threads` - - * Value type is <> - * Default value is `1` - - - -[id="{version}-plugins-{type}s-{plugin}-wait_timeout"] -===== `wait_timeout` - - * Value type is <> - * Default value is `3000000` - -The max timeout in microseconds to wait for new events from the journal. -Set to -1 to wait indefinitely. Setting this to a large value will -result in delayed shutdown of the plugin. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/kafka-index.asciidoc b/docs/versioned-plugins/inputs/kafka-index.asciidoc deleted file mode 100644 index 8c58188e6..000000000 --- a/docs/versioned-plugins/inputs/kafka-index.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -:plugin: kafka -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2018-01-05 -| <> | 2017-08-15 -| <> | 2017-08-01 -| <> | 2017-07-18 -| <> | 2017-07-13 -| <> | 2017-07-11 -| <> | 2017-06-23 -| <> | 2017-05-11 -|======================================================================= - -include::kafka-v8.0.4.asciidoc[] -include::kafka-v8.0.2.asciidoc[] -include::kafka-v8.0.0.asciidoc[] -include::kafka-v7.0.0.asciidoc[] -include::kafka-v6.3.4.asciidoc[] -include::kafka-v6.3.3.asciidoc[] -include::kafka-v6.3.2.asciidoc[] -include::kafka-v6.3.0.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/kafka-v6.3.0.asciidoc b/docs/versioned-plugins/inputs/kafka-v6.3.0.asciidoc deleted file mode 100644 index 944d1c737..000000000 --- a/docs/versioned-plugins/inputs/kafka-v6.3.0.asciidoc +++ /dev/null @@ -1,551 +0,0 @@ -:plugin: kafka -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v6.3.0 -:release_date: 2017-05-11 -:changelog_url: https://github.com/logstash-plugins/logstash-input-kafka/blob/v6.3.0/CHANGELOG.md -:include_path: ../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Kafka input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This input will read events from a Kafka topic. It uses the 0.10 version of -the consumer API provided by Kafka to read messages from the broker. - -Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination -of Logstash and the Kafka input plugin: - -[options="header"] -|========================================================== -|Kafka Client Version |Logstash Version |Plugin Version |Why?
-|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular -|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`) -|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`) -|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker -|0.10.1.x |2.4.x - 5.x.x | 6.x.x | -|========================================================== - -NOTE: We recommend that you use matching Kafka client and broker versions. During upgrades, you should -upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker -is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around. - -This input supports connecting to Kafka over: - -* SSL (requires plugin version 3.0.0 or later) -* Kerberos SASL (requires plugin version 5.1.0 or later) - -By default security is disabled but can be turned on as needed. - -The Logstash Kafka consumer handles group management and uses the default offset management -strategy using Kafka topics. - -Logstash instances by default form a single logical group to subscribe to Kafka topics. -Each Logstash Kafka consumer can run multiple threads to increase read throughput. Alternatively, -you could run multiple Logstash instances with the same `group_id` to spread the load across -physical machines. Messages in a topic will be distributed to all Logstash instances with -the same `group_id`. - -Ideally you should have as many threads as the number of partitions for a perfect balance -- -more threads than partitions means that some threads will be idle. - -For more information, see http://kafka.apache.org/documentation.html#theconsumer - -Kafka consumer configuration: http://kafka.apache.org/documentation.html#consumerconfigs - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Kafka Input Configuration Options - -This plugin supports the following configuration options plus the <> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-auto_offset_reset>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-check_crcs>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connections_max_idle_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-consumer_threads>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-decorate_events>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-enable_auto_commit>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclude_internal_topics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fetch_max_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fetch_min_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-group_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-key_deserializer_class>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_poll_interval_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_poll_records>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-partition_assignment_strategy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-poll_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No -| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topics_pattern>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-value_deserializer_class>> |<>|No -|======================================================================= - -Also see <> for a list of options supported by all -input plugins. 
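- -Before the individual settings, here is a purely illustrative consumer sketch (the broker host names and thread count are placeholders, not defaults): -[source,ruby] -    input { -      kafka { -        bootstrap_servers => "kafka1:9092,kafka2:9092"   # placeholder hosts -        topics => ["logstash"] -        group_id => "logstash"        # instances sharing this id split the partitions -        consumer_threads => 4         # ideally no more than the partition count -        decorate_events => true       # adds the `kafka` metadata field to each event -      } -    } -Running a second Logstash instance with the same `group_id` would, per the description above, split the topic's partitions between the two consumers. -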
- -  - -[id="{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms"] -===== `auto_commit_interval_ms` - - * Value type is <> - * Default value is `"5000"` - -The frequency in milliseconds that the consumer offsets are committed to Kafka. - -[id="{version}-plugins-{type}s-{plugin}-auto_offset_reset"] -===== `auto_offset_reset` - - * Value type is <> - * There is no default value for this setting. - -What to do when there is no initial offset in Kafka or if an offset is out of range: - -* earliest: automatically reset the offset to the earliest offset -* latest: automatically reset the offset to the latest offset -* none: throw exception to the consumer if no previous offset is found for the consumer's group -* anything else: throw exception to the consumer. - -[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"] -===== `bootstrap_servers` - - * Value type is <> - * Default value is `"localhost:9092"` - -A list of URLs of Kafka instances to use for establishing the initial connection to the cluster. -This list should be in the form of `host1:port1,host2:port2`. These URLs are just used -for the initial connection to discover the full cluster membership (which may change dynamically), -so this list need not contain the full set of servers (you may want more than one, though, in -case a server is down). - -[id="{version}-plugins-{type}s-{plugin}-check_crcs"] -===== `check_crcs` - - * Value type is <> - * There is no default value for this setting. - -Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk -corruption to the messages occurred. This check adds some overhead, so it may be -disabled in cases seeking extreme performance. - -[id="{version}-plugins-{type}s-{plugin}-client_id"] -===== `client_id` - - * Value type is <> - * Default value is `"logstash"` - -The id string to pass to the server when making requests. The purpose of this -is to be able to track the source of requests beyond just ip/port by allowing -a logical application name to be included. - -[id="{version}-plugins-{type}s-{plugin}-connections_max_idle_ms"] -===== `connections_max_idle_ms` - - * Value type is <> - * There is no default value for this setting. - -Close idle connections after the number of milliseconds specified by this config. - -[id="{version}-plugins-{type}s-{plugin}-consumer_threads"] -===== `consumer_threads` - - * Value type is <> - * Default value is `1` - -Ideally you should have as many threads as the number of partitions for a perfect -balance — more threads than partitions means that some threads will be idle. - -[id="{version}-plugins-{type}s-{plugin}-decorate_events"] -===== `decorate_events` - - * Value type is <> - * Default value is `false` - -Option to add Kafka metadata like topic, message size to the event. -This will add a field named `kafka` to the logstash event containing the following attributes: - `topic`: The topic this message is associated with - `consumer_group`: The consumer group used to read in this event - `partition`: The partition this message is associated with - `offset`: The offset from the partition this message is associated with - `key`: A ByteBuffer containing the message key - -[id="{version}-plugins-{type}s-{plugin}-enable_auto_commit"] -===== `enable_auto_commit` - - * Value type is <> - * Default value is `"true"` - -If true, periodically commit to Kafka the offsets of messages already returned by the consumer.
-This committed offset will be used when the process fails as the position from -which the consumption will begin. - -[id="{version}-plugins-{type}s-{plugin}-exclude_internal_topics"] -===== `exclude_internal_topics` - - * Value type is <> - * There is no default value for this setting. - -Whether records from internal topics (such as offsets) should be exposed to the consumer. -If set to true the only way to receive records from an internal topic is subscribing to it. - -[id="{version}-plugins-{type}s-{plugin}-fetch_max_bytes"] -===== `fetch_max_bytes` - - * Value type is <> - * There is no default value for this setting. - -The maximum amount of data the server should return for a fetch request. This is not an -absolute maximum, if the first message in the first non-empty partition of the fetch is larger -than this value, the message will still be returned to ensure that the consumer can make progress. - -[id="{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms"] -===== `fetch_max_wait_ms` - - * Value type is <> - * There is no default value for this setting. - -The maximum amount of time the server will block before answering the fetch request if -there isn't sufficient data to immediately satisfy `fetch_min_bytes`. This -should be less than or equal to the timeout used in `poll_timeout_ms` - -[id="{version}-plugins-{type}s-{plugin}-fetch_min_bytes"] -===== `fetch_min_bytes` - - * Value type is <> - * There is no default value for this setting. - -The minimum amount of data the server should return for a fetch request. If insufficient -data is available the request will wait for that much data to accumulate -before answering the request. - -[id="{version}-plugins-{type}s-{plugin}-group_id"] -===== `group_id` - - * Value type is <> - * Default value is `"logstash"` - -The identifier of the group this consumer belongs to. Consumer group is a single logical subscriber -that happens to be made up of multiple processors. Messages in a topic will be distributed to all -Logstash instances with the same `group_id` - -[id="{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms"] -===== `heartbeat_interval_ms` - - * Value type is <> - * There is no default value for this setting. - -The expected time between heartbeats to the consumer coordinator. Heartbeats are used to ensure -that the consumer's session stays active and to facilitate rebalancing when new -consumers join or leave the group. The value must be set lower than -`session.timeout.ms`, but typically should be set no higher than 1/3 of that value. -It can be adjusted even lower to control the expected time for normal rebalances. - -[id="{version}-plugins-{type}s-{plugin}-jaas_path"] -===== `jaas_path` - - * Value type is <> - * There is no default value for this setting. - -The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization -services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for Kafka client: -[source,java] ----------------------------------- -KafkaClient { - com.sun.security.auth.module.Krb5LoginModule required - useTicketCache=true - renewTicket=true - serviceName="kafka"; - }; ----------------------------------- - -Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these -to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same -`jaas_path` and `kerberos_config`. 
If this is not desirable, you would have to run separate instances of Logstash on -different JVM instances. - -[id="{version}-plugins-{type}s-{plugin}-kerberos_config"] -===== `kerberos_config` - - * Value type is <> - * There is no default value for this setting. - -Optional path to kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html - -[id="{version}-plugins-{type}s-{plugin}-key_deserializer_class"] -===== `key_deserializer_class` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"` - -Java Class used to deserialize the record's key - -[id="{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes"] -===== `max_partition_fetch_bytes` - - * Value type is <> - * There is no default value for this setting. - -The maximum amount of data per-partition the server will return. The maximum total memory used for a -request will be #partitions * max.partition.fetch.bytes. This size must be at least -as large as the maximum message size the server allows or else it is possible for the producer to -send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying -to fetch a large message on a certain partition. - -[id="{version}-plugins-{type}s-{plugin}-max_poll_interval_ms"] -===== `max_poll_interval_ms` - - * Value type is <> - * There is no default value for this setting. - -The maximum delay between invocations of poll() when using consumer group management. This places -an upper bound on the amount of time that the consumer can be idle before fetching more records. -If poll() is not called before expiration of this timeout, then the consumer is considered failed and -the group will rebalance in order to reassign the partitions to another member. -The value of the configuration `request_timeout_ms` must always be larger than max_poll_interval_ms - -[id="{version}-plugins-{type}s-{plugin}-max_poll_records"] -===== `max_poll_records` - - * Value type is <> - * There is no default value for this setting. - -The maximum number of records returned in a single call to poll(). - -[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"] -===== `metadata_max_age_ms` - - * Value type is <> - * There is no default value for this setting. - -The period of time in milliseconds after which we force a refresh of metadata even if -we haven't seen any partition leadership changes to proactively discover any new brokers or partitions - -[id="{version}-plugins-{type}s-{plugin}-partition_assignment_strategy"] -===== `partition_assignment_strategy` - - * Value type is <> - * There is no default value for this setting. - -The class name of the partition assignment strategy that the client will use to distribute -partition ownership amongst consumer instances - -[id="{version}-plugins-{type}s-{plugin}-poll_timeout_ms"] -===== `poll_timeout_ms` - - * Value type is <> - * Default value is `100` - -Time kafka consumer will wait to receive new messages from topics - -[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] -===== `receive_buffer_bytes` - - * Value type is <> - * There is no default value for this setting. - -The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. - -[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"] -===== `reconnect_backoff_ms` - - * Value type is <> - * There is no default value for this setting. - -The amount of time to wait before attempting to reconnect to a given host. 
-This avoids repeatedly connecting to a host in a tight loop. -This backoff applies to all requests sent by the consumer to the broker. - -[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"] -===== `request_timeout_ms` - - * Value type is <> - * There is no default value for this setting. - -The configuration controls the maximum amount of time the client will wait -for the response of a request. If the response is not received before the timeout -elapses, the client will resend the request if necessary or fail the request if -retries are exhausted. - -[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"] -===== `retry_backoff_ms` - - * Value type is <> - * There is no default value for this setting. - -The amount of time to wait before attempting to retry a failed fetch request -to a given topic partition. This avoids repeated fetching-and-failing in a tight loop. - -[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"] -===== `sasl_kerberos_service_name` - - * Value type is <> - * There is no default value for this setting. - -The Kerberos principal name that the Kafka broker runs as. -This can be defined either in Kafka's JAAS config or in Kafka's config. - -[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"] -===== `sasl_mechanism` - - * Value type is <> - * Default value is `"GSSAPI"` - -http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections. -This may be any mechanism for which a security provider is available. -GSSAPI is the default mechanism. - -[id="{version}-plugins-{type}s-{plugin}-security_protocol"] -===== `security_protocol` - - * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL` - * Default value is `"PLAINTEXT"` - -Security protocol to use, which can be one of PLAINTEXT, SSL, SASL_PLAINTEXT, or SASL_SSL. - -[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"] -===== `send_buffer_bytes` - - * Value type is <> - * There is no default value for this setting. - -The size of the TCP send buffer (SO_SNDBUF) to use when sending data. - -[id="{version}-plugins-{type}s-{plugin}-session_timeout_ms"] -===== `session_timeout_ms` - - * Value type is <> - * There is no default value for this setting. - -The timeout after which, if `poll()` is not invoked within `poll_timeout_ms`, the consumer is marked dead -and a rebalance operation is triggered for the group identified by `group_id`. - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * Default value is `false` - -Enable SSL/TLS secured communication to the Kafka broker. - -[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"] -===== `ssl_key_password` - - * Value type is <> - * There is no default value for this setting. - -The password of the private key in the key store file. - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"] -===== `ssl_keystore_location` - - * Value type is <> - * There is no default value for this setting. - -If client authentication is required, this setting stores the keystore path. - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"] -===== `ssl_keystore_password` - - * Value type is <> - * There is no default value for this setting.
- -If client authentication is required, this setting stores the keystore password - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"] -===== `ssl_keystore_type` - - * Value type is <> - * There is no default value for this setting. - -The keystore type. - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"] -===== `ssl_truststore_location` - - * Value type is <> - * There is no default value for this setting. - -The JKS truststore path to validate the Kafka broker's certificate. - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"] -===== `ssl_truststore_password` - - * Value type is <> - * There is no default value for this setting. - -The truststore password - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"] -===== `ssl_truststore_type` - - * Value type is <> - * There is no default value for this setting. - -The truststore type. - -[id="{version}-plugins-{type}s-{plugin}-topics"] -===== `topics` - - * Value type is <> - * Default value is `["logstash"]` - -A list of topics to subscribe to, defaults to ["logstash"]. - -[id="{version}-plugins-{type}s-{plugin}-topics_pattern"] -===== `topics_pattern` - - * Value type is <> - * There is no default value for this setting. - -A topic regex pattern to subscribe to. -The topics configuration will be ignored when using this configuration. - -[id="{version}-plugins-{type}s-{plugin}-value_deserializer_class"] -===== `value_deserializer_class` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"` - -Java Class used to deserialize the record's value - - - -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/kafka-v6.3.2.asciidoc b/docs/versioned-plugins/inputs/kafka-v6.3.2.asciidoc deleted file mode 100644 index 0667ec1ff..000000000 --- a/docs/versioned-plugins/inputs/kafka-v6.3.2.asciidoc +++ /dev/null @@ -1,552 +0,0 @@ -:plugin: kafka -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v6.3.2 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-kafka/blob/v6.3.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Kafka input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This input will read events from a Kafka topic. It uses the 0.10 version of -the consumer API provided by Kafka to read messages from the broker. - -Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination -of Logstash and the Kafka input plugin: - -[options="header"] -|========================================================== -|Kafka Client Version |Logstash Version |Plugin Version |Why? -|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular -|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`) -|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`) -|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker -|0.10.1.x |2.4.x - 5.x.x | 6.x.x | -|========================================================== - -NOTE: We recommended that you use matching Kafka client and broker versions. 
During upgrades, you should -upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker -is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around. - -This input supports connecting to Kafka over: - -* SSL (requires plugin version 3.0.0 or later) -* Kerberos SASL (requires plugin version 5.1.0 or later) - -By default security is disabled but can be turned on as needed. - -The Logstash Kafka consumer handles group management and uses the default offset management -strategy using Kafka topics. - -Logstash instances by default form a single logical group to subscribe to Kafka topics -Each Logstash Kafka consumer can run multiple threads to increase read throughput. Alternatively, -you could run multiple Logstash instances with the same `group_id` to spread the load across -physical machines. Messages in a topic will be distributed to all Logstash instances with -the same `group_id`. - -Ideally you should have as many threads as the number of partitions for a perfect balance -- -more threads than partitions means that some threads will be idle - -For more information see http://kafka.apache.org/documentation.html#theconsumer - -Kafka consumer configuration: http://kafka.apache.org/documentation.html#consumerconfigs - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Kafka Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-auto_offset_reset>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-check_crcs>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connections_max_idle_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-consumer_threads>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-decorate_events>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-enable_auto_commit>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclude_internal_topics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fetch_max_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fetch_min_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-group_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-key_deserializer_class>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_poll_interval_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_poll_records>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-partition_assignment_strategy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-poll_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> 
|<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No -| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topics_pattern>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-value_deserializer_class>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms"] -===== `auto_commit_interval_ms` - - * Value type is <> - * Default value is `"5000"` - -The frequency in milliseconds that the consumer offsets are committed to Kafka. - -[id="{version}-plugins-{type}s-{plugin}-auto_offset_reset"] -===== `auto_offset_reset` - - * Value type is <> - * There is no default value for this setting. - -What to do when there is no initial offset in Kafka or if an offset is out of range: - -* earliest: automatically reset the offset to the earliest offset -* latest: automatically reset the offset to the latest offset -* none: throw exception to the consumer if no previous offset is found for the consumer's group -* anything else: throw exception to the consumer. - -[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"] -===== `bootstrap_servers` - - * Value type is <> - * Default value is `"localhost:9092"` - -A list of URLs of Kafka instances to use for establishing the initial connection to the cluster. -This list should be in the form of `host1:port1,host2:port2` These urls are just used -for the initial connection to discover the full cluster membership (which may change dynamically) -so this list need not contain the full set of servers (you may want more than one, though, in -case a server is down). - -[id="{version}-plugins-{type}s-{plugin}-check_crcs"] -===== `check_crcs` - - * Value type is <> - * There is no default value for this setting. - -Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk -corruption to the messages occurred. This check adds some overhead, so it may be -disabled in cases seeking extreme performance. - -[id="{version}-plugins-{type}s-{plugin}-client_id"] -===== `client_id` - - * Value type is <> - * Default value is `"logstash"` - -The id string to pass to the server when making requests. The purpose of this -is to be able to track the source of requests beyond just ip/port by allowing -a logical application name to be included. 
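To tie these connection settings together, here is a minimal consumer sketch; the broker addresses and the `client_id` value are placeholders, not plugin defaults:

[source,ruby]
----------------------------------
input {
  kafka {
    # Two seed brokers; the client discovers the rest of the cluster from them.
    bootstrap_servers => "kafka1.example.com:9092,kafka2.example.com:9092"
    # A logical name so broker-side request logs can identify this pipeline.
    client_id => "logstash-ingest"
    topics => ["logstash"]
  }
}
----------------------------------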
- -[id="{version}-plugins-{type}s-{plugin}-connections_max_idle_ms"] -===== `connections_max_idle_ms` - - * Value type is <> - * There is no default value for this setting. - -Close idle connections after the number of milliseconds specified by this config. - -[id="{version}-plugins-{type}s-{plugin}-consumer_threads"] -===== `consumer_threads` - - * Value type is <> - * Default value is `1` - -Ideally you should have as many threads as the number of partitions for a perfect -balance — more threads than partitions means that some threads will be idle - -[id="{version}-plugins-{type}s-{plugin}-decorate_events"] -===== `decorate_events` - - * Value type is <> - * Default value is `false` - -Option to add Kafka metadata like topic, message size to the event. -This will add a field named `kafka` to the logstash event containing the following attributes: - `topic`: The topic this message is associated with - `consumer_group`: The consumer group used to read in this event - `partition`: The partition this message is associated with - `offset`: The offset from the partition this message is associated with - `key`: A ByteBuffer containing the message key - -[id="{version}-plugins-{type}s-{plugin}-enable_auto_commit"] -===== `enable_auto_commit` - - * Value type is <> - * Default value is `"true"` - -If true, periodically commit to Kafka the offsets of messages already returned by the consumer. -This committed offset will be used when the process fails as the position from -which the consumption will begin. - -[id="{version}-plugins-{type}s-{plugin}-exclude_internal_topics"] -===== `exclude_internal_topics` - - * Value type is <> - * There is no default value for this setting. - -Whether records from internal topics (such as offsets) should be exposed to the consumer. -If set to true the only way to receive records from an internal topic is subscribing to it. - -[id="{version}-plugins-{type}s-{plugin}-fetch_max_bytes"] -===== `fetch_max_bytes` - - * Value type is <> - * There is no default value for this setting. - -The maximum amount of data the server should return for a fetch request. This is not an -absolute maximum, if the first message in the first non-empty partition of the fetch is larger -than this value, the message will still be returned to ensure that the consumer can make progress. - -[id="{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms"] -===== `fetch_max_wait_ms` - - * Value type is <> - * There is no default value for this setting. - -The maximum amount of time the server will block before answering the fetch request if -there isn't sufficient data to immediately satisfy `fetch_min_bytes`. This -should be less than or equal to the timeout used in `poll_timeout_ms` - -[id="{version}-plugins-{type}s-{plugin}-fetch_min_bytes"] -===== `fetch_min_bytes` - - * Value type is <> - * There is no default value for this setting. - -The minimum amount of data the server should return for a fetch request. If insufficient -data is available the request will wait for that much data to accumulate -before answering the request. - -[id="{version}-plugins-{type}s-{plugin}-group_id"] -===== `group_id` - - * Value type is <> - * Default value is `"logstash"` - -The identifier of the group this consumer belongs to. Consumer group is a single logical subscriber -that happens to be made up of multiple processors. 
Messages in a topic will be distributed to all -Logstash instances with the same `group_id` - -[id="{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms"] -===== `heartbeat_interval_ms` - - * Value type is <> - * There is no default value for this setting. - -The expected time between heartbeats to the consumer coordinator. Heartbeats are used to ensure -that the consumer's session stays active and to facilitate rebalancing when new -consumers join or leave the group. The value must be set lower than -`session.timeout.ms`, but typically should be set no higher than 1/3 of that value. -It can be adjusted even lower to control the expected time for normal rebalances. - -[id="{version}-plugins-{type}s-{plugin}-jaas_path"] -===== `jaas_path` - - * Value type is <> - * There is no default value for this setting. - -The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization -services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for Kafka client: -[source,java] ----------------------------------- -KafkaClient { - com.sun.security.auth.module.Krb5LoginModule required - useTicketCache=true - renewTicket=true - serviceName="kafka"; - }; ----------------------------------- - -Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these -to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same -`jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on -different JVM instances. - -[id="{version}-plugins-{type}s-{plugin}-kerberos_config"] -===== `kerberos_config` - - * Value type is <> - * There is no default value for this setting. - -Optional path to kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html - -[id="{version}-plugins-{type}s-{plugin}-key_deserializer_class"] -===== `key_deserializer_class` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"` - -Java Class used to deserialize the record's key - -[id="{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes"] -===== `max_partition_fetch_bytes` - - * Value type is <> - * There is no default value for this setting. - -The maximum amount of data per-partition the server will return. The maximum total memory used for a -request will be #partitions * max.partition.fetch.bytes. This size must be at least -as large as the maximum message size the server allows or else it is possible for the producer to -send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying -to fetch a large message on a certain partition. - -[id="{version}-plugins-{type}s-{plugin}-max_poll_interval_ms"] -===== `max_poll_interval_ms` - - * Value type is <> - * There is no default value for this setting. - -The maximum delay between invocations of poll() when using consumer group management. This places -an upper bound on the amount of time that the consumer can be idle before fetching more records. -If poll() is not called before expiration of this timeout, then the consumer is considered failed and -the group will rebalance in order to reassign the partitions to another member. 
-The value of the configuration `request_timeout_ms` must always be larger than max_poll_interval_ms - -[id="{version}-plugins-{type}s-{plugin}-max_poll_records"] -===== `max_poll_records` - - * Value type is <> - * There is no default value for this setting. - -The maximum number of records returned in a single call to poll(). - -[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"] -===== `metadata_max_age_ms` - - * Value type is <> - * There is no default value for this setting. - -The period of time in milliseconds after which we force a refresh of metadata even if -we haven't seen any partition leadership changes to proactively discover any new brokers or partitions - -[id="{version}-plugins-{type}s-{plugin}-partition_assignment_strategy"] -===== `partition_assignment_strategy` - - * Value type is <> - * There is no default value for this setting. - -The class name of the partition assignment strategy that the client will use to distribute -partition ownership amongst consumer instances - -[id="{version}-plugins-{type}s-{plugin}-poll_timeout_ms"] -===== `poll_timeout_ms` - - * Value type is <> - * Default value is `100` - -Time kafka consumer will wait to receive new messages from topics - -[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] -===== `receive_buffer_bytes` - - * Value type is <> - * There is no default value for this setting. - -The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. - -[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"] -===== `reconnect_backoff_ms` - - * Value type is <> - * There is no default value for this setting. - -The amount of time to wait before attempting to reconnect to a given host. -This avoids repeatedly connecting to a host in a tight loop. -This backoff applies to all requests sent by the consumer to the broker. - -[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"] -===== `request_timeout_ms` - - * Value type is <> - * There is no default value for this setting. - -The configuration controls the maximum amount of time the client will wait -for the response of a request. If the response is not received before the timeout -elapses the client will resend the request if necessary or fail the request if -retries are exhausted. - -[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"] -===== `retry_backoff_ms` - - * Value type is <> - * There is no default value for this setting. - -The amount of time to wait before attempting to retry a failed fetch request -to a given topic partition. This avoids repeated fetching-and-failing in a tight loop. - -[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"] -===== `sasl_kerberos_service_name` - - * Value type is <> - * There is no default value for this setting. - -The Kerberos principal name that Kafka broker runs as. -This can be defined either in Kafka's JAAS config or in Kafka's config. - -[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"] -===== `sasl_mechanism` - - * Value type is <> - * Default value is `"GSSAPI"` - -http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections. -This may be any mechanism for which a security provider is available. -GSSAPI is the default mechanism. 
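Putting the Kerberos-related settings together, a SASL/GSSAPI consumer might be configured as in the following sketch; the file paths are placeholders and assume a JAAS file like the sample shown under `jaas_path` above:

[source,ruby]
----------------------------------
input {
  kafka {
    bootstrap_servers => "broker1.example.com:9092"
    security_protocol => "SASL_PLAINTEXT"
    sasl_mechanism => "GSSAPI"
    sasl_kerberos_service_name => "kafka"
    # Placeholder paths; both become global JVM properties shared by all Kafka inputs.
    jaas_path => "/etc/logstash/kafka_client_jaas.conf"
    kerberos_config => "/etc/krb5.conf"
  }
}
----------------------------------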
- -[id="{version}-plugins-{type}s-{plugin}-security_protocol"] -===== `security_protocol` - - * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL` - * Default value is `"PLAINTEXT"` - -Security protocol to use, which can be either of PLAINTEXT,SSL,SASL_PLAINTEXT,SASL_SSL - -[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"] -===== `send_buffer_bytes` - - * Value type is <> - * There is no default value for this setting. - -The size of the TCP send buffer (SO_SNDBUF) to use when sending data - -[id="{version}-plugins-{type}s-{plugin}-session_timeout_ms"] -===== `session_timeout_ms` - - * Value type is <> - * There is no default value for this setting. - -The timeout after which, if the `poll_timeout_ms` is not invoked, the consumer is marked dead -and a rebalance operation is triggered for the group identified by `group_id` - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * Default value is `false` - -Enable SSL/TLS secured communication to Kafka broker. - -[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"] -===== `ssl_key_password` - - * Value type is <> - * There is no default value for this setting. - -The password of the private key in the key store file. - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"] -===== `ssl_keystore_location` - - * Value type is <> - * There is no default value for this setting. - -If client authentication is required, this setting stores the keystore path. - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"] -===== `ssl_keystore_password` - - * Value type is <> - * There is no default value for this setting. - -If client authentication is required, this setting stores the keystore password - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"] -===== `ssl_keystore_type` - - * Value type is <> - * There is no default value for this setting. - -The keystore type. - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"] -===== `ssl_truststore_location` - - * Value type is <> - * There is no default value for this setting. - -The JKS truststore path to validate the Kafka broker's certificate. - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"] -===== `ssl_truststore_password` - - * Value type is <> - * There is no default value for this setting. - -The truststore password - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"] -===== `ssl_truststore_type` - - * Value type is <> - * There is no default value for this setting. - -The truststore type. - -[id="{version}-plugins-{type}s-{plugin}-topics"] -===== `topics` - - * Value type is <> - * Default value is `["logstash"]` - -A list of topics to subscribe to, defaults to ["logstash"]. - -[id="{version}-plugins-{type}s-{plugin}-topics_pattern"] -===== `topics_pattern` - - * Value type is <> - * There is no default value for this setting. - -A topic regex pattern to subscribe to. -The topics configuration will be ignored when using this configuration. 
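For example, to consume from every topic that matches a naming convention instead of a fixed list, a sketch (the pattern is illustrative):

[source,ruby]
----------------------------------
input {
  kafka {
    bootstrap_servers => "broker1.example.com:9092"
    # Subscribes to all topics matching the regex; any `topics` list is ignored.
    topics_pattern => "logs-.*"
  }
}
----------------------------------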
- -[id="{version}-plugins-{type}s-{plugin}-value_deserializer_class"] -===== `value_deserializer_class` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"` - -Java Class used to deserialize the record's value - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/kafka-v6.3.3.asciidoc b/docs/versioned-plugins/inputs/kafka-v6.3.3.asciidoc deleted file mode 100644 index b33854113..000000000 --- a/docs/versioned-plugins/inputs/kafka-v6.3.3.asciidoc +++ /dev/null @@ -1,553 +0,0 @@ -:plugin: kafka -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v6.3.3 -:release_date: 2017-07-11 -:changelog_url: https://github.com/logstash-plugins/logstash-input-kafka/blob/v6.3.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Kafka input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This input will read events from a Kafka topic. It uses the 0.10 version of -the consumer API provided by Kafka to read messages from the broker. - -Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination -of Logstash and the Kafka input plugin: - -[options="header"] -|========================================================== -|Kafka Client Version |Logstash Version |Plugin Version |Why? -|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular -|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`) -|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`) -|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker -|0.10.1.x |2.4.x - 5.x.x | 6.x.x | -|0.11.0.0 |2.4.x - 5.x.x | 6.x.x |Not compatible with the <= 0.9 broker -|========================================================== - -NOTE: We recommended that you use matching Kafka client and broker versions. During upgrades, you should -upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker -is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around. - -This input supports connecting to Kafka over: - -* SSL (requires plugin version 3.0.0 or later) -* Kerberos SASL (requires plugin version 5.1.0 or later) - -By default security is disabled but can be turned on as needed. - -The Logstash Kafka consumer handles group management and uses the default offset management -strategy using Kafka topics. - -Logstash instances by default form a single logical group to subscribe to Kafka topics -Each Logstash Kafka consumer can run multiple threads to increase read throughput. Alternatively, -you could run multiple Logstash instances with the same `group_id` to spread the load across -physical machines. Messages in a topic will be distributed to all Logstash instances with -the same `group_id`. 
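As a sketch of that scaling model, two Logstash instances could each run the configuration below; because they share a `group_id`, Kafka balances the topic's partitions across all of their threads (the values are illustrative):

[source,ruby]
----------------------------------
input {
  kafka {
    bootstrap_servers => "broker1.example.com:9092"
    topics => ["logstash"]
    group_id => "logstash"   # same on every instance that should share the load
    consumer_threads => 4    # ideally, total threads across instances == partition count
  }
}
----------------------------------

With eight partitions, for instance, two such instances running four threads each would give every thread exactly one partition.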
- -Ideally you should have as many threads as the number of partitions for a perfect balance -- -more threads than partitions means that some threads will be idle - -For more information see http://kafka.apache.org/documentation.html#theconsumer - -Kafka consumer configuration: http://kafka.apache.org/documentation.html#consumerconfigs - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Kafka Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-auto_offset_reset>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-check_crcs>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connections_max_idle_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-consumer_threads>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-decorate_events>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-enable_auto_commit>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclude_internal_topics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fetch_max_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fetch_min_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-group_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-key_deserializer_class>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_poll_interval_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_poll_records>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-partition_assignment_strategy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-poll_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No -| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No -| 
<<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topics_pattern>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-value_deserializer_class>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms"] -===== `auto_commit_interval_ms` - - * Value type is <> - * Default value is `"5000"` - -The frequency in milliseconds that the consumer offsets are committed to Kafka. - -[id="{version}-plugins-{type}s-{plugin}-auto_offset_reset"] -===== `auto_offset_reset` - - * Value type is <> - * There is no default value for this setting. - -What to do when there is no initial offset in Kafka or if an offset is out of range: - -* earliest: automatically reset the offset to the earliest offset -* latest: automatically reset the offset to the latest offset -* none: throw exception to the consumer if no previous offset is found for the consumer's group -* anything else: throw exception to the consumer. - -[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"] -===== `bootstrap_servers` - - * Value type is <> - * Default value is `"localhost:9092"` - -A list of URLs of Kafka instances to use for establishing the initial connection to the cluster. -This list should be in the form of `host1:port1,host2:port2` These urls are just used -for the initial connection to discover the full cluster membership (which may change dynamically) -so this list need not contain the full set of servers (you may want more than one, though, in -case a server is down). - -[id="{version}-plugins-{type}s-{plugin}-check_crcs"] -===== `check_crcs` - - * Value type is <> - * There is no default value for this setting. - -Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk -corruption to the messages occurred. This check adds some overhead, so it may be -disabled in cases seeking extreme performance. - -[id="{version}-plugins-{type}s-{plugin}-client_id"] -===== `client_id` - - * Value type is <> - * Default value is `"logstash"` - -The id string to pass to the server when making requests. The purpose of this -is to be able to track the source of requests beyond just ip/port by allowing -a logical application name to be included. - -[id="{version}-plugins-{type}s-{plugin}-connections_max_idle_ms"] -===== `connections_max_idle_ms` - - * Value type is <> - * There is no default value for this setting. - -Close idle connections after the number of milliseconds specified by this config. - -[id="{version}-plugins-{type}s-{plugin}-consumer_threads"] -===== `consumer_threads` - - * Value type is <> - * Default value is `1` - -Ideally you should have as many threads as the number of partitions for a perfect -balance — more threads than partitions means that some threads will be idle - -[id="{version}-plugins-{type}s-{plugin}-decorate_events"] -===== `decorate_events` - - * Value type is <> - * Default value is `false` - -Option to add Kafka metadata like topic, message size to the event. 
-This will add a field named `kafka` to the logstash event containing the following attributes: - `topic`: The topic this message is associated with - `consumer_group`: The consumer group used to read in this event - `partition`: The partition this message is associated with - `offset`: The offset from the partition this message is associated with - `key`: A ByteBuffer containing the message key - -[id="{version}-plugins-{type}s-{plugin}-enable_auto_commit"] -===== `enable_auto_commit` - - * Value type is <> - * Default value is `"true"` - -If true, periodically commit to Kafka the offsets of messages already returned by the consumer. -This committed offset will be used when the process fails as the position from -which the consumption will begin. - -[id="{version}-plugins-{type}s-{plugin}-exclude_internal_topics"] -===== `exclude_internal_topics` - - * Value type is <> - * There is no default value for this setting. - -Whether records from internal topics (such as offsets) should be exposed to the consumer. -If set to true the only way to receive records from an internal topic is subscribing to it. - -[id="{version}-plugins-{type}s-{plugin}-fetch_max_bytes"] -===== `fetch_max_bytes` - - * Value type is <> - * There is no default value for this setting. - -The maximum amount of data the server should return for a fetch request. This is not an -absolute maximum, if the first message in the first non-empty partition of the fetch is larger -than this value, the message will still be returned to ensure that the consumer can make progress. - -[id="{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms"] -===== `fetch_max_wait_ms` - - * Value type is <> - * There is no default value for this setting. - -The maximum amount of time the server will block before answering the fetch request if -there isn't sufficient data to immediately satisfy `fetch_min_bytes`. This -should be less than or equal to the timeout used in `poll_timeout_ms` - -[id="{version}-plugins-{type}s-{plugin}-fetch_min_bytes"] -===== `fetch_min_bytes` - - * Value type is <> - * There is no default value for this setting. - -The minimum amount of data the server should return for a fetch request. If insufficient -data is available the request will wait for that much data to accumulate -before answering the request. - -[id="{version}-plugins-{type}s-{plugin}-group_id"] -===== `group_id` - - * Value type is <> - * Default value is `"logstash"` - -The identifier of the group this consumer belongs to. Consumer group is a single logical subscriber -that happens to be made up of multiple processors. Messages in a topic will be distributed to all -Logstash instances with the same `group_id` - -[id="{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms"] -===== `heartbeat_interval_ms` - - * Value type is <> - * There is no default value for this setting. - -The expected time between heartbeats to the consumer coordinator. Heartbeats are used to ensure -that the consumer's session stays active and to facilitate rebalancing when new -consumers join or leave the group. The value must be set lower than -`session.timeout.ms`, but typically should be set no higher than 1/3 of that value. -It can be adjusted even lower to control the expected time for normal rebalances. - -[id="{version}-plugins-{type}s-{plugin}-jaas_path"] -===== `jaas_path` - - * Value type is <> - * There is no default value for this setting. - -The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization -services for Kafka. 
This setting provides the path to the JAAS file. Sample JAAS file for Kafka client: -[source,java] ----------------------------------- -KafkaClient { - com.sun.security.auth.module.Krb5LoginModule required - useTicketCache=true - renewTicket=true - serviceName="kafka"; - }; ----------------------------------- - -Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these -to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same -`jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on -different JVM instances. - -[id="{version}-plugins-{type}s-{plugin}-kerberos_config"] -===== `kerberos_config` - - * Value type is <> - * There is no default value for this setting. - -Optional path to kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html - -[id="{version}-plugins-{type}s-{plugin}-key_deserializer_class"] -===== `key_deserializer_class` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"` - -Java Class used to deserialize the record's key - -[id="{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes"] -===== `max_partition_fetch_bytes` - - * Value type is <> - * There is no default value for this setting. - -The maximum amount of data per-partition the server will return. The maximum total memory used for a -request will be #partitions * max.partition.fetch.bytes. This size must be at least -as large as the maximum message size the server allows or else it is possible for the producer to -send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying -to fetch a large message on a certain partition. - -[id="{version}-plugins-{type}s-{plugin}-max_poll_interval_ms"] -===== `max_poll_interval_ms` - - * Value type is <> - * There is no default value for this setting. - -The maximum delay between invocations of poll() when using consumer group management. This places -an upper bound on the amount of time that the consumer can be idle before fetching more records. -If poll() is not called before expiration of this timeout, then the consumer is considered failed and -the group will rebalance in order to reassign the partitions to another member. -The value of the configuration `request_timeout_ms` must always be larger than max_poll_interval_ms - -[id="{version}-plugins-{type}s-{plugin}-max_poll_records"] -===== `max_poll_records` - - * Value type is <> - * There is no default value for this setting. - -The maximum number of records returned in a single call to poll(). - -[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"] -===== `metadata_max_age_ms` - - * Value type is <> - * There is no default value for this setting. - -The period of time in milliseconds after which we force a refresh of metadata even if -we haven't seen any partition leadership changes to proactively discover any new brokers or partitions - -[id="{version}-plugins-{type}s-{plugin}-partition_assignment_strategy"] -===== `partition_assignment_strategy` - - * Value type is <> - * There is no default value for this setting. 
-
-The class name of the partition assignment strategy that the client will use to distribute
-partition ownership amongst consumer instances.
-
-[id="{version}-plugins-{type}s-{plugin}-poll_timeout_ms"]
-===== `poll_timeout_ms`
-
- * Value type is <>
- * Default value is `100`
-
-The time the Kafka consumer will wait to receive new messages from topics.
-
-[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"]
-===== `receive_buffer_bytes`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.
-
-[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"]
-===== `reconnect_backoff_ms`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The amount of time to wait before attempting to reconnect to a given host.
-This avoids repeatedly connecting to a host in a tight loop.
-This backoff applies to all requests sent by the consumer to the broker.
-
-[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"]
-===== `request_timeout_ms`
-
- * Value type is <>
- * There is no default value for this setting.
-
-This configuration controls the maximum amount of time the client will wait
-for the response of a request. If the response is not received before the timeout
-elapses, the client will resend the request if necessary, or fail the request if
-retries are exhausted.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"]
-===== `retry_backoff_ms`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The amount of time to wait before attempting to retry a failed fetch request
-to a given topic partition. This avoids repeated fetching-and-failing in a tight loop.
-
-[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"]
-===== `sasl_kerberos_service_name`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The Kerberos principal name that the Kafka broker runs as.
-This can be defined either in Kafka's JAAS config or in Kafka's config.
-
-[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"]
-===== `sasl_mechanism`
-
- * Value type is <>
- * Default value is `"GSSAPI"`
-
-http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections.
-This may be any mechanism for which a security provider is available.
-GSSAPI is the default mechanism.
-
-[id="{version}-plugins-{type}s-{plugin}-security_protocol"]
-===== `security_protocol`
-
- * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`
- * Default value is `"PLAINTEXT"`
-
-Security protocol to use, which can be one of `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, or `SASL_SSL`.
-
-[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"]
-===== `send_buffer_bytes`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The size of the TCP send buffer (SO_SNDBUF) to use when sending data.
-
-[id="{version}-plugins-{type}s-{plugin}-session_timeout_ms"]
-===== `session_timeout_ms`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The timeout after which, if no poll is made within `poll_timeout_ms`, the consumer is marked dead
-and a rebalance operation is triggered for the group identified by `group_id`.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl"]
-===== `ssl` (DEPRECATED)
-
- * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
- * Value type is <>
- * Default value is `false`
-
-Enable SSL/TLS secured communication to the Kafka broker.
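Rather than the deprecated `ssl` flag, an encrypted connection can be expressed through `security_protocol` plus the keystore and truststore settings that follow; a sketch with placeholder paths and passwords:

[source,ruby]
----------------------------------
input {
  kafka {
    bootstrap_servers => "broker1.example.com:9093"
    security_protocol => "SSL"
    # Truststore used to verify the broker's certificate (placeholder path/password).
    ssl_truststore_location => "/etc/logstash/kafka.client.truststore.jks"
    ssl_truststore_password => "changeit"
    # Keystore settings are only needed if the broker requires client authentication.
    ssl_keystore_location => "/etc/logstash/kafka.client.keystore.jks"
    ssl_keystore_password => "changeit"
  }
}
----------------------------------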
- -[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"] -===== `ssl_key_password` - - * Value type is <> - * There is no default value for this setting. - -The password of the private key in the key store file. - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"] -===== `ssl_keystore_location` - - * Value type is <> - * There is no default value for this setting. - -If client authentication is required, this setting stores the keystore path. - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"] -===== `ssl_keystore_password` - - * Value type is <> - * There is no default value for this setting. - -If client authentication is required, this setting stores the keystore password - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"] -===== `ssl_keystore_type` - - * Value type is <> - * There is no default value for this setting. - -The keystore type. - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"] -===== `ssl_truststore_location` - - * Value type is <> - * There is no default value for this setting. - -The JKS truststore path to validate the Kafka broker's certificate. - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"] -===== `ssl_truststore_password` - - * Value type is <> - * There is no default value for this setting. - -The truststore password - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"] -===== `ssl_truststore_type` - - * Value type is <> - * There is no default value for this setting. - -The truststore type. - -[id="{version}-plugins-{type}s-{plugin}-topics"] -===== `topics` - - * Value type is <> - * Default value is `["logstash"]` - -A list of topics to subscribe to, defaults to ["logstash"]. - -[id="{version}-plugins-{type}s-{plugin}-topics_pattern"] -===== `topics_pattern` - - * Value type is <> - * There is no default value for this setting. - -A topic regex pattern to subscribe to. -The topics configuration will be ignored when using this configuration. - -[id="{version}-plugins-{type}s-{plugin}-value_deserializer_class"] -===== `value_deserializer_class` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"` - -Java Class used to deserialize the record's value - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/kafka-v6.3.4.asciidoc b/docs/versioned-plugins/inputs/kafka-v6.3.4.asciidoc deleted file mode 100644 index c8560778f..000000000 --- a/docs/versioned-plugins/inputs/kafka-v6.3.4.asciidoc +++ /dev/null @@ -1,553 +0,0 @@ -:plugin: kafka -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v6.3.4 -:release_date: 2017-07-13 -:changelog_url: https://github.com/logstash-plugins/logstash-input-kafka/blob/v6.3.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Kafka input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This input will read events from a Kafka topic. It uses the 0.10 version of -the consumer API provided by Kafka to read messages from the broker. 
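As a minimal sketch of the plugin in a pipeline (the broker address and topic name are placeholders):

[source,ruby]
----------------------------------
input {
  kafka {
    bootstrap_servers => "localhost:9092"
    topics => ["logstash"]
  }
}
output {
  stdout { codec => rubydebug }   # print consumed events for inspection
}
----------------------------------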
-
-Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination
-of Logstash and the Kafka input plugin:
-
-[options="header"]
-|==========================================================
-|Kafka Client Version |Logstash Version |Plugin Version |Why?
-|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular
-|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`)
-|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`)
-|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker
-|0.10.1.x |2.4.x - 5.x.x | 6.x.x |
-|0.11.0.0 |2.4.x - 5.x.x | 6.x.x |Not compatible with the <= 0.9 broker
-|==========================================================
-
-NOTE: We recommend that you use matching Kafka client and broker versions. During upgrades, you should
-upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker
-is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around.
-
-This input supports connecting to Kafka over:
-
-* SSL (requires plugin version 3.0.0 or later)
-* Kerberos SASL (requires plugin version 5.1.0 or later)
-
-By default, security is disabled but can be turned on as needed.
-
-The Logstash Kafka consumer handles group management and uses the default offset management
-strategy using Kafka topics.
-
-Logstash instances by default form a single logical group to subscribe to Kafka topics.
-Each Logstash Kafka consumer can run multiple threads to increase read throughput. Alternatively,
-you could run multiple Logstash instances with the same `group_id` to spread the load across
-physical machines. Messages in a topic will be distributed to all Logstash instances with
-the same `group_id`.
-
-Ideally you should have as many threads as the number of partitions for a perfect balance --
-more threads than partitions means that some threads will be idle.
-
-For more information, see http://kafka.apache.org/documentation.html#theconsumer
-
-Kafka consumer configuration: http://kafka.apache.org/documentation.html#consumerconfigs
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Kafka Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-auto_offset_reset>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-check_crcs>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connections_max_idle_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-consumer_threads>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-decorate_events>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-enable_auto_commit>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclude_internal_topics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fetch_max_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fetch_min_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-group_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-key_deserializer_class>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_poll_interval_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_poll_records>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-partition_assignment_strategy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-poll_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No -| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topics_pattern>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-value_deserializer_class>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. 
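Before the per-setting reference below, here is a sketch of how a few of these options combine when tuning consumption; the values are illustrative, not recommendations:

[source,ruby]
----------------------------------
input {
  kafka {
    bootstrap_servers => "broker1.example.com:9092"
    topics => ["logstash"]
    auto_offset_reset => "earliest"   # start from the oldest data when no offset exists
    max_poll_records => "500"         # cap records returned per poll() call
    fetch_max_wait_ms => "100"        # broker-side wait when fetch_min_bytes is unmet
    poll_timeout_ms => 100            # client-side wait for new messages
  }
}
----------------------------------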
- -  - -[id="{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms"] -===== `auto_commit_interval_ms` - - * Value type is <> - * Default value is `"5000"` - -The frequency in milliseconds that the consumer offsets are committed to Kafka. - -[id="{version}-plugins-{type}s-{plugin}-auto_offset_reset"] -===== `auto_offset_reset` - - * Value type is <> - * There is no default value for this setting. - -What to do when there is no initial offset in Kafka or if an offset is out of range: - -* earliest: automatically reset the offset to the earliest offset -* latest: automatically reset the offset to the latest offset -* none: throw exception to the consumer if no previous offset is found for the consumer's group -* anything else: throw exception to the consumer. - -[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"] -===== `bootstrap_servers` - - * Value type is <> - * Default value is `"localhost:9092"` - -A list of URLs of Kafka instances to use for establishing the initial connection to the cluster. -This list should be in the form of `host1:port1,host2:port2` These urls are just used -for the initial connection to discover the full cluster membership (which may change dynamically) -so this list need not contain the full set of servers (you may want more than one, though, in -case a server is down). - -[id="{version}-plugins-{type}s-{plugin}-check_crcs"] -===== `check_crcs` - - * Value type is <> - * There is no default value for this setting. - -Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk -corruption to the messages occurred. This check adds some overhead, so it may be -disabled in cases seeking extreme performance. - -[id="{version}-plugins-{type}s-{plugin}-client_id"] -===== `client_id` - - * Value type is <> - * Default value is `"logstash"` - -The id string to pass to the server when making requests. The purpose of this -is to be able to track the source of requests beyond just ip/port by allowing -a logical application name to be included. - -[id="{version}-plugins-{type}s-{plugin}-connections_max_idle_ms"] -===== `connections_max_idle_ms` - - * Value type is <> - * There is no default value for this setting. - -Close idle connections after the number of milliseconds specified by this config. - -[id="{version}-plugins-{type}s-{plugin}-consumer_threads"] -===== `consumer_threads` - - * Value type is <> - * Default value is `1` - -Ideally you should have as many threads as the number of partitions for a perfect -balance — more threads than partitions means that some threads will be idle - -[id="{version}-plugins-{type}s-{plugin}-decorate_events"] -===== `decorate_events` - - * Value type is <> - * Default value is `false` - -Option to add Kafka metadata like topic, message size to the event. -This will add a field named `kafka` to the logstash event containing the following attributes: - `topic`: The topic this message is associated with - `consumer_group`: The consumer group used to read in this event - `partition`: The partition this message is associated with - `offset`: The offset from the partition this message is associated with - `key`: A ByteBuffer containing the message key - -[id="{version}-plugins-{type}s-{plugin}-enable_auto_commit"] -===== `enable_auto_commit` - - * Value type is <> - * Default value is `"true"` - -If true, periodically commit to Kafka the offsets of messages already returned by the consumer. 
-This committed offset will be used when the process fails as the position from -which the consumption will begin. - -[id="{version}-plugins-{type}s-{plugin}-exclude_internal_topics"] -===== `exclude_internal_topics` - - * Value type is <> - * There is no default value for this setting. - -Whether records from internal topics (such as offsets) should be exposed to the consumer. -If set to true the only way to receive records from an internal topic is subscribing to it. - -[id="{version}-plugins-{type}s-{plugin}-fetch_max_bytes"] -===== `fetch_max_bytes` - - * Value type is <> - * There is no default value for this setting. - -The maximum amount of data the server should return for a fetch request. This is not an -absolute maximum, if the first message in the first non-empty partition of the fetch is larger -than this value, the message will still be returned to ensure that the consumer can make progress. - -[id="{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms"] -===== `fetch_max_wait_ms` - - * Value type is <> - * There is no default value for this setting. - -The maximum amount of time the server will block before answering the fetch request if -there isn't sufficient data to immediately satisfy `fetch_min_bytes`. This -should be less than or equal to the timeout used in `poll_timeout_ms` - -[id="{version}-plugins-{type}s-{plugin}-fetch_min_bytes"] -===== `fetch_min_bytes` - - * Value type is <> - * There is no default value for this setting. - -The minimum amount of data the server should return for a fetch request. If insufficient -data is available the request will wait for that much data to accumulate -before answering the request. - -[id="{version}-plugins-{type}s-{plugin}-group_id"] -===== `group_id` - - * Value type is <> - * Default value is `"logstash"` - -The identifier of the group this consumer belongs to. Consumer group is a single logical subscriber -that happens to be made up of multiple processors. Messages in a topic will be distributed to all -Logstash instances with the same `group_id` - -[id="{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms"] -===== `heartbeat_interval_ms` - - * Value type is <> - * There is no default value for this setting. - -The expected time between heartbeats to the consumer coordinator. Heartbeats are used to ensure -that the consumer's session stays active and to facilitate rebalancing when new -consumers join or leave the group. The value must be set lower than -`session.timeout.ms`, but typically should be set no higher than 1/3 of that value. -It can be adjusted even lower to control the expected time for normal rebalances. - -[id="{version}-plugins-{type}s-{plugin}-jaas_path"] -===== `jaas_path` - - * Value type is <> - * There is no default value for this setting. - -The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization -services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for Kafka client: -[source,java] ----------------------------------- -KafkaClient { - com.sun.security.auth.module.Krb5LoginModule required - useTicketCache=true - renewTicket=true - serviceName="kafka"; - }; ----------------------------------- - -Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these -to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same -`jaas_path` and `kerberos_config`. 
If this is not desirable, you would have to run separate instances of Logstash on -different JVM instances. - -[id="{version}-plugins-{type}s-{plugin}-kerberos_config"] -===== `kerberos_config` - - * Value type is <> - * There is no default value for this setting. - -Optional path to kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html - -[id="{version}-plugins-{type}s-{plugin}-key_deserializer_class"] -===== `key_deserializer_class` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"` - -Java Class used to deserialize the record's key - -[id="{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes"] -===== `max_partition_fetch_bytes` - - * Value type is <> - * There is no default value for this setting. - -The maximum amount of data per-partition the server will return. The maximum total memory used for a -request will be #partitions * max.partition.fetch.bytes. This size must be at least -as large as the maximum message size the server allows or else it is possible for the producer to -send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying -to fetch a large message on a certain partition. - -[id="{version}-plugins-{type}s-{plugin}-max_poll_interval_ms"] -===== `max_poll_interval_ms` - - * Value type is <> - * There is no default value for this setting. - -The maximum delay between invocations of poll() when using consumer group management. This places -an upper bound on the amount of time that the consumer can be idle before fetching more records. -If poll() is not called before expiration of this timeout, then the consumer is considered failed and -the group will rebalance in order to reassign the partitions to another member. -The value of the configuration `request_timeout_ms` must always be larger than max_poll_interval_ms - -[id="{version}-plugins-{type}s-{plugin}-max_poll_records"] -===== `max_poll_records` - - * Value type is <> - * There is no default value for this setting. - -The maximum number of records returned in a single call to poll(). - -[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"] -===== `metadata_max_age_ms` - - * Value type is <> - * There is no default value for this setting. - -The period of time in milliseconds after which we force a refresh of metadata even if -we haven't seen any partition leadership changes to proactively discover any new brokers or partitions - -[id="{version}-plugins-{type}s-{plugin}-partition_assignment_strategy"] -===== `partition_assignment_strategy` - - * Value type is <> - * There is no default value for this setting. - -The class name of the partition assignment strategy that the client will use to distribute -partition ownership amongst consumer instances - -[id="{version}-plugins-{type}s-{plugin}-poll_timeout_ms"] -===== `poll_timeout_ms` - - * Value type is <> - * Default value is `100` - -Time kafka consumer will wait to receive new messages from topics - -[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] -===== `receive_buffer_bytes` - - * Value type is <> - * There is no default value for this setting. - -The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. - -[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"] -===== `reconnect_backoff_ms` - - * Value type is <> - * There is no default value for this setting. - -The amount of time to wait before attempting to reconnect to a given host. 
-This avoids repeatedly connecting to a host in a tight loop. -This backoff applies to all requests sent by the consumer to the broker. - -[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"] -===== `request_timeout_ms` - - * Value type is <> - * There is no default value for this setting. - -The configuration controls the maximum amount of time the client will wait -for the response of a request. If the response is not received before the timeout -elapses the client will resend the request if necessary or fail the request if -retries are exhausted. - -[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"] -===== `retry_backoff_ms` - - * Value type is <> - * There is no default value for this setting. - -The amount of time to wait before attempting to retry a failed fetch request -to a given topic partition. This avoids repeated fetching-and-failing in a tight loop. - -[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"] -===== `sasl_kerberos_service_name` - - * Value type is <> - * There is no default value for this setting. - -The Kerberos principal name that Kafka broker runs as. -This can be defined either in Kafka's JAAS config or in Kafka's config. - -[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"] -===== `sasl_mechanism` - - * Value type is <> - * Default value is `"GSSAPI"` - -http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections. -This may be any mechanism for which a security provider is available. -GSSAPI is the default mechanism. - -[id="{version}-plugins-{type}s-{plugin}-security_protocol"] -===== `security_protocol` - - * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL` - * Default value is `"PLAINTEXT"` - -Security protocol to use, which can be either of PLAINTEXT,SSL,SASL_PLAINTEXT,SASL_SSL - -[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"] -===== `send_buffer_bytes` - - * Value type is <> - * There is no default value for this setting. - -The size of the TCP send buffer (SO_SNDBUF) to use when sending data - -[id="{version}-plugins-{type}s-{plugin}-session_timeout_ms"] -===== `session_timeout_ms` - - * Value type is <> - * There is no default value for this setting. - -The timeout after which, if the `poll_timeout_ms` is not invoked, the consumer is marked dead -and a rebalance operation is triggered for the group identified by `group_id` - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * Default value is `false` - -Enable SSL/TLS secured communication to Kafka broker. - -[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"] -===== `ssl_key_password` - - * Value type is <> - * There is no default value for this setting. - -The password of the private key in the key store file. - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"] -===== `ssl_keystore_location` - - * Value type is <> - * There is no default value for this setting. - -If client authentication is required, this setting stores the keystore path. - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"] -===== `ssl_keystore_password` - - * Value type is <> - * There is no default value for this setting. 
- -If client authentication is required, this setting stores the keystore password - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"] -===== `ssl_keystore_type` - - * Value type is <> - * There is no default value for this setting. - -The keystore type. - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"] -===== `ssl_truststore_location` - - * Value type is <> - * There is no default value for this setting. - -The JKS truststore path to validate the Kafka broker's certificate. - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"] -===== `ssl_truststore_password` - - * Value type is <> - * There is no default value for this setting. - -The truststore password - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"] -===== `ssl_truststore_type` - - * Value type is <> - * There is no default value for this setting. - -The truststore type. - -[id="{version}-plugins-{type}s-{plugin}-topics"] -===== `topics` - - * Value type is <> - * Default value is `["logstash"]` - -A list of topics to subscribe to, defaults to ["logstash"]. - -[id="{version}-plugins-{type}s-{plugin}-topics_pattern"] -===== `topics_pattern` - - * Value type is <> - * There is no default value for this setting. - -A topic regex pattern to subscribe to. -The topics configuration will be ignored when using this configuration. - -[id="{version}-plugins-{type}s-{plugin}-value_deserializer_class"] -===== `value_deserializer_class` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"` - -Java Class used to deserialize the record's value - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/kafka-v7.0.0.asciidoc b/docs/versioned-plugins/inputs/kafka-v7.0.0.asciidoc deleted file mode 100644 index 0f0c1d321..000000000 --- a/docs/versioned-plugins/inputs/kafka-v7.0.0.asciidoc +++ /dev/null @@ -1,566 +0,0 @@ -:plugin: kafka -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v7.0.0 -:release_date: 2017-07-18 -:changelog_url: https://github.com/logstash-plugins/logstash-input-kafka/blob/v7.0.0/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Kafka input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This input will read events from a Kafka topic. It uses the 0.10 version of -the consumer API provided by Kafka to read messages from the broker. - -Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination -of Logstash and the Kafka input plugin: - -[options="header"] -|========================================================== -|Kafka Client Version |Logstash Version |Plugin Version |Why? 
-|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular -|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`) -|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`) -|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker -|0.10.1.x |2.4.x - 5.x.x | 6.x.x | -|0.11.0.0 |2.4.x - 5.x.x | 6.x.x |Not compatible with the <= 0.9 broker -|========================================================== - -NOTE: We recommended that you use matching Kafka client and broker versions. During upgrades, you should -upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker -is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around. - -This input supports connecting to Kafka over: - -* SSL (requires plugin version 3.0.0 or later) -* Kerberos SASL (requires plugin version 5.1.0 or later) - -By default security is disabled but can be turned on as needed. - -The Logstash Kafka consumer handles group management and uses the default offset management -strategy using Kafka topics. - -Logstash instances by default form a single logical group to subscribe to Kafka topics -Each Logstash Kafka consumer can run multiple threads to increase read throughput. Alternatively, -you could run multiple Logstash instances with the same `group_id` to spread the load across -physical machines. Messages in a topic will be distributed to all Logstash instances with -the same `group_id`. - -Ideally you should have as many threads as the number of partitions for a perfect balance -- -more threads than partitions means that some threads will be idle - -For more information see http://kafka.apache.org/documentation.html#theconsumer - -Kafka consumer configuration: http://kafka.apache.org/documentation.html#consumerconfigs - -==== Metadata fields - -The following metadata from Kafka broker are added under the `[@metadata]` field: - -* `[@metadata][kafka][topic]`: Original Kafka topic from where the message was consumed. -* `[@metadata][kafka][consumer_group]`: Consumer group -* `[@metadata][kafka][partition]`: Partition info for this message. -* `[@metadata][kafka][offset]`: Original record offset for this message. -* `[@metadata][kafka][key]`: Record key, if any. -* `[@metadata][kafka][timestamp]`: Timestamp when this message was received by the Kafka broker. - -Please note that `@metadata` fields are not part of any of your events at output time. If you need these information to be -inserted into your original event, you'll have to use the `mutate` filter to manually copy the required fields into your `event`. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Kafka Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-auto_offset_reset>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-check_crcs>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connections_max_idle_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-consumer_threads>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-decorate_events>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-enable_auto_commit>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclude_internal_topics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fetch_max_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fetch_min_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-group_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-key_deserializer_class>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_poll_interval_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_poll_records>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-partition_assignment_strategy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-poll_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No -| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topics_pattern>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-value_deserializer_class>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. 
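-
-As noted under "Metadata fields" above, `@metadata` is dropped at output time. The following hedged
-sketch copies two of the `[@metadata][kafka]` fields into the event so they survive output; it assumes
-those fields are populated (in some plugin versions this requires `decorate_events => true`), and the
-target field names `kafka_topic` and `kafka_offset` are purely illustrative:
-
-[source,ruby]
-----------------------------------
-input {
-  kafka {
-    bootstrap_servers => "localhost:9092"
-    topics => ["logstash"]
-    decorate_events => true  # assumed prerequisite for the [@metadata][kafka] fields
-  }
-}
-
-filter {
-  mutate {
-    # Copy Kafka metadata into regular event fields
-    add_field => {
-      "kafka_topic"  => "%{[@metadata][kafka][topic]}"
-      "kafka_offset" => "%{[@metadata][kafka][offset]}"
-    }
-  }
-}
-----------------------------------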
- -  - -[id="{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms"] -===== `auto_commit_interval_ms` - - * Value type is <> - * Default value is `"5000"` - -The frequency in milliseconds that the consumer offsets are committed to Kafka. - -[id="{version}-plugins-{type}s-{plugin}-auto_offset_reset"] -===== `auto_offset_reset` - - * Value type is <> - * There is no default value for this setting. - -What to do when there is no initial offset in Kafka or if an offset is out of range: - -* earliest: automatically reset the offset to the earliest offset -* latest: automatically reset the offset to the latest offset -* none: throw exception to the consumer if no previous offset is found for the consumer's group -* anything else: throw exception to the consumer. - -[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"] -===== `bootstrap_servers` - - * Value type is <> - * Default value is `"localhost:9092"` - -A list of URLs of Kafka instances to use for establishing the initial connection to the cluster. -This list should be in the form of `host1:port1,host2:port2` These urls are just used -for the initial connection to discover the full cluster membership (which may change dynamically) -so this list need not contain the full set of servers (you may want more than one, though, in -case a server is down). - -[id="{version}-plugins-{type}s-{plugin}-check_crcs"] -===== `check_crcs` - - * Value type is <> - * There is no default value for this setting. - -Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk -corruption to the messages occurred. This check adds some overhead, so it may be -disabled in cases seeking extreme performance. - -[id="{version}-plugins-{type}s-{plugin}-client_id"] -===== `client_id` - - * Value type is <> - * Default value is `"logstash"` - -The id string to pass to the server when making requests. The purpose of this -is to be able to track the source of requests beyond just ip/port by allowing -a logical application name to be included. - -[id="{version}-plugins-{type}s-{plugin}-connections_max_idle_ms"] -===== `connections_max_idle_ms` - - * Value type is <> - * There is no default value for this setting. - -Close idle connections after the number of milliseconds specified by this config. - -[id="{version}-plugins-{type}s-{plugin}-consumer_threads"] -===== `consumer_threads` - - * Value type is <> - * Default value is `1` - -Ideally you should have as many threads as the number of partitions for a perfect -balance — more threads than partitions means that some threads will be idle - -[id="{version}-plugins-{type}s-{plugin}-decorate_events"] -===== `decorate_events` - - * Value type is <> - * Default value is `false` - -Option to add Kafka metadata like topic, message size to the event. -This will add a field named `kafka` to the logstash event containing the following attributes: - `topic`: The topic this message is associated with - `consumer_group`: The consumer group used to read in this event - `partition`: The partition this message is associated with - `offset`: The offset from the partition this message is associated with - `key`: A ByteBuffer containing the message key - -[id="{version}-plugins-{type}s-{plugin}-enable_auto_commit"] -===== `enable_auto_commit` - - * Value type is <> - * Default value is `"true"` - -If true, periodically commit to Kafka the offsets of messages already returned by the consumer. 
-This committed offset will be used when the process fails as the position from -which the consumption will begin. - -[id="{version}-plugins-{type}s-{plugin}-exclude_internal_topics"] -===== `exclude_internal_topics` - - * Value type is <> - * There is no default value for this setting. - -Whether records from internal topics (such as offsets) should be exposed to the consumer. -If set to true the only way to receive records from an internal topic is subscribing to it. - -[id="{version}-plugins-{type}s-{plugin}-fetch_max_bytes"] -===== `fetch_max_bytes` - - * Value type is <> - * There is no default value for this setting. - -The maximum amount of data the server should return for a fetch request. This is not an -absolute maximum, if the first message in the first non-empty partition of the fetch is larger -than this value, the message will still be returned to ensure that the consumer can make progress. - -[id="{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms"] -===== `fetch_max_wait_ms` - - * Value type is <> - * There is no default value for this setting. - -The maximum amount of time the server will block before answering the fetch request if -there isn't sufficient data to immediately satisfy `fetch_min_bytes`. This -should be less than or equal to the timeout used in `poll_timeout_ms` - -[id="{version}-plugins-{type}s-{plugin}-fetch_min_bytes"] -===== `fetch_min_bytes` - - * Value type is <> - * There is no default value for this setting. - -The minimum amount of data the server should return for a fetch request. If insufficient -data is available the request will wait for that much data to accumulate -before answering the request. - -[id="{version}-plugins-{type}s-{plugin}-group_id"] -===== `group_id` - - * Value type is <> - * Default value is `"logstash"` - -The identifier of the group this consumer belongs to. Consumer group is a single logical subscriber -that happens to be made up of multiple processors. Messages in a topic will be distributed to all -Logstash instances with the same `group_id` - -[id="{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms"] -===== `heartbeat_interval_ms` - - * Value type is <> - * There is no default value for this setting. - -The expected time between heartbeats to the consumer coordinator. Heartbeats are used to ensure -that the consumer's session stays active and to facilitate rebalancing when new -consumers join or leave the group. The value must be set lower than -`session.timeout.ms`, but typically should be set no higher than 1/3 of that value. -It can be adjusted even lower to control the expected time for normal rebalances. - -[id="{version}-plugins-{type}s-{plugin}-jaas_path"] -===== `jaas_path` - - * Value type is <> - * There is no default value for this setting. - -The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization -services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for Kafka client: -[source,java] ----------------------------------- -KafkaClient { - com.sun.security.auth.module.Krb5LoginModule required - useTicketCache=true - renewTicket=true - serviceName="kafka"; - }; ----------------------------------- - -Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these -to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same -`jaas_path` and `kerberos_config`. 
If this is not desirable, you would have to run separate instances of Logstash on -different JVM instances. - -[id="{version}-plugins-{type}s-{plugin}-kerberos_config"] -===== `kerberos_config` - - * Value type is <> - * There is no default value for this setting. - -Optional path to kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html - -[id="{version}-plugins-{type}s-{plugin}-key_deserializer_class"] -===== `key_deserializer_class` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"` - -Java Class used to deserialize the record's key - -[id="{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes"] -===== `max_partition_fetch_bytes` - - * Value type is <> - * There is no default value for this setting. - -The maximum amount of data per-partition the server will return. The maximum total memory used for a -request will be #partitions * max.partition.fetch.bytes. This size must be at least -as large as the maximum message size the server allows or else it is possible for the producer to -send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying -to fetch a large message on a certain partition. - -[id="{version}-plugins-{type}s-{plugin}-max_poll_interval_ms"] -===== `max_poll_interval_ms` - - * Value type is <> - * There is no default value for this setting. - -The maximum delay between invocations of poll() when using consumer group management. This places -an upper bound on the amount of time that the consumer can be idle before fetching more records. -If poll() is not called before expiration of this timeout, then the consumer is considered failed and -the group will rebalance in order to reassign the partitions to another member. -The value of the configuration `request_timeout_ms` must always be larger than max_poll_interval_ms - -[id="{version}-plugins-{type}s-{plugin}-max_poll_records"] -===== `max_poll_records` - - * Value type is <> - * There is no default value for this setting. - -The maximum number of records returned in a single call to poll(). - -[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"] -===== `metadata_max_age_ms` - - * Value type is <> - * There is no default value for this setting. - -The period of time in milliseconds after which we force a refresh of metadata even if -we haven't seen any partition leadership changes to proactively discover any new brokers or partitions - -[id="{version}-plugins-{type}s-{plugin}-partition_assignment_strategy"] -===== `partition_assignment_strategy` - - * Value type is <> - * There is no default value for this setting. - -The class name of the partition assignment strategy that the client will use to distribute -partition ownership amongst consumer instances - -[id="{version}-plugins-{type}s-{plugin}-poll_timeout_ms"] -===== `poll_timeout_ms` - - * Value type is <> - * Default value is `100` - -Time kafka consumer will wait to receive new messages from topics - -[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] -===== `receive_buffer_bytes` - - * Value type is <> - * There is no default value for this setting. - -The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. - -[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"] -===== `reconnect_backoff_ms` - - * Value type is <> - * There is no default value for this setting. - -The amount of time to wait before attempting to reconnect to a given host. 
-This avoids repeatedly connecting to a host in a tight loop. -This backoff applies to all requests sent by the consumer to the broker. - -[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"] -===== `request_timeout_ms` - - * Value type is <> - * There is no default value for this setting. - -The configuration controls the maximum amount of time the client will wait -for the response of a request. If the response is not received before the timeout -elapses the client will resend the request if necessary or fail the request if -retries are exhausted. - -[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"] -===== `retry_backoff_ms` - - * Value type is <> - * There is no default value for this setting. - -The amount of time to wait before attempting to retry a failed fetch request -to a given topic partition. This avoids repeated fetching-and-failing in a tight loop. - -[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"] -===== `sasl_kerberos_service_name` - - * Value type is <> - * There is no default value for this setting. - -The Kerberos principal name that Kafka broker runs as. -This can be defined either in Kafka's JAAS config or in Kafka's config. - -[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"] -===== `sasl_mechanism` - - * Value type is <> - * Default value is `"GSSAPI"` - -http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections. -This may be any mechanism for which a security provider is available. -GSSAPI is the default mechanism. - -[id="{version}-plugins-{type}s-{plugin}-security_protocol"] -===== `security_protocol` - - * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL` - * Default value is `"PLAINTEXT"` - -Security protocol to use, which can be either of PLAINTEXT,SSL,SASL_PLAINTEXT,SASL_SSL - -[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"] -===== `send_buffer_bytes` - - * Value type is <> - * There is no default value for this setting. - -The size of the TCP send buffer (SO_SNDBUF) to use when sending data - -[id="{version}-plugins-{type}s-{plugin}-session_timeout_ms"] -===== `session_timeout_ms` - - * Value type is <> - * There is no default value for this setting. - -The timeout after which, if the `poll_timeout_ms` is not invoked, the consumer is marked dead -and a rebalance operation is triggered for the group identified by `group_id` - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * Default value is `false` - -Enable SSL/TLS secured communication to Kafka broker. - -[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"] -===== `ssl_key_password` - - * Value type is <> - * There is no default value for this setting. - -The password of the private key in the key store file. - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"] -===== `ssl_keystore_location` - - * Value type is <> - * There is no default value for this setting. - -If client authentication is required, this setting stores the keystore path. - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"] -===== `ssl_keystore_password` - - * Value type is <> - * There is no default value for this setting. 
- -If client authentication is required, this setting stores the keystore password - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"] -===== `ssl_keystore_type` - - * Value type is <> - * There is no default value for this setting. - -The keystore type. - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"] -===== `ssl_truststore_location` - - * Value type is <> - * There is no default value for this setting. - -The JKS truststore path to validate the Kafka broker's certificate. - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"] -===== `ssl_truststore_password` - - * Value type is <> - * There is no default value for this setting. - -The truststore password - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"] -===== `ssl_truststore_type` - - * Value type is <> - * There is no default value for this setting. - -The truststore type. - -[id="{version}-plugins-{type}s-{plugin}-topics"] -===== `topics` - - * Value type is <> - * Default value is `["logstash"]` - -A list of topics to subscribe to, defaults to ["logstash"]. - -[id="{version}-plugins-{type}s-{plugin}-topics_pattern"] -===== `topics_pattern` - - * Value type is <> - * There is no default value for this setting. - -A topic regex pattern to subscribe to. -The topics configuration will be ignored when using this configuration. - -[id="{version}-plugins-{type}s-{plugin}-value_deserializer_class"] -===== `value_deserializer_class` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"` - -Java Class used to deserialize the record's value - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/kafka-v8.0.0.asciidoc b/docs/versioned-plugins/inputs/kafka-v8.0.0.asciidoc deleted file mode 100644 index 75474319f..000000000 --- a/docs/versioned-plugins/inputs/kafka-v8.0.0.asciidoc +++ /dev/null @@ -1,557 +0,0 @@ -:plugin: kafka -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v8.0.0 -:release_date: 2017-08-01 -:changelog_url: https://github.com/logstash-plugins/logstash-input-kafka/blob/v8.0.0/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Kafka input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This input will read events from a Kafka topic. It uses the 0.10 version of -the consumer API provided by Kafka to read messages from the broker. - -Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination -of Logstash and the Kafka input plugin: - -[options="header"] -|========================================================== -|Kafka Client Version |Logstash Version |Plugin Version |Why? 
-|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular
-|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`)
-|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`)
-|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker
-|0.10.1.x |2.4.x - 5.x.x | 6.x.x |
-|0.11.0.0 |2.4.x - 5.x.x | 6.x.x |Not compatible with the <= 0.9 broker
-|==========================================================
-
-NOTE: We recommend that you use matching Kafka client and broker versions. During upgrades, you should
-upgrade brokers before clients because brokers are designed to be backward compatible with older clients.
-For example, the 0.9 broker is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the
-other way around.
-
-This input supports connecting to Kafka over:
-
-* SSL (requires plugin version 3.0.0 or later)
-* Kerberos SASL (requires plugin version 5.1.0 or later)
-
-By default, security is disabled, but it can be turned on as needed.
-
-The Logstash Kafka consumer handles group management and uses the default offset management
-strategy, which stores offsets in Kafka topics.
-
-By default, Logstash instances form a single logical group that subscribes to Kafka topics.
-Each Logstash Kafka consumer can run multiple threads to increase read throughput. Alternatively,
-you can run multiple Logstash instances with the same `group_id` to spread the load across
-physical machines. Messages in a topic are distributed to all Logstash instances with
-the same `group_id`.
-
-Ideally, you should have as many threads as there are partitions for a perfect balance;
-more threads than partitions means that some threads will be idle.
-
-For more information, see http://kafka.apache.org/documentation.html#theconsumer
-
-Kafka consumer configuration: http://kafka.apache.org/documentation.html#consumerconfigs
-
-==== Metadata fields
-
-The following metadata from the Kafka broker is added under the `[@metadata]` field:
-
-* `[@metadata][kafka][topic]`: Original Kafka topic from which the message was consumed.
-* `[@metadata][kafka][consumer_group]`: Consumer group used to read this message.
-* `[@metadata][kafka][partition]`: Partition info for this message.
-* `[@metadata][kafka][offset]`: Original record offset for this message.
-* `[@metadata][kafka][key]`: Record key, if any.
-* `[@metadata][kafka][timestamp]`: Timestamp when this message was received by the Kafka broker.
-
-Please note that `@metadata` fields are not part of your events at output time. If you need this
-information in the event itself, use the `mutate` filter to copy the required fields into the event.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Kafka Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
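-Building on the security support described above, here is a hedged sketch of a `SASL_SSL` setup.
-All file paths, the password, and the broker address are placeholders, and the JAAS file is assumed
-to follow the sample shown under `jaas_path` below:
-
-[source,ruby]
-----------------------------------
-input {
-  kafka {
-    bootstrap_servers => "broker1:9093"           # placeholder TLS listener
-    topics => ["logstash"]
-    security_protocol => "SASL_SSL"               # Kerberos SASL over an encrypted channel
-    sasl_kerberos_service_name => "kafka"         # principal name the broker runs as
-    jaas_path => "/etc/logstash/kafka_jaas.conf"  # becomes a global JVM property
-    kerberos_config => "/etc/krb5.conf"           # likewise global to the JVM
-    ssl_truststore_location => "/path/to/truststore.jks"
-    ssl_truststore_password => "changeit"
-  }
-}
-----------------------------------
-
-Because `jaas_path` and `kerberos_config` become global JVM system properties, every Kafka input in
-the same Logstash process shares these values (see the `jaas_path` description below).
-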
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-auto_offset_reset>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-check_crcs>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connections_max_idle_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-consumer_threads>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-decorate_events>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-enable_auto_commit>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclude_internal_topics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fetch_max_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fetch_min_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-group_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-key_deserializer_class>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_poll_interval_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_poll_records>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-partition_assignment_strategy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-poll_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No -| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topics_pattern>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-value_deserializer_class>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. 
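-
-To illustrate the subscription and threading options listed above, this hypothetical sketch
-subscribes by regex rather than by a fixed topic list and runs one thread per expected partition
-(the pattern and thread count are placeholders):
-
-[source,ruby]
-----------------------------------
-input {
-  kafka {
-    bootstrap_servers => "localhost:9092"
-    topics_pattern => "logs-.*"  # regex subscription; the topics option is ignored when this is set
-    group_id => "logstash"
-    consumer_threads => 4        # ideally equal to the number of partitions across matched topics
-  }
-}
-----------------------------------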
- -  - -[id="{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms"] -===== `auto_commit_interval_ms` - - * Value type is <> - * Default value is `"5000"` - -The frequency in milliseconds that the consumer offsets are committed to Kafka. - -[id="{version}-plugins-{type}s-{plugin}-auto_offset_reset"] -===== `auto_offset_reset` - - * Value type is <> - * There is no default value for this setting. - -What to do when there is no initial offset in Kafka or if an offset is out of range: - -* earliest: automatically reset the offset to the earliest offset -* latest: automatically reset the offset to the latest offset -* none: throw exception to the consumer if no previous offset is found for the consumer's group -* anything else: throw exception to the consumer. - -[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"] -===== `bootstrap_servers` - - * Value type is <> - * Default value is `"localhost:9092"` - -A list of URLs of Kafka instances to use for establishing the initial connection to the cluster. -This list should be in the form of `host1:port1,host2:port2` These urls are just used -for the initial connection to discover the full cluster membership (which may change dynamically) -so this list need not contain the full set of servers (you may want more than one, though, in -case a server is down). - -[id="{version}-plugins-{type}s-{plugin}-check_crcs"] -===== `check_crcs` - - * Value type is <> - * There is no default value for this setting. - -Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk -corruption to the messages occurred. This check adds some overhead, so it may be -disabled in cases seeking extreme performance. - -[id="{version}-plugins-{type}s-{plugin}-client_id"] -===== `client_id` - - * Value type is <> - * Default value is `"logstash"` - -The id string to pass to the server when making requests. The purpose of this -is to be able to track the source of requests beyond just ip/port by allowing -a logical application name to be included. - -[id="{version}-plugins-{type}s-{plugin}-connections_max_idle_ms"] -===== `connections_max_idle_ms` - - * Value type is <> - * There is no default value for this setting. - -Close idle connections after the number of milliseconds specified by this config. - -[id="{version}-plugins-{type}s-{plugin}-consumer_threads"] -===== `consumer_threads` - - * Value type is <> - * Default value is `1` - -Ideally you should have as many threads as the number of partitions for a perfect -balance — more threads than partitions means that some threads will be idle - -[id="{version}-plugins-{type}s-{plugin}-decorate_events"] -===== `decorate_events` - - * Value type is <> - * Default value is `false` - -Option to add Kafka metadata like topic, message size to the event. -This will add a field named `kafka` to the logstash event containing the following attributes: - `topic`: The topic this message is associated with - `consumer_group`: The consumer group used to read in this event - `partition`: The partition this message is associated with - `offset`: The offset from the partition this message is associated with - `key`: A ByteBuffer containing the message key - -[id="{version}-plugins-{type}s-{plugin}-enable_auto_commit"] -===== `enable_auto_commit` - - * Value type is <> - * Default value is `"true"` - -If true, periodically commit to Kafka the offsets of messages already returned by the consumer. 
-This committed offset will be used when the process fails as the position from -which the consumption will begin. - -[id="{version}-plugins-{type}s-{plugin}-exclude_internal_topics"] -===== `exclude_internal_topics` - - * Value type is <> - * There is no default value for this setting. - -Whether records from internal topics (such as offsets) should be exposed to the consumer. -If set to true the only way to receive records from an internal topic is subscribing to it. - -[id="{version}-plugins-{type}s-{plugin}-fetch_max_bytes"] -===== `fetch_max_bytes` - - * Value type is <> - * There is no default value for this setting. - -The maximum amount of data the server should return for a fetch request. This is not an -absolute maximum, if the first message in the first non-empty partition of the fetch is larger -than this value, the message will still be returned to ensure that the consumer can make progress. - -[id="{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms"] -===== `fetch_max_wait_ms` - - * Value type is <> - * There is no default value for this setting. - -The maximum amount of time the server will block before answering the fetch request if -there isn't sufficient data to immediately satisfy `fetch_min_bytes`. This -should be less than or equal to the timeout used in `poll_timeout_ms` - -[id="{version}-plugins-{type}s-{plugin}-fetch_min_bytes"] -===== `fetch_min_bytes` - - * Value type is <> - * There is no default value for this setting. - -The minimum amount of data the server should return for a fetch request. If insufficient -data is available the request will wait for that much data to accumulate -before answering the request. - -[id="{version}-plugins-{type}s-{plugin}-group_id"] -===== `group_id` - - * Value type is <> - * Default value is `"logstash"` - -The identifier of the group this consumer belongs to. Consumer group is a single logical subscriber -that happens to be made up of multiple processors. Messages in a topic will be distributed to all -Logstash instances with the same `group_id` - -[id="{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms"] -===== `heartbeat_interval_ms` - - * Value type is <> - * There is no default value for this setting. - -The expected time between heartbeats to the consumer coordinator. Heartbeats are used to ensure -that the consumer's session stays active and to facilitate rebalancing when new -consumers join or leave the group. The value must be set lower than -`session.timeout.ms`, but typically should be set no higher than 1/3 of that value. -It can be adjusted even lower to control the expected time for normal rebalances. - -[id="{version}-plugins-{type}s-{plugin}-jaas_path"] -===== `jaas_path` - - * Value type is <> - * There is no default value for this setting. - -The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization -services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for Kafka client: -[source,java] ----------------------------------- -KafkaClient { - com.sun.security.auth.module.Krb5LoginModule required - useTicketCache=true - renewTicket=true - serviceName="kafka"; - }; ----------------------------------- - -Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these -to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same -`jaas_path` and `kerberos_config`. 
If this is not desirable, you would have to run separate instances of Logstash on -different JVM instances. - -[id="{version}-plugins-{type}s-{plugin}-kerberos_config"] -===== `kerberos_config` - - * Value type is <> - * There is no default value for this setting. - -Optional path to kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html - -[id="{version}-plugins-{type}s-{plugin}-key_deserializer_class"] -===== `key_deserializer_class` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"` - -Java Class used to deserialize the record's key - -[id="{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes"] -===== `max_partition_fetch_bytes` - - * Value type is <> - * There is no default value for this setting. - -The maximum amount of data per-partition the server will return. The maximum total memory used for a -request will be #partitions * max.partition.fetch.bytes. This size must be at least -as large as the maximum message size the server allows or else it is possible for the producer to -send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying -to fetch a large message on a certain partition. - -[id="{version}-plugins-{type}s-{plugin}-max_poll_interval_ms"] -===== `max_poll_interval_ms` - - * Value type is <> - * There is no default value for this setting. - -The maximum delay between invocations of poll() when using consumer group management. This places -an upper bound on the amount of time that the consumer can be idle before fetching more records. -If poll() is not called before expiration of this timeout, then the consumer is considered failed and -the group will rebalance in order to reassign the partitions to another member. -The value of the configuration `request_timeout_ms` must always be larger than max_poll_interval_ms - -[id="{version}-plugins-{type}s-{plugin}-max_poll_records"] -===== `max_poll_records` - - * Value type is <> - * There is no default value for this setting. - -The maximum number of records returned in a single call to poll(). - -[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"] -===== `metadata_max_age_ms` - - * Value type is <> - * There is no default value for this setting. - -The period of time in milliseconds after which we force a refresh of metadata even if -we haven't seen any partition leadership changes to proactively discover any new brokers or partitions - -[id="{version}-plugins-{type}s-{plugin}-partition_assignment_strategy"] -===== `partition_assignment_strategy` - - * Value type is <> - * There is no default value for this setting. - -The class name of the partition assignment strategy that the client will use to distribute -partition ownership amongst consumer instances - -[id="{version}-plugins-{type}s-{plugin}-poll_timeout_ms"] -===== `poll_timeout_ms` - - * Value type is <> - * Default value is `100` - -Time kafka consumer will wait to receive new messages from topics - -[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] -===== `receive_buffer_bytes` - - * Value type is <> - * There is no default value for this setting. - -The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. - -[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"] -===== `reconnect_backoff_ms` - - * Value type is <> - * There is no default value for this setting. - -The amount of time to wait before attempting to reconnect to a given host. 
-This avoids repeatedly connecting to a host in a tight loop. -This backoff applies to all requests sent by the consumer to the broker. - -[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"] -===== `request_timeout_ms` - - * Value type is <> - * There is no default value for this setting. - -The configuration controls the maximum amount of time the client will wait -for the response of a request. If the response is not received before the timeout -elapses the client will resend the request if necessary or fail the request if -retries are exhausted. - -[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"] -===== `retry_backoff_ms` - - * Value type is <> - * There is no default value for this setting. - -The amount of time to wait before attempting to retry a failed fetch request -to a given topic partition. This avoids repeated fetching-and-failing in a tight loop. - -[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"] -===== `sasl_kerberos_service_name` - - * Value type is <> - * There is no default value for this setting. - -The Kerberos principal name that Kafka broker runs as. -This can be defined either in Kafka's JAAS config or in Kafka's config. - -[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"] -===== `sasl_mechanism` - - * Value type is <> - * Default value is `"GSSAPI"` - -http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections. -This may be any mechanism for which a security provider is available. -GSSAPI is the default mechanism. - -[id="{version}-plugins-{type}s-{plugin}-security_protocol"] -===== `security_protocol` - - * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL` - * Default value is `"PLAINTEXT"` - -Security protocol to use, which can be either of PLAINTEXT,SSL,SASL_PLAINTEXT,SASL_SSL - -[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"] -===== `send_buffer_bytes` - - * Value type is <> - * There is no default value for this setting. - -The size of the TCP send buffer (SO_SNDBUF) to use when sending data - -[id="{version}-plugins-{type}s-{plugin}-session_timeout_ms"] -===== `session_timeout_ms` - - * Value type is <> - * There is no default value for this setting. - -The timeout after which, if the `poll_timeout_ms` is not invoked, the consumer is marked dead -and a rebalance operation is triggered for the group identified by `group_id` - -[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"] -===== `ssl_key_password` - - * Value type is <> - * There is no default value for this setting. - -The password of the private key in the key store file. - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"] -===== `ssl_keystore_location` - - * Value type is <> - * There is no default value for this setting. - -If client authentication is required, this setting stores the keystore path. - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"] -===== `ssl_keystore_password` - - * Value type is <> - * There is no default value for this setting. - -If client authentication is required, this setting stores the keystore password - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"] -===== `ssl_keystore_type` - - * Value type is <> - * There is no default value for this setting. - -The keystore type. - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"] -===== `ssl_truststore_location` - - * Value type is <> - * There is no default value for this setting. 
- -The JKS truststore path to validate the Kafka broker's certificate. - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"] -===== `ssl_truststore_password` - - * Value type is <> - * There is no default value for this setting. - -The truststore password - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"] -===== `ssl_truststore_type` - - * Value type is <> - * There is no default value for this setting. - -The truststore type. - -[id="{version}-plugins-{type}s-{plugin}-topics"] -===== `topics` - - * Value type is <> - * Default value is `["logstash"]` - -A list of topics to subscribe to, defaults to ["logstash"]. - -[id="{version}-plugins-{type}s-{plugin}-topics_pattern"] -===== `topics_pattern` - - * Value type is <> - * There is no default value for this setting. - -A topic regex pattern to subscribe to. -The topics configuration will be ignored when using this configuration. - -[id="{version}-plugins-{type}s-{plugin}-value_deserializer_class"] -===== `value_deserializer_class` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"` - -Java Class used to deserialize the record's value - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/kafka-v8.0.2.asciidoc b/docs/versioned-plugins/inputs/kafka-v8.0.2.asciidoc deleted file mode 100644 index 1acd88e0c..000000000 --- a/docs/versioned-plugins/inputs/kafka-v8.0.2.asciidoc +++ /dev/null @@ -1,557 +0,0 @@ -:plugin: kafka -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v8.0.2 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-input-kafka/blob/v8.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Kafka input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This input will read events from a Kafka topic. It uses the 0.10 version of -the consumer API provided by Kafka to read messages from the broker. - -Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination -of Logstash and the Kafka input plugin: - -[options="header"] -|========================================================== -|Kafka Client Version |Logstash Version |Plugin Version |Why? -|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular -|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`) -|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`) -|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker -|0.10.1.x |2.4.x - 5.x.x | 6.x.x | -|0.11.0.0 |2.4.x - 5.x.x | 6.x.x |Not compatible with the <= 0.9 broker -|========================================================== - -NOTE: We recommended that you use matching Kafka client and broker versions. During upgrades, you should -upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker -is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around. 
-
-This input supports connecting to Kafka over:
-
-* SSL (requires plugin version 3.0.0 or later)
-* Kerberos SASL (requires plugin version 5.1.0 or later)
-
-By default security is disabled but can be turned on as needed.
-
-The Logstash Kafka consumer handles group management and uses the default offset management
-strategy using Kafka topics.
-
-Logstash instances by default form a single logical group to subscribe to Kafka topics.
-Each Logstash Kafka consumer can run multiple threads to increase read throughput. Alternatively,
-you could run multiple Logstash instances with the same `group_id` to spread the load across
-physical machines. Messages in a topic will be distributed to all Logstash instances with
-the same `group_id`.
-
-Ideally you should have as many threads as the number of partitions for a perfect balance --
-more threads than partitions means that some threads will be idle.
-
-For more information, see http://kafka.apache.org/documentation.html#theconsumer
-
-Kafka consumer configuration: http://kafka.apache.org/documentation.html#consumerconfigs
-
-==== Metadata fields
-
-The following metadata from the Kafka broker are added under the `[@metadata]` field:
-
-* `[@metadata][kafka][topic]`: Original Kafka topic from where the message was consumed.
-* `[@metadata][kafka][consumer_group]`: Consumer group
-* `[@metadata][kafka][partition]`: Partition info for this message.
-* `[@metadata][kafka][offset]`: Original record offset for this message.
-* `[@metadata][kafka][key]`: Record key, if any.
-* `[@metadata][kafka][timestamp]`: Timestamp when this message was received by the Kafka broker.
-
-Please note that `@metadata` fields are not part of any of your events at output time. If you need this information to be
-inserted into your original event, you'll have to use the `mutate` filter to manually copy the required fields into your `event`.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Kafka Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-auto_offset_reset>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-check_crcs>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connections_max_idle_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-consumer_threads>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-decorate_events>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-enable_auto_commit>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclude_internal_topics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fetch_max_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fetch_min_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-group_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-key_deserializer_class>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_poll_interval_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_poll_records>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-partition_assignment_strategy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-poll_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No -| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topics_pattern>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-value_deserializer_class>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. 
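-
-As a quick orientation, the sketch below wires together a few of the options above; the
-values shown (broker address, topic, thread count, and the `kafka_topic` field name) are
-illustrative assumptions, not recommendations:
-
-[source,ruby]
-----------------------------------
-input {
-  kafka {
-    bootstrap_servers => "localhost:9092"  # initial broker list; full membership is discovered
-    topics => ["logstash"]
-    group_id => "logstash"                 # instances sharing this id divide the partitions
-    consumer_threads => 2                  # ideally no more than the partition count
-    decorate_events => true                # populate the [@metadata][kafka] fields
-  }
-}
-
-filter {
-  mutate {
-    # @metadata is dropped at output time, so copy what you need into the event
-    add_field => { "kafka_topic" => "%{[@metadata][kafka][topic]}" }
-  }
-}
-----------------------------------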
- -  - -[id="{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms"] -===== `auto_commit_interval_ms` - - * Value type is <> - * Default value is `"5000"` - -The frequency in milliseconds that the consumer offsets are committed to Kafka. - -[id="{version}-plugins-{type}s-{plugin}-auto_offset_reset"] -===== `auto_offset_reset` - - * Value type is <> - * There is no default value for this setting. - -What to do when there is no initial offset in Kafka or if an offset is out of range: - -* earliest: automatically reset the offset to the earliest offset -* latest: automatically reset the offset to the latest offset -* none: throw exception to the consumer if no previous offset is found for the consumer's group -* anything else: throw exception to the consumer. - -[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"] -===== `bootstrap_servers` - - * Value type is <> - * Default value is `"localhost:9092"` - -A list of URLs of Kafka instances to use for establishing the initial connection to the cluster. -This list should be in the form of `host1:port1,host2:port2` These urls are just used -for the initial connection to discover the full cluster membership (which may change dynamically) -so this list need not contain the full set of servers (you may want more than one, though, in -case a server is down). - -[id="{version}-plugins-{type}s-{plugin}-check_crcs"] -===== `check_crcs` - - * Value type is <> - * There is no default value for this setting. - -Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk -corruption to the messages occurred. This check adds some overhead, so it may be -disabled in cases seeking extreme performance. - -[id="{version}-plugins-{type}s-{plugin}-client_id"] -===== `client_id` - - * Value type is <> - * Default value is `"logstash"` - -The id string to pass to the server when making requests. The purpose of this -is to be able to track the source of requests beyond just ip/port by allowing -a logical application name to be included. - -[id="{version}-plugins-{type}s-{plugin}-connections_max_idle_ms"] -===== `connections_max_idle_ms` - - * Value type is <> - * There is no default value for this setting. - -Close idle connections after the number of milliseconds specified by this config. - -[id="{version}-plugins-{type}s-{plugin}-consumer_threads"] -===== `consumer_threads` - - * Value type is <> - * Default value is `1` - -Ideally you should have as many threads as the number of partitions for a perfect -balance — more threads than partitions means that some threads will be idle - -[id="{version}-plugins-{type}s-{plugin}-decorate_events"] -===== `decorate_events` - - * Value type is <> - * Default value is `false` - -Option to add Kafka metadata like topic, message size to the event. -This will add a field named `kafka` to the logstash event containing the following attributes: - `topic`: The topic this message is associated with - `consumer_group`: The consumer group used to read in this event - `partition`: The partition this message is associated with - `offset`: The offset from the partition this message is associated with - `key`: A ByteBuffer containing the message key - -[id="{version}-plugins-{type}s-{plugin}-enable_auto_commit"] -===== `enable_auto_commit` - - * Value type is <> - * Default value is `"true"` - -If true, periodically commit to Kafka the offsets of messages already returned by the consumer. 
-This committed offset will be used when the process fails as the position from -which the consumption will begin. - -[id="{version}-plugins-{type}s-{plugin}-exclude_internal_topics"] -===== `exclude_internal_topics` - - * Value type is <> - * There is no default value for this setting. - -Whether records from internal topics (such as offsets) should be exposed to the consumer. -If set to true the only way to receive records from an internal topic is subscribing to it. - -[id="{version}-plugins-{type}s-{plugin}-fetch_max_bytes"] -===== `fetch_max_bytes` - - * Value type is <> - * There is no default value for this setting. - -The maximum amount of data the server should return for a fetch request. This is not an -absolute maximum, if the first message in the first non-empty partition of the fetch is larger -than this value, the message will still be returned to ensure that the consumer can make progress. - -[id="{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms"] -===== `fetch_max_wait_ms` - - * Value type is <> - * There is no default value for this setting. - -The maximum amount of time the server will block before answering the fetch request if -there isn't sufficient data to immediately satisfy `fetch_min_bytes`. This -should be less than or equal to the timeout used in `poll_timeout_ms` - -[id="{version}-plugins-{type}s-{plugin}-fetch_min_bytes"] -===== `fetch_min_bytes` - - * Value type is <> - * There is no default value for this setting. - -The minimum amount of data the server should return for a fetch request. If insufficient -data is available the request will wait for that much data to accumulate -before answering the request. - -[id="{version}-plugins-{type}s-{plugin}-group_id"] -===== `group_id` - - * Value type is <> - * Default value is `"logstash"` - -The identifier of the group this consumer belongs to. Consumer group is a single logical subscriber -that happens to be made up of multiple processors. Messages in a topic will be distributed to all -Logstash instances with the same `group_id` - -[id="{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms"] -===== `heartbeat_interval_ms` - - * Value type is <> - * There is no default value for this setting. - -The expected time between heartbeats to the consumer coordinator. Heartbeats are used to ensure -that the consumer's session stays active and to facilitate rebalancing when new -consumers join or leave the group. The value must be set lower than -`session.timeout.ms`, but typically should be set no higher than 1/3 of that value. -It can be adjusted even lower to control the expected time for normal rebalances. - -[id="{version}-plugins-{type}s-{plugin}-jaas_path"] -===== `jaas_path` - - * Value type is <> - * There is no default value for this setting. - -The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization -services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for Kafka client: -[source,java] ----------------------------------- -KafkaClient { - com.sun.security.auth.module.Krb5LoginModule required - useTicketCache=true - renewTicket=true - serviceName="kafka"; - }; ----------------------------------- - -Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these -to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same -`jaas_path` and `kerberos_config`. 
If this is not desirable, you would have to run separate instances of Logstash on -different JVM instances. - -[id="{version}-plugins-{type}s-{plugin}-kerberos_config"] -===== `kerberos_config` - - * Value type is <> - * There is no default value for this setting. - -Optional path to kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html - -[id="{version}-plugins-{type}s-{plugin}-key_deserializer_class"] -===== `key_deserializer_class` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"` - -Java Class used to deserialize the record's key - -[id="{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes"] -===== `max_partition_fetch_bytes` - - * Value type is <> - * There is no default value for this setting. - -The maximum amount of data per-partition the server will return. The maximum total memory used for a -request will be #partitions * max.partition.fetch.bytes. This size must be at least -as large as the maximum message size the server allows or else it is possible for the producer to -send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying -to fetch a large message on a certain partition. - -[id="{version}-plugins-{type}s-{plugin}-max_poll_interval_ms"] -===== `max_poll_interval_ms` - - * Value type is <> - * There is no default value for this setting. - -The maximum delay between invocations of poll() when using consumer group management. This places -an upper bound on the amount of time that the consumer can be idle before fetching more records. -If poll() is not called before expiration of this timeout, then the consumer is considered failed and -the group will rebalance in order to reassign the partitions to another member. -The value of the configuration `request_timeout_ms` must always be larger than max_poll_interval_ms - -[id="{version}-plugins-{type}s-{plugin}-max_poll_records"] -===== `max_poll_records` - - * Value type is <> - * There is no default value for this setting. - -The maximum number of records returned in a single call to poll(). - -[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"] -===== `metadata_max_age_ms` - - * Value type is <> - * There is no default value for this setting. - -The period of time in milliseconds after which we force a refresh of metadata even if -we haven't seen any partition leadership changes to proactively discover any new brokers or partitions - -[id="{version}-plugins-{type}s-{plugin}-partition_assignment_strategy"] -===== `partition_assignment_strategy` - - * Value type is <> - * There is no default value for this setting. - -The class name of the partition assignment strategy that the client will use to distribute -partition ownership amongst consumer instances - -[id="{version}-plugins-{type}s-{plugin}-poll_timeout_ms"] -===== `poll_timeout_ms` - - * Value type is <> - * Default value is `100` - -Time kafka consumer will wait to receive new messages from topics - -[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] -===== `receive_buffer_bytes` - - * Value type is <> - * There is no default value for this setting. - -The size of the TCP receive buffer (SO_RCVBUF) to use when reading data. - -[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"] -===== `reconnect_backoff_ms` - - * Value type is <> - * There is no default value for this setting. - -The amount of time to wait before attempting to reconnect to a given host. 
-This avoids repeatedly connecting to a host in a tight loop. -This backoff applies to all requests sent by the consumer to the broker. - -[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"] -===== `request_timeout_ms` - - * Value type is <> - * There is no default value for this setting. - -The configuration controls the maximum amount of time the client will wait -for the response of a request. If the response is not received before the timeout -elapses the client will resend the request if necessary or fail the request if -retries are exhausted. - -[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"] -===== `retry_backoff_ms` - - * Value type is <> - * There is no default value for this setting. - -The amount of time to wait before attempting to retry a failed fetch request -to a given topic partition. This avoids repeated fetching-and-failing in a tight loop. - -[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"] -===== `sasl_kerberos_service_name` - - * Value type is <> - * There is no default value for this setting. - -The Kerberos principal name that Kafka broker runs as. -This can be defined either in Kafka's JAAS config or in Kafka's config. - -[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"] -===== `sasl_mechanism` - - * Value type is <> - * Default value is `"GSSAPI"` - -http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections. -This may be any mechanism for which a security provider is available. -GSSAPI is the default mechanism. - -[id="{version}-plugins-{type}s-{plugin}-security_protocol"] -===== `security_protocol` - - * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL` - * Default value is `"PLAINTEXT"` - -Security protocol to use, which can be either of PLAINTEXT,SSL,SASL_PLAINTEXT,SASL_SSL - -[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"] -===== `send_buffer_bytes` - - * Value type is <> - * There is no default value for this setting. - -The size of the TCP send buffer (SO_SNDBUF) to use when sending data - -[id="{version}-plugins-{type}s-{plugin}-session_timeout_ms"] -===== `session_timeout_ms` - - * Value type is <> - * There is no default value for this setting. - -The timeout after which, if the `poll_timeout_ms` is not invoked, the consumer is marked dead -and a rebalance operation is triggered for the group identified by `group_id` - -[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"] -===== `ssl_key_password` - - * Value type is <> - * There is no default value for this setting. - -The password of the private key in the key store file. - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"] -===== `ssl_keystore_location` - - * Value type is <> - * There is no default value for this setting. - -If client authentication is required, this setting stores the keystore path. - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"] -===== `ssl_keystore_password` - - * Value type is <> - * There is no default value for this setting. - -If client authentication is required, this setting stores the keystore password - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"] -===== `ssl_keystore_type` - - * Value type is <> - * There is no default value for this setting. - -The keystore type. - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"] -===== `ssl_truststore_location` - - * Value type is <> - * There is no default value for this setting. 
-
-The JKS truststore path to validate the Kafka broker's certificate.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"]
-===== `ssl_truststore_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The truststore password.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"]
-===== `ssl_truststore_type`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The truststore type.
-
-[id="{version}-plugins-{type}s-{plugin}-topics"]
-===== `topics`
-
- * Value type is <>
- * Default value is `["logstash"]`
-
-A list of topics to subscribe to, defaults to ["logstash"].
-
-[id="{version}-plugins-{type}s-{plugin}-topics_pattern"]
-===== `topics_pattern`
-
- * Value type is <>
- * There is no default value for this setting.
-
-A topic regex pattern to subscribe to.
-The topics configuration will be ignored when using this configuration.
-
-[id="{version}-plugins-{type}s-{plugin}-value_deserializer_class"]
-===== `value_deserializer_class`
-
- * Value type is <>
- * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"`
-
-Java Class used to deserialize the record's value.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/kafka-v8.0.4.asciidoc b/docs/versioned-plugins/inputs/kafka-v8.0.4.asciidoc
deleted file mode 100644
index 2cd1b4685..000000000
--- a/docs/versioned-plugins/inputs/kafka-v8.0.4.asciidoc
+++ /dev/null
@@ -1,542 +0,0 @@
-:plugin: kafka
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v8.0.4
-:release_date: 2018-01-05
-:changelog_url: https://github.com/logstash-plugins/logstash-input-kafka/blob/v8.0.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Kafka input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This input will read events from a Kafka topic.
-
-This plugin uses Kafka Client 1.0.0. For broker compatibility, see the official https://cwiki.apache.org/confluence/display/KAFKA/Compatibility+Matrix[Kafka compatibility reference].
-
-If you're using a plugin version that was released after {version}, see the https://www.elastic.co/guide/en/logstash/master/plugins-inputs-kafka.html[latest plugin documentation] for updated information about Kafka compatibility.
-
-This input supports connecting to Kafka over:
-
-* SSL (requires plugin version 3.0.0 or later)
-* Kerberos SASL (requires plugin version 5.1.0 or later)
-
-By default security is disabled but can be turned on as needed.
-
-The Logstash Kafka consumer handles group management and uses the default offset management
-strategy using Kafka topics.
-
-Logstash instances by default form a single logical group to subscribe to Kafka topics.
-Each Logstash Kafka consumer can run multiple threads to increase read throughput. Alternatively,
-you could run multiple Logstash instances with the same `group_id` to spread the load across
-physical machines. Messages in a topic will be distributed to all Logstash instances with
-the same `group_id`.
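-
-For example (a sketch; the host names are hypothetical), two Logstash instances could both
-run the configuration below, and Kafka would split the topic's partitions between them
-because they share a `group_id`:
-
-[source,ruby]
-----------------------------------
-input {
-  kafka {
-    bootstrap_servers => "kafka1:9092,kafka2:9092"  # hypothetical brokers
-    topics => ["logstash"]
-    group_id => "logstash"  # same value on every instance that should share the load
-  }
-}
-----------------------------------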
-
-Ideally you should have as many threads as the number of partitions for a perfect balance --
-more threads than partitions means that some threads will be idle.
-
-For more information, see http://kafka.apache.org/documentation.html#theconsumer
-
-Kafka consumer configuration: http://kafka.apache.org/documentation.html#consumerconfigs
-
-==== Metadata fields
-
-The following metadata from the Kafka broker are added under the `[@metadata]` field:
-
-* `[@metadata][kafka][topic]`: Original Kafka topic from where the message was consumed.
-* `[@metadata][kafka][consumer_group]`: Consumer group
-* `[@metadata][kafka][partition]`: Partition info for this message.
-* `[@metadata][kafka][offset]`: Original record offset for this message.
-* `[@metadata][kafka][key]`: Record key, if any.
-* `[@metadata][kafka][timestamp]`: Timestamp when this message was received by the Kafka broker.
-
-Please note that `@metadata` fields are not part of any of your events at output time. If you need this information to be
-inserted into your original event, you'll have to use the `mutate` filter to manually copy the required fields into your `event`.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Kafka Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-auto_offset_reset>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-check_crcs>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-connections_max_idle_ms>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-consumer_threads>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-decorate_events>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-enable_auto_commit>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-exclude_internal_topics>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-fetch_max_bytes>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-fetch_min_bytes>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-group_id>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-key_deserializer_class>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-max_poll_interval_ms>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-max_poll_records>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-partition_assignment_strategy>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-poll_timeout_ms>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No
-|
<<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No -| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topics_pattern>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-value_deserializer_class>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-auto_commit_interval_ms"] -===== `auto_commit_interval_ms` - - * Value type is <> - * Default value is `"5000"` - -The frequency in milliseconds that the consumer offsets are committed to Kafka. - -[id="{version}-plugins-{type}s-{plugin}-auto_offset_reset"] -===== `auto_offset_reset` - - * Value type is <> - * There is no default value for this setting. - -What to do when there is no initial offset in Kafka or if an offset is out of range: - -* earliest: automatically reset the offset to the earliest offset -* latest: automatically reset the offset to the latest offset -* none: throw exception to the consumer if no previous offset is found for the consumer's group -* anything else: throw exception to the consumer. - -[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"] -===== `bootstrap_servers` - - * Value type is <> - * Default value is `"localhost:9092"` - -A list of URLs of Kafka instances to use for establishing the initial connection to the cluster. -This list should be in the form of `host1:port1,host2:port2` These urls are just used -for the initial connection to discover the full cluster membership (which may change dynamically) -so this list need not contain the full set of servers (you may want more than one, though, in -case a server is down). - -[id="{version}-plugins-{type}s-{plugin}-check_crcs"] -===== `check_crcs` - - * Value type is <> - * There is no default value for this setting. - -Automatically check the CRC32 of the records consumed. This ensures no on-the-wire or on-disk -corruption to the messages occurred. This check adds some overhead, so it may be -disabled in cases seeking extreme performance. - -[id="{version}-plugins-{type}s-{plugin}-client_id"] -===== `client_id` - - * Value type is <> - * Default value is `"logstash"` - -The id string to pass to the server when making requests. The purpose of this -is to be able to track the source of requests beyond just ip/port by allowing -a logical application name to be included. - -[id="{version}-plugins-{type}s-{plugin}-connections_max_idle_ms"] -===== `connections_max_idle_ms` - - * Value type is <> - * There is no default value for this setting. - -Close idle connections after the number of milliseconds specified by this config. 
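-
-For example (an assumed value, roughly matching the Kafka client's usual default of nine
-minutes), idle broker connections could be closed like this:
-
-[source,ruby]
-----------------------------------
-input {
-  kafka {
-    connections_max_idle_ms => 540000  # assumed example: close connections idle for 9 minutes
-  }
-}
-----------------------------------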
- -[id="{version}-plugins-{type}s-{plugin}-consumer_threads"] -===== `consumer_threads` - - * Value type is <> - * Default value is `1` - -Ideally you should have as many threads as the number of partitions for a perfect -balance — more threads than partitions means that some threads will be idle - -[id="{version}-plugins-{type}s-{plugin}-decorate_events"] -===== `decorate_events` - - * Value type is <> - * Default value is `false` - -Option to add Kafka metadata like topic, message size to the event. -This will add a field named `kafka` to the logstash event containing the following attributes: - `topic`: The topic this message is associated with - `consumer_group`: The consumer group used to read in this event - `partition`: The partition this message is associated with - `offset`: The offset from the partition this message is associated with - `key`: A ByteBuffer containing the message key - -[id="{version}-plugins-{type}s-{plugin}-enable_auto_commit"] -===== `enable_auto_commit` - - * Value type is <> - * Default value is `"true"` - -If true, periodically commit to Kafka the offsets of messages already returned by the consumer. -This committed offset will be used when the process fails as the position from -which the consumption will begin. - -[id="{version}-plugins-{type}s-{plugin}-exclude_internal_topics"] -===== `exclude_internal_topics` - - * Value type is <> - * There is no default value for this setting. - -Whether records from internal topics (such as offsets) should be exposed to the consumer. -If set to true the only way to receive records from an internal topic is subscribing to it. - -[id="{version}-plugins-{type}s-{plugin}-fetch_max_bytes"] -===== `fetch_max_bytes` - - * Value type is <> - * There is no default value for this setting. - -The maximum amount of data the server should return for a fetch request. This is not an -absolute maximum, if the first message in the first non-empty partition of the fetch is larger -than this value, the message will still be returned to ensure that the consumer can make progress. - -[id="{version}-plugins-{type}s-{plugin}-fetch_max_wait_ms"] -===== `fetch_max_wait_ms` - - * Value type is <> - * There is no default value for this setting. - -The maximum amount of time the server will block before answering the fetch request if -there isn't sufficient data to immediately satisfy `fetch_min_bytes`. This -should be less than or equal to the timeout used in `poll_timeout_ms` - -[id="{version}-plugins-{type}s-{plugin}-fetch_min_bytes"] -===== `fetch_min_bytes` - - * Value type is <> - * There is no default value for this setting. - -The minimum amount of data the server should return for a fetch request. If insufficient -data is available the request will wait for that much data to accumulate -before answering the request. - -[id="{version}-plugins-{type}s-{plugin}-group_id"] -===== `group_id` - - * Value type is <> - * Default value is `"logstash"` - -The identifier of the group this consumer belongs to. Consumer group is a single logical subscriber -that happens to be made up of multiple processors. Messages in a topic will be distributed to all -Logstash instances with the same `group_id` - -[id="{version}-plugins-{type}s-{plugin}-heartbeat_interval_ms"] -===== `heartbeat_interval_ms` - - * Value type is <> - * There is no default value for this setting. - -The expected time between heartbeats to the consumer coordinator. 
Heartbeats are used to ensure -that the consumer's session stays active and to facilitate rebalancing when new -consumers join or leave the group. The value must be set lower than -`session.timeout.ms`, but typically should be set no higher than 1/3 of that value. -It can be adjusted even lower to control the expected time for normal rebalances. - -[id="{version}-plugins-{type}s-{plugin}-jaas_path"] -===== `jaas_path` - - * Value type is <> - * There is no default value for this setting. - -The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization -services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for Kafka client: -[source,java] ----------------------------------- -KafkaClient { - com.sun.security.auth.module.Krb5LoginModule required - useTicketCache=true - renewTicket=true - serviceName="kafka"; - }; ----------------------------------- - -Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these -to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same -`jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on -different JVM instances. - -[id="{version}-plugins-{type}s-{plugin}-kerberos_config"] -===== `kerberos_config` - - * Value type is <> - * There is no default value for this setting. - -Optional path to kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html - -[id="{version}-plugins-{type}s-{plugin}-key_deserializer_class"] -===== `key_deserializer_class` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"` - -Java Class used to deserialize the record's key - -[id="{version}-plugins-{type}s-{plugin}-max_partition_fetch_bytes"] -===== `max_partition_fetch_bytes` - - * Value type is <> - * There is no default value for this setting. - -The maximum amount of data per-partition the server will return. The maximum total memory used for a -request will be #partitions * max.partition.fetch.bytes. This size must be at least -as large as the maximum message size the server allows or else it is possible for the producer to -send messages larger than the consumer can fetch. If that happens, the consumer can get stuck trying -to fetch a large message on a certain partition. - -[id="{version}-plugins-{type}s-{plugin}-max_poll_interval_ms"] -===== `max_poll_interval_ms` - - * Value type is <> - * There is no default value for this setting. - -The maximum delay between invocations of poll() when using consumer group management. This places -an upper bound on the amount of time that the consumer can be idle before fetching more records. -If poll() is not called before expiration of this timeout, then the consumer is considered failed and -the group will rebalance in order to reassign the partitions to another member. -The value of the configuration `request_timeout_ms` must always be larger than max_poll_interval_ms - -[id="{version}-plugins-{type}s-{plugin}-max_poll_records"] -===== `max_poll_records` - - * Value type is <> - * There is no default value for this setting. - -The maximum number of records returned in a single call to poll(). - -[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"] -===== `metadata_max_age_ms` - - * Value type is <> - * There is no default value for this setting. 
-
-The period of time in milliseconds after which we force a refresh of metadata even if
-we haven't seen any partition leadership changes, to proactively discover any new brokers or partitions.
-
-[id="{version}-plugins-{type}s-{plugin}-partition_assignment_strategy"]
-===== `partition_assignment_strategy`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The class name of the partition assignment strategy that the client will use to distribute
-partition ownership amongst consumer instances.
-
-[id="{version}-plugins-{type}s-{plugin}-poll_timeout_ms"]
-===== `poll_timeout_ms`
-
- * Value type is <>
- * Default value is `100`
-
-Time the Kafka consumer will wait to receive new messages from topics.
-
-[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"]
-===== `receive_buffer_bytes`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The size of the TCP receive buffer (SO_RCVBUF) to use when reading data.
-
-[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"]
-===== `reconnect_backoff_ms`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The amount of time to wait before attempting to reconnect to a given host.
-This avoids repeatedly connecting to a host in a tight loop.
-This backoff applies to all requests sent by the consumer to the broker.
-
-[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"]
-===== `request_timeout_ms`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The configuration controls the maximum amount of time the client will wait
-for the response of a request. If the response is not received before the timeout
-elapses, the client will resend the request if necessary or fail the request if
-retries are exhausted.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"]
-===== `retry_backoff_ms`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The amount of time to wait before attempting to retry a failed fetch request
-to a given topic partition. This avoids repeated fetching-and-failing in a tight loop.
-
-[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"]
-===== `sasl_kerberos_service_name`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The Kerberos principal name that the Kafka broker runs as.
-This can be defined either in Kafka's JAAS config or in Kafka's config.
-
-[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"]
-===== `sasl_mechanism`
-
- * Value type is <>
- * Default value is `"GSSAPI"`
-
-http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections.
-This may be any mechanism for which a security provider is available.
-GSSAPI is the default mechanism.
-
-[id="{version}-plugins-{type}s-{plugin}-security_protocol"]
-===== `security_protocol`
-
- * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`
- * Default value is `"PLAINTEXT"`
-
-Security protocol to use. Must be one of `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, or `SASL_SSL`.
-
-[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"]
-===== `send_buffer_bytes`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The size of the TCP send buffer (SO_SNDBUF) to use when sending data.
-
-[id="{version}-plugins-{type}s-{plugin}-session_timeout_ms"]
-===== `session_timeout_ms`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The timeout after which, if no call to poll is made (see `poll_timeout_ms`), the consumer is marked dead
-and a rebalance operation is triggered for the group identified by `group_id`.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"]
-===== `ssl_key_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The password of the private key in the key store file.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"]
-===== `ssl_keystore_location`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If client authentication is required, this setting stores the keystore path.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"]
-===== `ssl_keystore_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If client authentication is required, this setting stores the keystore password.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"]
-===== `ssl_keystore_type`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The keystore type.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"]
-===== `ssl_truststore_location`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The JKS truststore path to validate the Kafka broker's certificate.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"]
-===== `ssl_truststore_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The truststore password.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"]
-===== `ssl_truststore_type`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The truststore type.
-
-[id="{version}-plugins-{type}s-{plugin}-topics"]
-===== `topics`
-
- * Value type is <>
- * Default value is `["logstash"]`
-
-A list of topics to subscribe to, defaults to ["logstash"].
-
-[id="{version}-plugins-{type}s-{plugin}-topics_pattern"]
-===== `topics_pattern`
-
- * Value type is <>
- * There is no default value for this setting.
-
-A topic regex pattern to subscribe to.
-The topics configuration will be ignored when using this configuration.
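-
-A short sketch (the pattern is a hypothetical naming scheme, not anything this plugin
-prescribes) of subscribing by regex rather than by a fixed list:
-
-[source,ruby]
-----------------------------------
-input {
-  kafka {
-    topics_pattern => "app-.*-logs"  # hypothetical pattern; `topics` is ignored when this is set
-  }
-}
-----------------------------------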
- -[id="{version}-plugins-{type}s-{plugin}-value_deserializer_class"] -===== `value_deserializer_class` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringDeserializer"` - -Java Class used to deserialize the record's value - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/kinesis-index.asciidoc b/docs/versioned-plugins/inputs/kinesis-index.asciidoc deleted file mode 100644 index 565607741..000000000 --- a/docs/versioned-plugins/inputs/kinesis-index.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -:plugin: kinesis -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-14 -| <> | 2017-08-22 -| <> | 2017-07-11 -| <> | 2017-06-27 -|======================================================================= - -include::kinesis-v2.0.7.asciidoc[] -include::kinesis-v2.0.6.asciidoc[] -include::kinesis-v2.0.5.asciidoc[] -include::kinesis-v2.0.4.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/kinesis-v2.0.4.asciidoc b/docs/versioned-plugins/inputs/kinesis-v2.0.4.asciidoc deleted file mode 100644 index 016c636c1..000000000 --- a/docs/versioned-plugins/inputs/kinesis-v2.0.4.asciidoc +++ /dev/null @@ -1,105 +0,0 @@ -Sending Logstash's logs to which is now configured via log4j2.properties -~~~ASCIIDOC_DOCUMENT~~~ -:plugin: kinesis -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.0.4 -:release_date: 2017-06-27 -:changelog_url: https://github.com/logstash-plugins/logstash-input-kinesis/blob/v2.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Kinesis input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Receive events through an AWS Kinesis stream. - -This input plugin uses the Java Kinesis Client Library underneath, so the -documentation at https://github.com/awslabs/amazon-kinesis-client will be -useful. - -AWS credentials can be specified either through environment variables, or an -IAM instance role. The library uses a DynamoDB table for worker coordination, -so you'll need to grant access to that as well as to the Kinesis stream. The -DynamoDB table has the same name as the `application_name` configuration -option, which defaults to "logstash". - -The library can optionally also send worker statistics to CloudWatch. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Kinesis Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-application_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-checkpoint_interval_seconds>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-kinesis_stream_name>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>, one of `[nil, "cloudwatch"]`|No -| <<{version}-plugins-{type}s-{plugin}-region>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-application_name"] -===== `application_name` - - * Value type is <> - * Default value is `"logstash"` - -The application name used for the dynamodb coordination table. Must be -unique for this kinesis stream. - -[id="{version}-plugins-{type}s-{plugin}-checkpoint_interval_seconds"] -===== `checkpoint_interval_seconds` - - * Value type is <> - * Default value is `60` - -How many seconds between worker checkpoints to dynamodb. - -[id="{version}-plugins-{type}s-{plugin}-kinesis_stream_name"] -===== `kinesis_stream_name` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The kinesis stream name. - -[id="{version}-plugins-{type}s-{plugin}-metrics"] -===== `metrics` - - * Value can be any of: ``, `cloudwatch` - * Default value is `nil` - -Worker metric tracking. By default this is disabled, set it to "cloudwatch" -to enable the cloudwatch integration in the Kinesis Client Library. - -[id="{version}-plugins-{type}s-{plugin}-region"] -===== `region` - - * Value type is <> - * Default value is `"us-east-1"` - -The AWS region for Kinesis, DynamoDB, and CloudWatch (if enabled) - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/kinesis-v2.0.5.asciidoc b/docs/versioned-plugins/inputs/kinesis-v2.0.5.asciidoc deleted file mode 100644 index 848438ca3..000000000 --- a/docs/versioned-plugins/inputs/kinesis-v2.0.5.asciidoc +++ /dev/null @@ -1,112 +0,0 @@ -:plugin: kinesis -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.0.5 -:release_date: 2017-07-11 -:changelog_url: https://github.com/logstash-plugins/logstash-input-kinesis/blob/v2.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Kinesis input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Receive events through an AWS Kinesis stream. - -This input plugin uses the Java Kinesis Client Library underneath, so the -documentation at https://github.com/awslabs/amazon-kinesis-client will be -useful. - -AWS credentials can be specified either through environment variables, or an -IAM instance role. The library uses a DynamoDB table for worker coordination, -so you'll need to grant access to that as well as to the Kinesis stream. The -DynamoDB table has the same name as the `application_name` configuration -option, which defaults to "logstash". 
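-
-A minimal sketch, assuming a hypothetical stream name (only `kinesis_stream_name` is
-required; `application_name` also names the DynamoDB coordination table):
-
-[source,ruby]
-----------------------------------
-input {
-  kinesis {
-    kinesis_stream_name => "my-logging-stream"  # hypothetical stream
-    application_name => "logstash"              # also the DynamoDB table name
-    region => "us-east-1"
-  }
-}
-----------------------------------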
- -The library can optionally also send worker statistics to CloudWatch. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Kinesis Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-application_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-checkpoint_interval_seconds>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-kinesis_stream_name>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>, one of `[nil, "cloudwatch"]`|No -| <<{version}-plugins-{type}s-{plugin}-region>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-profile>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-application_name"] -===== `application_name` - - * Value type is <> - * Default value is `"logstash"` - -The application name used for the dynamodb coordination table. Must be -unique for this kinesis stream. - -[id="{version}-plugins-{type}s-{plugin}-checkpoint_interval_seconds"] -===== `checkpoint_interval_seconds` - - * Value type is <> - * Default value is `60` - -How many seconds between worker checkpoints to dynamodb. - -[id="{version}-plugins-{type}s-{plugin}-kinesis_stream_name"] -===== `kinesis_stream_name` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The kinesis stream name. - -[id="{version}-plugins-{type}s-{plugin}-metrics"] -===== `metrics` - - * Value can be any of: ``, `cloudwatch` - * Default value is `nil` - -Worker metric tracking. By default this is disabled, set it to "cloudwatch" -to enable the cloudwatch integration in the Kinesis Client Library. - -[id="{version}-plugins-{type}s-{plugin}-region"] -===== `region` - - * Value type is <> - * Default value is `"us-east-1"` - -The AWS region for Kinesis, DynamoDB, and CloudWatch (if enabled) - -[id="{version}-plugins-{type}s-{plugin}-profile"] -===== `profile` - - * Value type is <> - * Default value is `nil` - -The AWS profile name for authentication. -This ensures that the `~/.aws/credentials` AWS auth provider is used. -By default this is empty and the default chain will be used. - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/kinesis-v2.0.6.asciidoc b/docs/versioned-plugins/inputs/kinesis-v2.0.6.asciidoc deleted file mode 100644 index 39c7da000..000000000 --- a/docs/versioned-plugins/inputs/kinesis-v2.0.6.asciidoc +++ /dev/null @@ -1,112 +0,0 @@ -:plugin: kinesis -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.0.6 -:release_date: 2017-08-22 -:changelog_url: https://github.com/logstash-plugins/logstash-input-kinesis/blob/v2.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Kinesis input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Receive events through an AWS Kinesis stream. - -This input plugin uses the Java Kinesis Client Library underneath, so the -documentation at https://github.com/awslabs/amazon-kinesis-client will be -useful. - -AWS credentials can be specified either through environment variables, or an -IAM instance role. The library uses a DynamoDB table for worker coordination, -so you'll need to grant access to that as well as to the Kinesis stream. The -DynamoDB table has the same name as the `application_name` configuration -option, which defaults to "logstash". - -The library can optionally also send worker statistics to CloudWatch. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Kinesis Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-application_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-checkpoint_interval_seconds>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-kinesis_stream_name>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>, one of `[nil, "cloudwatch"]`|No -| <<{version}-plugins-{type}s-{plugin}-region>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-profile>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-application_name"] -===== `application_name` - - * Value type is <> - * Default value is `"logstash"` - -The application name used for the dynamodb coordination table. Must be -unique for this kinesis stream. - -[id="{version}-plugins-{type}s-{plugin}-checkpoint_interval_seconds"] -===== `checkpoint_interval_seconds` - - * Value type is <> - * Default value is `60` - -How many seconds between worker checkpoints to dynamodb. - -[id="{version}-plugins-{type}s-{plugin}-kinesis_stream_name"] -===== `kinesis_stream_name` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The kinesis stream name. - -[id="{version}-plugins-{type}s-{plugin}-metrics"] -===== `metrics` - - * Value can be any of: ``, `cloudwatch` - * Default value is `nil` - -Worker metric tracking. By default this is disabled, set it to "cloudwatch" -to enable the cloudwatch integration in the Kinesis Client Library. - -[id="{version}-plugins-{type}s-{plugin}-region"] -===== `region` - - * Value type is <> - * Default value is `"us-east-1"` - -The AWS region for Kinesis, DynamoDB, and CloudWatch (if enabled) - -[id="{version}-plugins-{type}s-{plugin}-profile"] -===== `profile` - - * Value type is <> - * Default value is `nil` - -The AWS profile name for authentication. -This ensures that the `~/.aws/credentials` AWS auth provider is used. -By default this is empty and the default chain will be used. 
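-
-For example (the profile name is an assumption), forcing a named profile from
-`~/.aws/credentials` instead of the default provider chain:
-
-[source,ruby]
-----------------------------------
-input {
-  kinesis {
-    kinesis_stream_name => "my-logging-stream"  # hypothetical stream
-    profile => "logging"                        # hypothetical profile name
-  }
-}
-----------------------------------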
- -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/kinesis-v2.0.7.asciidoc b/docs/versioned-plugins/inputs/kinesis-v2.0.7.asciidoc deleted file mode 100644 index 4a9335fe3..000000000 --- a/docs/versioned-plugins/inputs/kinesis-v2.0.7.asciidoc +++ /dev/null @@ -1,112 +0,0 @@ -:plugin: kinesis -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.0.7 -:release_date: 2017-11-14 -:changelog_url: https://github.com/logstash-plugins/logstash-input-kinesis/blob/v2.0.7/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Kinesis input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Receive events through an AWS Kinesis stream. - -This input plugin uses the Java Kinesis Client Library underneath, so the -documentation at https://github.com/awslabs/amazon-kinesis-client will be -useful. - -AWS credentials can be specified either through environment variables, or an -IAM instance role. The library uses a DynamoDB table for worker coordination, -so you'll need to grant access to that as well as to the Kinesis stream. The -DynamoDB table has the same name as the `application_name` configuration -option, which defaults to "logstash". - -The library can optionally also send worker statistics to CloudWatch. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Kinesis Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-application_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-checkpoint_interval_seconds>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-kinesis_stream_name>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>, one of `[nil, "cloudwatch"]`|No -| <<{version}-plugins-{type}s-{plugin}-region>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-profile>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-application_name"] -===== `application_name` - - * Value type is <> - * Default value is `"logstash"` - -The application name used for the dynamodb coordination table. Must be -unique for this kinesis stream. - -[id="{version}-plugins-{type}s-{plugin}-checkpoint_interval_seconds"] -===== `checkpoint_interval_seconds` - - * Value type is <> - * Default value is `60` - -How many seconds between worker checkpoints to dynamodb. - -[id="{version}-plugins-{type}s-{plugin}-kinesis_stream_name"] -===== `kinesis_stream_name` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The kinesis stream name. - -[id="{version}-plugins-{type}s-{plugin}-metrics"] -===== `metrics` - - * Value can be any of: ``, `cloudwatch` - * Default value is `nil` - -Worker metric tracking. 
-to enable the CloudWatch integration in the Kinesis Client Library.
-
-[id="{version}-plugins-{type}s-{plugin}-region"]
-===== `region`
-
- * Value type is <>
- * Default value is `"us-east-1"`
-
-The AWS region for Kinesis, DynamoDB, and CloudWatch (if enabled).
-
-[id="{version}-plugins-{type}s-{plugin}-profile"]
-===== `profile`
-
- * Value type is <>
- * Default value is `nil`
-
-The AWS profile name for authentication.
-This ensures that the `~/.aws/credentials` AWS auth provider is used.
-By default this is empty and the default chain will be used.
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/log4j-index.asciidoc b/docs/versioned-plugins/inputs/log4j-index.asciidoc
deleted file mode 100644
index ed17ba880..000000000
--- a/docs/versioned-plugins/inputs/log4j-index.asciidoc
+++ /dev/null
@@ -1,18 +0,0 @@
-:plugin: log4j
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-15
-| <> | 2017-08-01
-| <> | 2017-06-23
-|=======================================================================
-
-include::log4j-v3.1.2.asciidoc[]
-include::log4j-v3.1.1.asciidoc[]
-include::log4j-v3.1.0.asciidoc[]
-include::log4j-v3.0.6.asciidoc[]
-
diff --git a/docs/versioned-plugins/inputs/log4j-v3.0.6.asciidoc b/docs/versioned-plugins/inputs/log4j-v3.0.6.asciidoc
deleted file mode 100644
index 1d7205ea6..000000000
--- a/docs/versioned-plugins/inputs/log4j-v3.0.6.asciidoc
+++ /dev/null
@@ -1,171 +0,0 @@
-:plugin: log4j
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.6
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-input-log4j/blob/v3.0.6/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Log4j input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-==== Deprecation Notice
-
-NOTE: This plugin is deprecated. It is recommended that you use filebeat to collect logs from log4j.
-
-The following section is a guide for migrating from SocketAppender to filebeat.
-
-To migrate away from the log4j SocketAppender to filebeat, you will need to make three changes:
-
-1) Configure your log4j.properties (in your app) to write to a local file.
-2) Install and configure filebeat to collect those logs and ship them to Logstash.
-3) Configure Logstash to use the beats input.
-
-.Configuring log4j for writing to local files
-
-In your log4j.properties file, remove SocketAppender and replace it with RollingFileAppender.
-
-For example, you can use the following log4j.properties configuration to write daily log files.
-
-    # Your app's log4j.properties (log4j 1.2 only)
-    log4j.rootLogger=daily
-    log4j.appender.daily=org.apache.log4j.rolling.RollingFileAppender
-    log4j.appender.daily.RollingPolicy=org.apache.log4j.rolling.TimeBasedRollingPolicy
-    log4j.appender.daily.RollingPolicy.FileNamePattern=/var/log/your-app/app.%d.log
-    log4j.appender.daily.layout = org.apache.log4j.PatternLayout
-    log4j.appender.daily.layout.ConversionPattern=%d{YYYY-MM-dd HH:mm:ss,SSSZ} %p %c{1}:%L - %m%n
-
-Configuring log4j.properties in more detail is outside the scope of this migration guide.
-
-.Configuring filebeat
-
-Next,
-https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-installation.html[install
-filebeat]. Based on the above log4j.properties, we can use this filebeat
-configuration:
-
-    # filebeat.yml
-    filebeat:
-      prospectors:
-        -
-          paths:
-            - /var/log/your-app/app.*.log
-          input_type: log
-    output:
-      logstash:
-        hosts: ["your-logstash-host:5000"]
-
-For more details on configuring filebeat, see
-https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-configuration.html[the filebeat configuration guide].
-
-.Configuring Logstash to receive from filebeat
-
-Finally, configure Logstash with a beats input:
-
-    # logstash configuration
-    input {
-      beats {
-        port => 5000
-      }
-    }
-
-It is strongly recommended that you also enable TLS in filebeat and the
-logstash beats input to protect your log data.
-
-For more details on configuring the beats input, see
-https://www.elastic.co/guide/en/logstash/current/plugins-inputs-beats.html[the logstash beats input documentation].
-
-'''
-
-Read events over a TCP socket from a Log4j SocketAppender. This plugin works only with log4j version 1.x.
-
-Can either accept connections from clients or connect to a server,
-depending on `mode`. Depending on which `mode` is configured,
-you need a matching SocketAppender or a SocketHubAppender
-on the remote side.
-
-One event is created per received log4j LoggingEvent with the following schema:
-
-* `timestamp` => the number of milliseconds elapsed from 1/1/1970 until the logging event was created.
-* `path` => the name of the logger
-* `priority` => the level of this event
-* `logger_name` => the name of the logger
-* `thread` => the thread name making the logging request
-* `class` => the fully qualified class name of the caller making the logging request.
-* `file` => the source file name and line number of the caller making the logging request in a colon-separated format "fileName:lineNumber".
-* `method` => the method name of the caller making the logging request.
-* `NDC` => the NDC string
-* `stack_trace` => the multi-line stack-trace
-
-Also, if the original log4j LoggingEvent contains MDC hash entries, they will be merged into the event as fields.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Log4j Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * Value type is <>
- * Default value is `"0.0.0.0"`
-
-When mode is `server`, the address to listen on.
-When mode is `client`, the address to connect to.
-
-[id="{version}-plugins-{type}s-{plugin}-mode"]
-===== `mode`
-
- * Value can be any of: `server`, `client`
- * Default value is `"server"`
-
-Mode to operate in. `server` listens for client connections,
-`client` connects to a server.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <>
- * Default value is `4560`
-
-When mode is `server`, the port to listen on.
-When mode is `client`, the port to connect to.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"]
-===== `proxy_protocol`
-
- * Value type is <>
- * Default value is `false`
-
-Proxy protocol support; only v1 is supported at this time:
-http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/log4j-v3.1.0.asciidoc b/docs/versioned-plugins/inputs/log4j-v3.1.0.asciidoc
deleted file mode 100644
index c0255b298..000000000
--- a/docs/versioned-plugins/inputs/log4j-v3.1.0.asciidoc
+++ /dev/null
@@ -1,169 +0,0 @@
-:plugin: log4j
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.1.0
-:release_date: 2017-08-01
-:changelog_url: https://github.com/logstash-plugins/logstash-input-log4j/blob/v3.1.0/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Log4j input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Deprecation Notice
-
-NOTE: This plugin is deprecated. It is recommended that you use filebeat to collect logs from log4j.
-
-The following section is a guide for migrating from SocketAppender to filebeat.
-
-To migrate away from the log4j SocketAppender to filebeat, you will need to make three changes:
-
-1) Configure your log4j.properties (in your app) to write to a local file.
-2) Install and configure filebeat to collect those logs and ship them to Logstash.
-3) Configure Logstash to use the beats input.
-
-.Configuring log4j for writing to local files
-
-In your log4j.properties file, remove SocketAppender and replace it with RollingFileAppender.
-
-For example, you can use the following log4j.properties configuration to write daily log files.
-
-    # Your app's log4j.properties (log4j 1.2 only)
-    log4j.rootLogger=daily
-    log4j.appender.daily=org.apache.log4j.rolling.RollingFileAppender
-    log4j.appender.daily.RollingPolicy=org.apache.log4j.rolling.TimeBasedRollingPolicy
-    log4j.appender.daily.RollingPolicy.FileNamePattern=/var/log/your-app/app.%d.log
-    log4j.appender.daily.layout = org.apache.log4j.PatternLayout
-    log4j.appender.daily.layout.ConversionPattern=%d{YYYY-MM-dd HH:mm:ss,SSSZ} %p %c{1}:%L - %m%n
-
-Configuring log4j.properties in more detail is outside the scope of this migration guide.
-
-.Configuring filebeat
-
-Next,
-https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-installation.html[install
-filebeat]. Based on the above log4j.properties, we can use this filebeat
-configuration:
-
-    # filebeat.yml
-    filebeat:
-      prospectors:
-        -
-          paths:
-            - /var/log/your-app/app.*.log
-          input_type: log
-    output:
-      logstash:
-        hosts: ["your-logstash-host:5000"]
-
-For more details on configuring filebeat, see
-https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-configuration.html[the filebeat configuration guide].
-
-.Configuring Logstash to receive from filebeat
-
-Finally, configure Logstash with a beats input:
-
-    # logstash configuration
-    input {
-      beats {
-        port => 5000
-      }
-    }
-
-It is strongly recommended that you also enable TLS in filebeat and the
-logstash beats input to protect your log data.
-
-For more details on configuring the beats input, see
-https://www.elastic.co/guide/en/logstash/current/plugins-inputs-beats.html[the logstash beats input documentation].
-
-==== Description
-
-Read events over a TCP socket from a Log4j SocketAppender. This plugin works only with log4j version 1.x.
-
-Can either accept connections from clients or connect to a server,
-depending on `mode`. Depending on which `mode` is configured,
-you need a matching SocketAppender or a SocketHubAppender
-on the remote side.
-
-One event is created per received log4j LoggingEvent with the following schema:
-
-* `timestamp` => the number of milliseconds elapsed from 1/1/1970 until the logging event was created.
-* `path` => the name of the logger
-* `priority` => the level of this event
-* `logger_name` => the name of the logger
-* `thread` => the thread name making the logging request
-* `class` => the fully qualified class name of the caller making the logging request.
-* `file` => the source file name and line number of the caller making the logging request in a colon-separated format "fileName:lineNumber".
-* `method` => the method name of the caller making the logging request.
-* `NDC` => the NDC string
-* `stack_trace` => the multi-line stack-trace
-
-Also, if the original log4j LoggingEvent contains MDC hash entries, they will be merged into the event as fields.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Log4j Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * Value type is <>
- * Default value is `"0.0.0.0"`
-
-When mode is `server`, the address to listen on.
-When mode is `client`, the address to connect to.
-
-[id="{version}-plugins-{type}s-{plugin}-mode"]
-===== `mode`
-
- * Value can be any of: `server`, `client`
- * Default value is `"server"`
-
-Mode to operate in. `server` listens for client connections,
-`client` connects to a server.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <>
- * Default value is `4560`
-
-When mode is `server`, the port to listen on.
-When mode is `client`, the port to connect to.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"]
-===== `proxy_protocol`
-
- * Value type is <>
- * Default value is `false`
-
-Proxy protocol support; only v1 is supported at this time:
-http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/log4j-v3.1.1.asciidoc b/docs/versioned-plugins/inputs/log4j-v3.1.1.asciidoc
deleted file mode 100644
index 703bb94fe..000000000
--- a/docs/versioned-plugins/inputs/log4j-v3.1.1.asciidoc
+++ /dev/null
@@ -1,169 +0,0 @@
-:plugin: log4j
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.1.1
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-input-log4j/blob/v3.1.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Log4j input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Deprecation Notice
-
-NOTE: This plugin is deprecated. It is recommended that you use filebeat to collect logs from log4j.
-
-The following section is a guide for migrating from SocketAppender to filebeat.
-
-To migrate away from the log4j SocketAppender to filebeat, you will need to make three changes:
-
-1) Configure your log4j.properties (in your app) to write to a local file.
-2) Install and configure filebeat to collect those logs and ship them to Logstash.
-3) Configure Logstash to use the beats input.
-
-.Configuring log4j for writing to local files
-
-In your log4j.properties file, remove SocketAppender and replace it with RollingFileAppender.
-
-For example, you can use the following log4j.properties configuration to write daily log files.
-
-    # Your app's log4j.properties (log4j 1.2 only)
-    log4j.rootLogger=daily
-    log4j.appender.daily=org.apache.log4j.rolling.RollingFileAppender
-    log4j.appender.daily.RollingPolicy=org.apache.log4j.rolling.TimeBasedRollingPolicy
-    log4j.appender.daily.RollingPolicy.FileNamePattern=/var/log/your-app/app.%d.log
-    log4j.appender.daily.layout = org.apache.log4j.PatternLayout
-    log4j.appender.daily.layout.ConversionPattern=%d{YYYY-MM-dd HH:mm:ss,SSSZ} %p %c{1}:%L - %m%n
-
-Configuring log4j.properties in more detail is outside the scope of this migration guide.
-
-.Configuring filebeat
-
-Next,
-https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-installation.html[install
-filebeat]. Based on the above log4j.properties, we can use this filebeat
-configuration:
-
-    # filebeat.yml
-    filebeat:
-      prospectors:
-        -
-          paths:
-            - /var/log/your-app/app.*.log
-          input_type: log
-    output:
-      logstash:
-        hosts: ["your-logstash-host:5000"]
-
-For more details on configuring filebeat, see
-https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-configuration.html[the filebeat configuration guide].
-
-.Configuring Logstash to receive from filebeat
-
-Finally, configure Logstash with a beats input:
-
-    # logstash configuration
-    input {
-      beats {
-        port => 5000
-      }
-    }
-
-It is strongly recommended that you also enable TLS in filebeat and the
-logstash beats input to protect your log data.
-
-For more details on configuring the beats input, see
-https://www.elastic.co/guide/en/logstash/current/plugins-inputs-beats.html[the logstash beats input documentation].
-
-==== Description
-
-Read events over a TCP socket from a Log4j SocketAppender. This plugin works only with log4j version 1.x.
-
-Can either accept connections from clients or connect to a server,
-depending on `mode`. Depending on which `mode` is configured,
-you need a matching SocketAppender or a SocketHubAppender
-on the remote side.
-
-One event is created per received log4j LoggingEvent with the following schema:
-
-* `timestamp` => the number of milliseconds elapsed from 1/1/1970 until the logging event was created.
-* `path` => the name of the logger
-* `priority` => the level of this event
-* `logger_name` => the name of the logger
-* `thread` => the thread name making the logging request
-* `class` => the fully qualified class name of the caller making the logging request.
-* `file` => the source file name and line number of the caller making the logging request in a colon-separated format "fileName:lineNumber".
-* `method` => the method name of the caller making the logging request.
-* `NDC` => the NDC string
-* `stack_trace` => the multi-line stack-trace
-
-Also, if the original log4j LoggingEvent contains MDC hash entries, they will be merged into the event as fields.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Log4j Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * Value type is <>
- * Default value is `"0.0.0.0"`
-
-When mode is `server`, the address to listen on.
-When mode is `client`, the address to connect to.
-
-[id="{version}-plugins-{type}s-{plugin}-mode"]
-===== `mode`
-
- * Value can be any of: `server`, `client`
- * Default value is `"server"`
-
-Mode to operate in. `server` listens for client connections,
-`client` connects to a server.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <>
- * Default value is `4560`
-
-When mode is `server`, the port to listen on.
-When mode is `client`, the port to connect to.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"]
-===== `proxy_protocol`
-
- * Value type is <>
- * Default value is `false`
-
-Proxy protocol support; only v1 is supported at this time:
-http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/log4j-v3.1.2.asciidoc b/docs/versioned-plugins/inputs/log4j-v3.1.2.asciidoc
deleted file mode 100644
index d2721bbc1..000000000
--- a/docs/versioned-plugins/inputs/log4j-v3.1.2.asciidoc
+++ /dev/null
@@ -1,169 +0,0 @@
-:plugin: log4j
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.1.2
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-input-log4j/blob/v3.1.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Log4j input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Deprecation Notice
-
-NOTE: This plugin is deprecated. It is recommended that you use filebeat to collect logs from log4j.
-
-The following section is a guide for migrating from SocketAppender to filebeat.
-
-To migrate away from the log4j SocketAppender to filebeat, you will need to make three changes:
-
-1) Configure your log4j.properties (in your app) to write to a local file.
-2) Install and configure filebeat to collect those logs and ship them to Logstash.
-3) Configure Logstash to use the beats input.
-
-.Configuring log4j for writing to local files
-
-In your log4j.properties file, remove SocketAppender and replace it with RollingFileAppender.
-
-For example, you can use the following log4j.properties configuration to write daily log files.
-
-    # Your app's log4j.properties (log4j 1.2 only)
-    log4j.rootLogger=daily
-    log4j.appender.daily=org.apache.log4j.rolling.RollingFileAppender
-    log4j.appender.daily.RollingPolicy=org.apache.log4j.rolling.TimeBasedRollingPolicy
-    log4j.appender.daily.RollingPolicy.FileNamePattern=/var/log/your-app/app.%d.log
-    log4j.appender.daily.layout = org.apache.log4j.PatternLayout
-    log4j.appender.daily.layout.ConversionPattern=%d{YYYY-MM-dd HH:mm:ss,SSSZ} %p %c{1}:%L - %m%n
-
-Configuring log4j.properties in more detail is outside the scope of this migration guide.
-
-.Configuring filebeat
-
-Next,
-https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-installation.html[install
-filebeat]. Based on the above log4j.properties, we can use this filebeat
-configuration:
-
-    # filebeat.yml
-    filebeat:
-      prospectors:
-        -
-          paths:
-            - /var/log/your-app/app.*.log
-          input_type: log
-    output:
-      logstash:
-        hosts: ["your-logstash-host:5000"]
-
-For more details on configuring filebeat, see
-https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-configuration.html[the filebeat configuration guide].
-
-.Configuring Logstash to receive from filebeat
-
-Finally, configure Logstash with a beats input:
-
-    # logstash configuration
-    input {
-      beats {
-        port => 5000
-      }
-    }
-
-It is strongly recommended that you also enable TLS in filebeat and the
-logstash beats input to protect your log data.
-
-For more details on configuring the beats input, see
-https://www.elastic.co/guide/en/logstash/current/plugins-inputs-beats.html[the logstash beats input documentation].
-
-==== Description
-
-Read events over a TCP socket from a Log4j SocketAppender. This plugin works only with log4j version 1.x.
-
-Can either accept connections from clients or connect to a server,
-depending on `mode`. Depending on which `mode` is configured,
-you need a matching SocketAppender or a SocketHubAppender
-on the remote side.
-
-One event is created per received log4j LoggingEvent with the following schema:
-
-* `timestamp` => the number of milliseconds elapsed from 1/1/1970 until the logging event was created.
-* `path` => the name of the logger
-* `priority` => the level of this event
-* `logger_name` => the name of the logger
-* `thread` => the thread name making the logging request
-* `class` => the fully qualified class name of the caller making the logging request.
-* `file` => the source file name and line number of the caller making the logging request in a colon-separated format "fileName:lineNumber".
-* `method` => the method name of the caller making the logging request.
-* `NDC` => the NDC string
-* `stack_trace` => the multi-line stack-trace
-
-Also, if the original log4j LoggingEvent contains MDC hash entries, they will be merged into the event as fields.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Log4j Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * Value type is <>
- * Default value is `"0.0.0.0"`
-
-When mode is `server`, the address to listen on.
-When mode is `client`, the address to connect to.
-
-[id="{version}-plugins-{type}s-{plugin}-mode"]
-===== `mode`
-
- * Value can be any of: `server`, `client`
- * Default value is `"server"`
-
-Mode to operate in. `server` listens for client connections,
-`client` connects to a server.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <>
- * Default value is `4560`
-
-When mode is `server`, the port to listen on.
-When mode is `client`, the port to connect to.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"]
-===== `proxy_protocol`
-
- * Value type is <>
- * Default value is `false`
-
-Proxy protocol support; only v1 is supported at this time:
-http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/log4j2-index.asciidoc b/docs/versioned-plugins/inputs/log4j2-index.asciidoc
deleted file mode 100644
index fd9f25a6a..000000000
--- a/docs/versioned-plugins/inputs/log4j2-index.asciidoc
+++ /dev/null
@@ -1,10 +0,0 @@
-:plugin: log4j2
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-|=======================================================================
-
-
diff --git a/docs/versioned-plugins/inputs/lumberjack-index.asciidoc b/docs/versioned-plugins/inputs/lumberjack-index.asciidoc
deleted file mode 100644
index 651759204..000000000
--- a/docs/versioned-plugins/inputs/lumberjack-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: lumberjack
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-15
-| <> | 2017-06-23
-|=======================================================================
-
-include::lumberjack-v3.1.4.asciidoc[]
-include::lumberjack-v3.1.3.asciidoc[]
-include::lumberjack-v3.1.2.asciidoc[]
-
diff --git a/docs/versioned-plugins/inputs/lumberjack-v3.1.2.asciidoc b/docs/versioned-plugins/inputs/lumberjack-v3.1.2.asciidoc
deleted file mode 100644
index 66a95a218..000000000
--- a/docs/versioned-plugins/inputs/lumberjack-v3.1.2.asciidoc
+++ /dev/null
@@ -1,112 +0,0 @@
-:plugin: lumberjack
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.1.2
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-input-lumberjack/blob/v3.1.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Lumberjack input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Receive events using the Lumberjack protocol.
-
-This input can be used to reliably and securely transport
-events between Logstash instances. To do so, use the
-<>
-in the sending Logstash instance(s).
-
-It can also be used to receive events from the deprecated
-https://github.com/elastic/logstash-forwarder[logstash-forwarder]
-tool that has been replaced by
-https://github.com/elastic/beats/tree/master/filebeat[Filebeat].
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Lumberjack Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-congestion_threshold>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|Yes
-| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|Yes
-| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"]
-===== `congestion_threshold`
-
- * Value type is <>
- * Default value is `5`
-
-The number of seconds before we raise a timeout. This option is useful
-for controlling how long to wait if something is blocking the pipeline.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * Value type is <>
- * Default value is `"0.0.0.0"`
-
-The IP address to listen on.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The port to listen on.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
-===== `ssl_certificate`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-SSL certificate to use.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
-===== `ssl_key`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-SSL key to use.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
-===== `ssl_key_passphrase`
-
- * Value type is <>
- * There is no default value for this setting.
-
-SSL key passphrase to use.
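-
-As a quick sketch, a listener that satisfies the three required settings
-might look like the following; the port and certificate paths are
-placeholders, not defaults from this documentation:
-
-[source, ruby]
-input {
-  lumberjack {
-    port => 5043
-    ssl_certificate => "/path/to/server.crt"
-    ssl_key => "/path/to/server.key"
-  }
-}
-
-Pair this with the lumberjack output on the sending Logstash instance, as
-described above.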
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/lumberjack-v3.1.3.asciidoc b/docs/versioned-plugins/inputs/lumberjack-v3.1.3.asciidoc
deleted file mode 100644
index 666c47368..000000000
--- a/docs/versioned-plugins/inputs/lumberjack-v3.1.3.asciidoc
+++ /dev/null
@@ -1,112 +0,0 @@
-:plugin: lumberjack
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.1.3
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-input-lumberjack/blob/v3.1.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Lumberjack input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Receive events using the Lumberjack protocol.
-
-This input can be used to reliably and securely transport
-events between Logstash instances. To do so, use the
-<>
-in the sending Logstash instance(s).
-
-It can also be used to receive events from the deprecated
-https://github.com/elastic/logstash-forwarder[logstash-forwarder]
-tool that has been replaced by
-https://github.com/elastic/beats/tree/master/filebeat[Filebeat].
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Lumberjack Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-congestion_threshold>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|Yes
-| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|Yes
-| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"]
-===== `congestion_threshold`
-
- * Value type is <>
- * Default value is `5`
-
-The number of seconds before we raise a timeout. This option is useful
-for controlling how long to wait if something is blocking the pipeline.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * Value type is <>
- * Default value is `"0.0.0.0"`
-
-The IP address to listen on.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The port to listen on.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
-===== `ssl_certificate`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-SSL certificate to use.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
-===== `ssl_key`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-SSL key to use.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
-===== `ssl_key_passphrase`
-
- * Value type is <>
- * There is no default value for this setting.
-
-SSL key passphrase to use.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/lumberjack-v3.1.4.asciidoc b/docs/versioned-plugins/inputs/lumberjack-v3.1.4.asciidoc
deleted file mode 100644
index bd0514369..000000000
--- a/docs/versioned-plugins/inputs/lumberjack-v3.1.4.asciidoc
+++ /dev/null
@@ -1,112 +0,0 @@
-:plugin: lumberjack
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.1.4
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-input-lumberjack/blob/v3.1.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Lumberjack input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Receive events using the Lumberjack protocol.
-
-This input can be used to reliably and securely transport
-events between Logstash instances. To do so, use the
-<>
-in the sending Logstash instance(s).
-
-It can also be used to receive events from the deprecated
-https://github.com/elastic/logstash-forwarder[logstash-forwarder]
-tool that has been replaced by
-https://github.com/elastic/beats/tree/master/filebeat[Filebeat].
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Lumberjack Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-congestion_threshold>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|Yes
-| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|Yes
-| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"]
-===== `congestion_threshold`
-
- * Value type is <>
- * Default value is `5`
-
-The number of seconds before we raise a timeout. This option is useful
-for controlling how long to wait if something is blocking the pipeline.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * Value type is <>
- * Default value is `"0.0.0.0"`
-
-The IP address to listen on.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The port to listen on.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"]
-===== `ssl_certificate`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-SSL certificate to use.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
-===== `ssl_key`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-SSL key to use.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
-===== `ssl_key_passphrase`
-
- * Value type is <>
- * There is no default value for this setting.
-
-SSL key passphrase to use.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/meetup-index.asciidoc b/docs/versioned-plugins/inputs/meetup-index.asciidoc
deleted file mode 100644
index baff7792e..000000000
--- a/docs/versioned-plugins/inputs/meetup-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: meetup
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-15
-| <> | 2017-06-23
-|=======================================================================
-
-include::meetup-v3.0.3.asciidoc[]
-include::meetup-v3.0.2.asciidoc[]
-include::meetup-v3.0.1.asciidoc[]
-
diff --git a/docs/versioned-plugins/inputs/meetup-v3.0.1.asciidoc b/docs/versioned-plugins/inputs/meetup-v3.0.1.asciidoc
deleted file mode 100644
index 5802be8cf..000000000
--- a/docs/versioned-plugins/inputs/meetup-v3.0.1.asciidoc
+++ /dev/null
@@ -1,102 +0,0 @@
-:plugin: meetup
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.1
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-input-meetup/blob/v3.0.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Meetup input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Periodically query meetup.com regarding updates on events for the given `meetupkey`.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Meetup Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-eventstatus>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-groupid>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-meetupkey>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-urlname>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-venueid>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-eventstatus"]
-===== `eventstatus`
-
- * Value type is <>
- * Default value is `"upcoming,past"`
-
-Event statuses to fetch.
-
-[id="{version}-plugins-{type}s-{plugin}-groupid"]
-===== `groupid`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The group ID; multiple IDs may be specified, separated by commas.
-Must have one of `urlname`, `venueid`, `groupid`.
-
-[id="{version}-plugins-{type}s-{plugin}-interval"]
-===== `interval`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Interval to run the query. Value is in minutes.
-
-[id="{version}-plugins-{type}s-{plugin}-meetupkey"]
-===== `meetupkey`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The Meetup API key.
-
-[id="{version}-plugins-{type}s-{plugin}-urlname"]
-===== `urlname`
-
- * Value type is <>
- * There is no default value for this setting.
-
-URLName - the URL name, e.g. `ElasticSearch-Oklahoma-City`.
-Must have one of `urlname`, `venueid`, `groupid`.
-
-[id="{version}-plugins-{type}s-{plugin}-venueid"]
-===== `venueid`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The venue ID.
-Must have one of `urlname`, `venueid`, `groupid`.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/meetup-v3.0.2.asciidoc b/docs/versioned-plugins/inputs/meetup-v3.0.2.asciidoc
deleted file mode 100644
index 11a0edd87..000000000
--- a/docs/versioned-plugins/inputs/meetup-v3.0.2.asciidoc
+++ /dev/null
@@ -1,102 +0,0 @@
-:plugin: meetup
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.2
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-input-meetup/blob/v3.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Meetup input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Periodically query meetup.com regarding updates on events for the given `meetupkey`.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Meetup Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-eventstatus>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-groupid>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-meetupkey>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-urlname>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-venueid>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-eventstatus"]
-===== `eventstatus`
-
- * Value type is <>
- * Default value is `"upcoming,past"`
-
-Event statuses to fetch.
-
-[id="{version}-plugins-{type}s-{plugin}-groupid"]
-===== `groupid`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The group ID; multiple IDs may be specified, separated by commas.
-Must have one of `urlname`, `venueid`, `groupid`.
-
-[id="{version}-plugins-{type}s-{plugin}-interval"]
-===== `interval`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Interval to run the query. Value is in minutes.
-
-[id="{version}-plugins-{type}s-{plugin}-meetupkey"]
-===== `meetupkey`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The Meetup API key.
-
-[id="{version}-plugins-{type}s-{plugin}-urlname"]
-===== `urlname`
-
- * Value type is <>
- * There is no default value for this setting.
-
-URLName - the URL name, e.g. `ElasticSearch-Oklahoma-City`.
-Must have one of `urlname`, `venueid`, `groupid`.
-
-[id="{version}-plugins-{type}s-{plugin}-venueid"]
-===== `venueid`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The venue ID.
-Must have one of `urlname`, `venueid`, `groupid`.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/meetup-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/meetup-v3.0.3.asciidoc
deleted file mode 100644
index 01c34acb8..000000000
--- a/docs/versioned-plugins/inputs/meetup-v3.0.3.asciidoc
+++ /dev/null
@@ -1,102 +0,0 @@
-:plugin: meetup
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.3
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-input-meetup/blob/v3.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Meetup input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Periodically query meetup.com regarding updates on events for the given `meetupkey`.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Meetup Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-eventstatus>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-groupid>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-meetupkey>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-urlname>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-venueid>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-eventstatus"]
-===== `eventstatus`
-
- * Value type is <>
- * Default value is `"upcoming,past"`
-
-Event statuses to fetch.
-
-[id="{version}-plugins-{type}s-{plugin}-groupid"]
-===== `groupid`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The group ID; multiple IDs may be specified, separated by commas.
-Must have one of `urlname`, `venueid`, `groupid`.
-
-[id="{version}-plugins-{type}s-{plugin}-interval"]
-===== `interval`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Interval to run the query. Value is in minutes.
-
-[id="{version}-plugins-{type}s-{plugin}-meetupkey"]
-===== `meetupkey`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The Meetup API key.
-
-[id="{version}-plugins-{type}s-{plugin}-urlname"]
-===== `urlname`
-
- * Value type is <>
- * There is no default value for this setting.
-
-URLName - the URL name, e.g. `ElasticSearch-Oklahoma-City`.
-Must have one of `urlname`, `venueid`, `groupid`.
-
-[id="{version}-plugins-{type}s-{plugin}-venueid"]
-===== `venueid`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The venue ID.
-Must have one of `urlname`, `venueid`, `groupid`.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/mongodb-index.asciidoc b/docs/versioned-plugins/inputs/mongodb-index.asciidoc
deleted file mode 100644
index 4f5089843..000000000
--- a/docs/versioned-plugins/inputs/mongodb-index.asciidoc
+++ /dev/null
@@ -1,10 +0,0 @@
-:plugin: mongodb
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-|=======================================================================
-
-
diff --git a/docs/versioned-plugins/inputs/neo4j-index.asciidoc b/docs/versioned-plugins/inputs/neo4j-index.asciidoc
deleted file mode 100644
index 66b3d770c..000000000
--- a/docs/versioned-plugins/inputs/neo4j-index.asciidoc
+++ /dev/null
@@ -1,14 +0,0 @@
-:plugin: neo4j
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-08-15
-| <> | 2017-06-23
-|=======================================================================
-
-include::neo4j-v2.0.6.asciidoc[]
-include::neo4j-v2.0.5.asciidoc[]
-
diff --git a/docs/versioned-plugins/inputs/neo4j-v2.0.5.asciidoc b/docs/versioned-plugins/inputs/neo4j-v2.0.5.asciidoc
deleted file mode 100644
index a5910f9b0..000000000
--- a/docs/versioned-plugins/inputs/neo4j-v2.0.5.asciidoc
+++ /dev/null
@@ -1,93 +0,0 @@
-:plugin: neo4j
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v2.0.5
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-input-neo4j/blob/v2.0.5/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Neo4j input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This plugin gets data from a Neo4j database at predefined intervals. To fetch
-this data it uses a given Cypher query.
-
-.Usage
-[source, ruby]
-input {
-  neo4j {
-    query => "MATCH (p:`Person`)-->(m:`Movie`) WHERE m.released = 2005 RETURN *"
-    path => "/foo/bar.db"
-  }
-}
-
-In embedded_db mode this plugin requires a Neo4j database 2.0.1 or later. If
-using the remote version, there is no major restriction.
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Neo4j Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-query>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-path"]
-===== `path`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The path within your file system where the Neo4j database is located.
-
-[id="{version}-plugins-{type}s-{plugin}-query"]
-===== `query`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-If undefined, Logstash will complain, even if codec is unused.
-Cypher query used to retrieve data from the Neo4j database. This statement
-should look something like this:
-
-MATCH (p:`Person`)-->(m:`Movie`) WHERE m.released = 2005 RETURN *
-
-
-[id="{version}-plugins-{type}s-{plugin}-schedule"]
-===== `schedule`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Schedule of when to periodically run the statement, in cron format,
-for example: "* * * * *" (execute query every minute, on the minute).
-If this variable is not specified, then this input will run only once.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/neo4j-v2.0.6.asciidoc b/docs/versioned-plugins/inputs/neo4j-v2.0.6.asciidoc
deleted file mode 100644
index 4af34443e..000000000
--- a/docs/versioned-plugins/inputs/neo4j-v2.0.6.asciidoc
+++ /dev/null
@@ -1,93 +0,0 @@
-:plugin: neo4j
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v2.0.6
-:release_date: 2017-08-15
-:changelog_url: https://github.com/logstash-plugins/logstash-input-neo4j/blob/v2.0.6/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Neo4j input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This plugin gets data from a Neo4j database at predefined intervals. To fetch
-this data it uses a given Cypher query.
-
-.Usage
-[source, ruby]
-input {
-  neo4j {
-    query => "MATCH (p:`Person`)-->(m:`Movie`) WHERE m.released = 2005 RETURN *"
-    path => "/foo/bar.db"
-  }
-}
-
-In embedded_db mode this plugin requires a Neo4j database 2.0.1 or later. If
-using the remote version, there is no major restriction.
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Neo4j Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-query>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-schedule>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-path"]
-===== `path`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The path within your file system where the Neo4j database is located.
-
-[id="{version}-plugins-{type}s-{plugin}-query"]
-===== `query`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-If undefined, Logstash will complain, even if codec is unused.
-Cypher query used to retrieve data from the Neo4j database. This statement
-should look something like this:
-
-MATCH (p:`Person`)-->(m:`Movie`) WHERE m.released = 2005 RETURN *
-
-
-[id="{version}-plugins-{type}s-{plugin}-schedule"]
-===== `schedule`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Schedule of when to periodically run the statement, in cron format,
-for example: "* * * * *" (execute query every minute, on the minute).
-If this variable is not specified, then this input will run only once.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/netflow-index.asciidoc b/docs/versioned-plugins/inputs/netflow-index.asciidoc
deleted file mode 100644
index 8b4978af7..000000000
--- a/docs/versioned-plugins/inputs/netflow-index.asciidoc
+++ /dev/null
@@ -1,10 +0,0 @@
-:plugin: netflow
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-|=======================================================================
-
-
diff --git a/docs/versioned-plugins/inputs/perfmon-index.asciidoc b/docs/versioned-plugins/inputs/perfmon-index.asciidoc
deleted file mode 100644
index 165e7842d..000000000
--- a/docs/versioned-plugins/inputs/perfmon-index.asciidoc
+++ /dev/null
@@ -1,10 +0,0 @@
-:plugin: perfmon
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-|=======================================================================
-
-
diff --git a/docs/versioned-plugins/inputs/pipe-index.asciidoc b/docs/versioned-plugins/inputs/pipe-index.asciidoc
deleted file mode 100644
index 8d87e45a0..000000000
--- a/docs/versioned-plugins/inputs/pipe-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: pipe
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-15
-| <> | 2017-06-23
-|=======================================================================
-
-include::pipe-v3.0.6.asciidoc[]
-include::pipe-v3.0.5.asciidoc[]
-include::pipe-v3.0.4.asciidoc[]
-
diff --git a/docs/versioned-plugins/inputs/pipe-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/pipe-v3.0.4.asciidoc
deleted file mode 100644
29c8ee8a2..000000000 --- a/docs/versioned-plugins/inputs/pipe-v3.0.4.asciidoc +++ /dev/null @@ -1,61 +0,0 @@ -:plugin: pipe -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-pipe/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Pipe input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Stream events from a long running command pipe. - -By default, each event is assumed to be one line. If you -want to join lines, you'll want to use the multiline codec. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Pipe Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-command>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-command"] -===== `command` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Command to run and read events from, one line at a time. - -Example: -[source,ruby] - command => "echo hello world" - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/pipe-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/pipe-v3.0.5.asciidoc deleted file mode 100644 index 50b88a09d..000000000 --- a/docs/versioned-plugins/inputs/pipe-v3.0.5.asciidoc +++ /dev/null @@ -1,61 +0,0 @@ -:plugin: pipe -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-input-pipe/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Pipe input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Stream events from a long running command pipe. - -By default, each event is assumed to be one line. If you -want to join lines, you'll want to use the multiline codec. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Pipe Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-command>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-command"] -===== `command` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Command to run and read events from, one line at a time. - -Example: -[source,ruby] - command => "echo hello world" - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/pipe-v3.0.6.asciidoc b/docs/versioned-plugins/inputs/pipe-v3.0.6.asciidoc deleted file mode 100644 index 46eabd7bb..000000000 --- a/docs/versioned-plugins/inputs/pipe-v3.0.6.asciidoc +++ /dev/null @@ -1,61 +0,0 @@ -:plugin: pipe -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.6 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-pipe/blob/v3.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Pipe input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Stream events from a long running command pipe. - -By default, each event is assumed to be one line. If you -want to join lines, you'll want to use the multiline codec. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Pipe Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-command>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-command"] -===== `command` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Command to run and read events from, one line at a time. 
- -Example: -[source,ruby] - command => "echo hello world" - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/puppet_facter-index.asciidoc b/docs/versioned-plugins/inputs/puppet_facter-index.asciidoc deleted file mode 100644 index cf3e696ac..000000000 --- a/docs/versioned-plugins/inputs/puppet_facter-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: puppet_facter -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-15 -| <> | 2017-06-23 -|======================================================================= - -include::puppet_facter-v3.0.3.asciidoc[] -include::puppet_facter-v3.0.2.asciidoc[] -include::puppet_facter-v3.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/puppet_facter-v3.0.1.asciidoc b/docs/versioned-plugins/inputs/puppet_facter-v3.0.1.asciidoc deleted file mode 100644 index 426557593..000000000 --- a/docs/versioned-plugins/inputs/puppet_facter-v3.0.1.asciidoc +++ /dev/null @@ -1,106 +0,0 @@ -:plugin: puppet_facter -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-puppet_facter/blob/v3.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Puppet_facter input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Connects to a puppet server and requests facts - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Puppet_facter Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-environment>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-private_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-public_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. 
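-
-Since the individual settings below carry no descriptions, a rough sketch
-may help (the server name is illustrative, and the values shown are
-simply the documented defaults spelled out):
-
-[source,ruby]
-    input {
-      puppet_facter {
-        # poll the puppet master over SSL every 10 minutes
-        host => "puppet.example.com"
-        port => 8140
-        interval => 600
-        ssl => true
-      }
-    }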
- -  - -[id="{version}-plugins-{type}s-{plugin}-environment"] -===== `environment` - - * Value type is <> - * Default value is `"production"` - - - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"0.0.0.0"` - - - -[id="{version}-plugins-{type}s-{plugin}-interval"] -===== `interval` - - * Value type is <> - * Default value is `600` - - - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `8140` - - - -[id="{version}-plugins-{type}s-{plugin}-private_key"] -===== `private_key` - - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-public_key"] -===== `public_key` - - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` - - * Value type is <> - * Default value is `true` - - - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/puppet_facter-v3.0.2.asciidoc b/docs/versioned-plugins/inputs/puppet_facter-v3.0.2.asciidoc deleted file mode 100644 index 2a1e1400b..000000000 --- a/docs/versioned-plugins/inputs/puppet_facter-v3.0.2.asciidoc +++ /dev/null @@ -1,106 +0,0 @@ -:plugin: puppet_facter -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.2 -:release_date: 2017-08-15 -:changelog_url: https://github.com/logstash-plugins/logstash-input-puppet_facter/blob/v3.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Puppet_facter input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Connects to a puppet server and requests facts - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Puppet_facter Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-environment>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-private_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-public_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. 
- -  - -[id="{version}-plugins-{type}s-{plugin}-environment"] -===== `environment` - - * Value type is <> - * Default value is `"production"` - - - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"0.0.0.0"` - - - -[id="{version}-plugins-{type}s-{plugin}-interval"] -===== `interval` - - * Value type is <> - * Default value is `600` - - - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `8140` - - - -[id="{version}-plugins-{type}s-{plugin}-private_key"] -===== `private_key` - - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-public_key"] -===== `public_key` - - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` - - * Value type is <> - * Default value is `true` - - - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/puppet_facter-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/puppet_facter-v3.0.3.asciidoc deleted file mode 100644 index 9fc392a4b..000000000 --- a/docs/versioned-plugins/inputs/puppet_facter-v3.0.3.asciidoc +++ /dev/null @@ -1,106 +0,0 @@ -:plugin: puppet_facter -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-puppet_facter/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Puppet_facter input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Connects to a puppet server and requests facts - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Puppet_facter Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-environment>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-private_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-public_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. 
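-
-As a hedged sketch (the host and key paths are illustrative), a
-configuration supplying an explicit certificate pair for the SSL
-connection might look like:
-
-[source,ruby]
-    input {
-      puppet_facter {
-        host => "puppet.example.com"
-        ssl => true
-        # client certificate pair (paths are illustrative)
-        private_key => "/etc/logstash/puppet/private_key.pem"
-        public_key => "/etc/logstash/puppet/public_key.pem"
-      }
-    }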
- -  - -[id="{version}-plugins-{type}s-{plugin}-environment"] -===== `environment` - - * Value type is <> - * Default value is `"production"` - - - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"0.0.0.0"` - - - -[id="{version}-plugins-{type}s-{plugin}-interval"] -===== `interval` - - * Value type is <> - * Default value is `600` - - - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `8140` - - - -[id="{version}-plugins-{type}s-{plugin}-private_key"] -===== `private_key` - - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-public_key"] -===== `public_key` - - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` - - * Value type is <> - * Default value is `true` - - - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/rabbitmq-index.asciidoc b/docs/versioned-plugins/inputs/rabbitmq-index.asciidoc deleted file mode 100644 index d8f3ed14b..000000000 --- a/docs/versioned-plugins/inputs/rabbitmq-index.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -:plugin: rabbitmq -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-16 -| <> | 2017-08-02 -| <> | 2017-08-18 -| <> | 2017-06-23 -|======================================================================= - -include::rabbitmq-v6.0.2.asciidoc[] -include::rabbitmq-v6.0.1.asciidoc[] -include::rabbitmq-v6.0.0.asciidoc[] -include::rabbitmq-v5.2.5.asciidoc[] -include::rabbitmq-v5.2.4.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/rabbitmq-v5.2.4.asciidoc b/docs/versioned-plugins/inputs/rabbitmq-v5.2.4.asciidoc deleted file mode 100644 index 714bc915e..000000000 --- a/docs/versioned-plugins/inputs/rabbitmq-v5.2.4.asciidoc +++ /dev/null @@ -1,415 +0,0 @@ -:plugin: rabbitmq -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v5.2.4 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-rabbitmq/blob/v5.2.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Rabbitmq input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Pull events from a http://www.rabbitmq.com/[RabbitMQ] queue. - -The default settings will create an entirely transient queue and listen for all messages by default. -If you need durability or any other advanced settings, please set the appropriate options - -This plugin uses the http://rubymarchhare.info/[March Hare] library -for interacting with the RabbitMQ server. Most configuration options -map directly to standard RabbitMQ and AMQP concepts. The -https://www.rabbitmq.com/amqp-0-9-1-reference.html[AMQP 0-9-1 reference guide] -and other parts of the RabbitMQ documentation are useful for deeper -understanding. 
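-
-As a minimal sketch (the host and queue names are illustrative), a
-consumer reading from a durable queue might be configured like this:
-
-[source,ruby]
-    input {
-      rabbitmq {
-        host => "localhost"
-        # consume from a durable queue instead of a transient one
-        queue => "logstash"
-        durable => true
-      }
-    }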
- -The properties of messages received will be stored in the -`[@metadata][rabbitmq_properties]` field if the `@metadata_enabled` setting is checked. -Note that storing metadata may degrade performance. -The following properties may be available (in most cases dependent on whether -they were set by the sender): - -* app-id -* cluster-id -* consumer-tag -* content-encoding -* content-type -* correlation-id -* delivery-mode -* exchange -* expiration -* message-id -* priority -* redeliver -* reply-to -* routing-key -* timestamp -* type -* user-id - -For example, to get the RabbitMQ message's timestamp property -into the Logstash event's `@timestamp` field, use the date -filter to parse the `[@metadata][rabbitmq_properties][timestamp]` -field: -[source,ruby] - filter { - if [@metadata][rabbitmq_properties][timestamp] { - date { - match => ["[@metadata][rabbitmq_properties][timestamp]", "UNIX"] - } - } - } - -Additionally, any message headers will be saved in the -`[@metadata][rabbitmq_headers]` field. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Rabbitmq Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-ack>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-auto_delete>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclusive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_enabled>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-prefetch_count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-ack"] -===== `ack` - - * Value type is <> - * Default value is `true` - -Enable message acknowledgements. 
With acknowledgements,
-messages fetched by Logstash but not yet sent into the
-Logstash pipeline will be requeued by the server if Logstash
-shuts down. Acknowledgements will, however, hurt message
-throughput.
-
-This will only send an ack back every `prefetch_count` messages.
-Working in batches provides a performance boost here.
-
-[id="{version}-plugins-{type}s-{plugin}-arguments"]
-===== `arguments`
-
- * Value type is <>
- * Default value is `{}`
-
-Extra queue arguments as an array.
-To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}`
-
-[id="{version}-plugins-{type}s-{plugin}-auto_delete"]
-===== `auto_delete`
-
- * Value type is <>
- * Default value is `false`
-
-Should the queue be deleted on the broker when the last consumer
-disconnects? Set this option to `false` if you want the queue to remain
-on the broker, queueing up messages until a consumer comes along to
-consume them.
-
-[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"]
-===== `automatic_recovery`
-
- * Value type is <>
- * Default value is `true`
-
-Set this to automatically recover from a broken connection. You almost certainly don't want to override this.
-
-[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"]
-===== `connect_retry_interval`
-
- * Value type is <>
- * Default value is `1`
-
-Time in seconds to wait before retrying a connection.
-
-[id="{version}-plugins-{type}s-{plugin}-connection_timeout"]
-===== `connection_timeout`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The default connection timeout in milliseconds. If not specified, the timeout is infinite.
-
-[id="{version}-plugins-{type}s-{plugin}-durable"]
-===== `durable`
-
- * Value type is <>
- * Default value is `false`
-
-Is this queue durable? (That is, should it survive a broker restart?)
-
-[id="{version}-plugins-{type}s-{plugin}-exchange"]
-===== `exchange`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The name of the exchange to bind the queue to. Specify `exchange_type`
-as well to declare the exchange if it does not exist.
-
-[id="{version}-plugins-{type}s-{plugin}-exchange_type"]
-===== `exchange_type`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The type of the exchange to bind to. Specifying this will cause this plugin
-to declare the exchange if it does not exist.
-
-[id="{version}-plugins-{type}s-{plugin}-exclusive"]
-===== `exclusive`
-
- * Value type is <>
- * Default value is `false`
-
-Is the queue exclusive? Exclusive queues can only be used by the connection
-that declared them and will be deleted when it is closed (e.g. due to a Logstash
-restart).
-
-[id="{version}-plugins-{type}s-{plugin}-heartbeat"]
-===== `heartbeat`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Heartbeat delay in seconds. If unspecified, no heartbeats will be sent.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-RabbitMQ server address(es). The value can be a single host or a list
-of hosts, for example:
-
-    host => "localhost"
-
-or
-
-    host => ["host01", "host02"]
-
-If multiple hosts are provided, a host is chosen at random for the
-initial connection and for any subsequent recovery attempts.
-Note that only one host connection is active at a time.
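-
-As a hedged sketch (the hostnames are illustrative), a two-broker
-setup might look like this:
-
-[source,ruby]
-    input {
-      rabbitmq {
-        # either broker may be chosen for the initial connection
-        host => ["rabbit01.example.com", "rabbit02.example.com"]
-        queue => "logstash"
-      }
-    }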
-
-[id="{version}-plugins-{type}s-{plugin}-key"]
-===== `key`
-
- * Value type is <>
- * Default value is `"logstash"`
-
-The routing key to use when binding a queue to the exchange.
-This is only relevant for direct or topic exchanges.
-
-* Routing keys are ignored on fanout exchanges.
-* Wildcards are not valid on direct exchanges.
-
-[id="{version}-plugins-{type}s-{plugin}-metadata_enabled"]
-===== `metadata_enabled`
-
- * Value type is <>
- * Default value is `false`
-
-Enable the storage of message headers and properties in `@metadata`. This may impact performance.
-
-[id="{version}-plugins-{type}s-{plugin}-passive"]
-===== `passive`
-
- * Value type is <>
- * Default value is `false`
-
-If true, the queue will be passively declared, meaning it must
-already exist on the server. To have Logstash create the queue
-if necessary, leave this option as false. If actively declaring
-a queue that already exists, the queue options for this plugin
-(durable, etc.) must match those of the existing queue.
-
-[id="{version}-plugins-{type}s-{plugin}-password"]
-===== `password`
-
- * Value type is <>
- * Default value is `"guest"`
-
-RabbitMQ password.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <>
- * Default value is `5672`
-
-RabbitMQ port to connect on.
-
-[id="{version}-plugins-{type}s-{plugin}-prefetch_count"]
-===== `prefetch_count`
-
- * Value type is <>
- * Default value is `256`
-
-Prefetch count. If acknowledgements are enabled with the `ack`
-option, specifies the number of outstanding unacknowledged
-messages allowed.
-
-[id="{version}-plugins-{type}s-{plugin}-queue"]
-===== `queue`
-
- * Value type is <>
- * Default value is `""`
-
-The name of the queue Logstash will consume events from. If
-left empty, a transient queue with a randomly chosen name
-will be created.
-
-The default codec for this plugin is JSON. You can override this to suit your particular needs, however.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl"]
-===== `ssl`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Enable or disable SSL.
-Note that by default remote certificate verification is off.
-Specify `ssl_certificate_path` and `ssl_certificate_password` if you need
-certificate verification.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"]
-===== `ssl_certificate_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Password for the encrypted PKCS12 (.p12) certificate file specified in `ssl_certificate_path`.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"]
-===== `ssl_certificate_path`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_version"]
-===== `ssl_version`
-
- * Value type is <>
- * Default value is `"TLSv1.2"`
-
-Version of the SSL protocol to use.
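-
-Putting the SSL options together, a verified TLS connection might be
-sketched as follows (the path and password are illustrative):
-
-[source,ruby]
-    input {
-      rabbitmq {
-        host => "rabbit.example.com"
-        ssl => true
-        # supplying a certificate enables remote verification
-        ssl_certificate_path => "/etc/logstash/rabbitmq/client.p12"
-        ssl_certificate_password => "changeme"
-      }
-    }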
-
-[id="{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds"]
-===== `subscription_retry_interval_seconds`
-
- * This is a required setting.
- * Value type is <>
- * Default value is `5`
-
-Amount of time in seconds to wait after a failed subscription request
-before retrying. Subscriptions can fail if the server goes away and then comes back.
-
-[id="{version}-plugins-{type}s-{plugin}-threads"]
-===== `threads`
-
- * Value type is <>
- * Default value is `1`
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-tls_certificate_password"]
-===== `tls_certificate_password` (DEPRECATED)
-
- * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
- * Value type is <>
- * There is no default value for this setting.
-
-TLS certificate password.
-
-[id="{version}-plugins-{type}s-{plugin}-tls_certificate_path"]
-===== `tls_certificate_path` (DEPRECATED)
-
- * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
- * Value type is <>
- * There is no default value for this setting.
-
-TLS certificate path.
-
-[id="{version}-plugins-{type}s-{plugin}-user"]
-===== `user`
-
- * Value type is <>
- * Default value is `"guest"`
-
-RabbitMQ username.
-
-[id="{version}-plugins-{type}s-{plugin}-vhost"]
-===== `vhost`
-
- * Value type is <>
- * Default value is `"/"`
-
-The vhost (virtual host) to use. If you don't know what this
-is, leave the default. With the exception of the default
-vhost ("/"), names of vhosts should not begin with a forward
-slash.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/rabbitmq-v5.2.5.asciidoc b/docs/versioned-plugins/inputs/rabbitmq-v5.2.5.asciidoc
deleted file mode 100644
index 2bc863d32..000000000
--- a/docs/versioned-plugins/inputs/rabbitmq-v5.2.5.asciidoc
+++ /dev/null
@@ -1,415 +0,0 @@
-:plugin: rabbitmq
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v5.2.5
-:release_date: 2017-08-18
-:changelog_url: https://github.com/logstash-plugins/logstash-input-rabbitmq/blob/v5.2.5/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Rabbitmq input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Pull events from a http://www.rabbitmq.com/[RabbitMQ] queue.
-
-By default, the plugin creates an entirely transient queue and listens for all messages.
-If you need durability or any other advanced settings, please set the appropriate options.
-
-This plugin uses the http://rubymarchhare.info/[March Hare] library
-for interacting with the RabbitMQ server. Most configuration options
-map directly to standard RabbitMQ and AMQP concepts. The
-https://www.rabbitmq.com/amqp-0-9-1-reference.html[AMQP 0-9-1 reference guide]
-and other parts of the RabbitMQ documentation are useful for deeper
-understanding.
-
-The properties of messages received will be stored in the
-`[@metadata][rabbitmq_properties]` field if the `metadata_enabled` setting is enabled.
-Note that storing metadata may degrade performance.
-The following properties may be available (in most cases dependent on whether -they were set by the sender): - -* app-id -* cluster-id -* consumer-tag -* content-encoding -* content-type -* correlation-id -* delivery-mode -* exchange -* expiration -* message-id -* priority -* redeliver -* reply-to -* routing-key -* timestamp -* type -* user-id - -For example, to get the RabbitMQ message's timestamp property -into the Logstash event's `@timestamp` field, use the date -filter to parse the `[@metadata][rabbitmq_properties][timestamp]` -field: -[source,ruby] - filter { - if [@metadata][rabbitmq_properties][timestamp] { - date { - match => ["[@metadata][rabbitmq_properties][timestamp]", "UNIX"] - } - } - } - -Additionally, any message headers will be saved in the -`[@metadata][rabbitmq_headers]` field. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Rabbitmq Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-ack>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-auto_delete>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclusive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_enabled>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-prefetch_count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-ack"] -===== `ack` - - * Value type is <> - * Default value is `true` - -Enable message acknowledgements. With acknowledgements -messages fetched by Logstash but not yet sent into the -Logstash pipeline will be requeued by the server if Logstash -shuts down. Acknowledgements will however hurt the message -throughput. - -This will only send an ack back every `prefetch_count` messages. -Working in batches provides a performance boost here. 
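-
-For example, raising the batch size might be sketched like this (the
-value is illustrative; tune it against your own throughput tests):
-
-[source,ruby]
-    input {
-      rabbitmq {
-        host => "localhost"
-        queue => "logstash"
-        ack => true
-        # one ack is sent back per 512 messages fetched
-        prefetch_count => 512
-      }
-    }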
-
-[id="{version}-plugins-{type}s-{plugin}-arguments"]
-===== `arguments`
-
- * Value type is <>
- * Default value is `{}`
-
-Extra queue arguments as an array.
-To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}`
-
-[id="{version}-plugins-{type}s-{plugin}-auto_delete"]
-===== `auto_delete`
-
- * Value type is <>
- * Default value is `false`
-
-Should the queue be deleted on the broker when the last consumer
-disconnects? Set this option to `false` if you want the queue to remain
-on the broker, queueing up messages until a consumer comes along to
-consume them.
-
-[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"]
-===== `automatic_recovery`
-
- * Value type is <>
- * Default value is `true`
-
-Set this to automatically recover from a broken connection. You almost certainly don't want to override this.
-
-[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"]
-===== `connect_retry_interval`
-
- * Value type is <>
- * Default value is `1`
-
-Time in seconds to wait before retrying a connection.
-
-[id="{version}-plugins-{type}s-{plugin}-connection_timeout"]
-===== `connection_timeout`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The default connection timeout in milliseconds. If not specified, the timeout is infinite.
-
-[id="{version}-plugins-{type}s-{plugin}-durable"]
-===== `durable`
-
- * Value type is <>
- * Default value is `false`
-
-Is this queue durable? (That is, should it survive a broker restart?)
-
-[id="{version}-plugins-{type}s-{plugin}-exchange"]
-===== `exchange`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The name of the exchange to bind the queue to. Specify `exchange_type`
-as well to declare the exchange if it does not exist.
-
-[id="{version}-plugins-{type}s-{plugin}-exchange_type"]
-===== `exchange_type`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The type of the exchange to bind to. Specifying this will cause this plugin
-to declare the exchange if it does not exist.
-
-[id="{version}-plugins-{type}s-{plugin}-exclusive"]
-===== `exclusive`
-
- * Value type is <>
- * Default value is `false`
-
-Is the queue exclusive? Exclusive queues can only be used by the connection
-that declared them and will be deleted when it is closed (e.g. due to a Logstash
-restart).
-
-[id="{version}-plugins-{type}s-{plugin}-heartbeat"]
-===== `heartbeat`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Heartbeat delay in seconds. If unspecified, no heartbeats will be sent.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-RabbitMQ server address(es). The value can be a single host or a list
-of hosts, for example:
-
-    host => "localhost"
-
-or
-
-    host => ["host01", "host02"]
-
-If multiple hosts are provided, a host is chosen at random for the
-initial connection and for any subsequent recovery attempts.
-Note that only one host connection is active at a time.
-
-[id="{version}-plugins-{type}s-{plugin}-key"]
-===== `key`
-
- * Value type is <>
- * Default value is `"logstash"`
-
-The routing key to use when binding a queue to the exchange.
-This is only relevant for direct or topic exchanges.
-
-* Routing keys are ignored on fanout exchanges.
-* Wildcards are not valid on direct exchanges.
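-
-As an illustrative sketch (the exchange, key, and queue names are made
-up), binding the queue to a topic exchange with a routing key might
-look like this:
-
-[source,ruby]
-    input {
-      rabbitmq {
-        host => "localhost"
-        # declares the exchange if it does not already exist
-        exchange => "logs"
-        exchange_type => "topic"
-        # '#' wildcards are valid on topic exchanges
-        key => "app.#"
-        queue => "logstash"
-      }
-    }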
-
-[id="{version}-plugins-{type}s-{plugin}-metadata_enabled"]
-===== `metadata_enabled`
-
- * Value type is <>
- * Default value is `false`
-
-Enable the storage of message headers and properties in `@metadata`. This may impact performance.
-
-[id="{version}-plugins-{type}s-{plugin}-passive"]
-===== `passive`
-
- * Value type is <>
- * Default value is `false`
-
-If true, the queue will be passively declared, meaning it must
-already exist on the server. To have Logstash create the queue
-if necessary, leave this option as false. If actively declaring
-a queue that already exists, the queue options for this plugin
-(durable, etc.) must match those of the existing queue.
-
-[id="{version}-plugins-{type}s-{plugin}-password"]
-===== `password`
-
- * Value type is <>
- * Default value is `"guest"`
-
-RabbitMQ password.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <>
- * Default value is `5672`
-
-RabbitMQ port to connect on.
-
-[id="{version}-plugins-{type}s-{plugin}-prefetch_count"]
-===== `prefetch_count`
-
- * Value type is <>
- * Default value is `256`
-
-Prefetch count. If acknowledgements are enabled with the `ack`
-option, specifies the number of outstanding unacknowledged
-messages allowed.
-
-[id="{version}-plugins-{type}s-{plugin}-queue"]
-===== `queue`
-
- * Value type is <>
- * Default value is `""`
-
-The name of the queue Logstash will consume events from. If
-left empty, a transient queue with a randomly chosen name
-will be created.
-
-The default codec for this plugin is JSON. You can override this to suit your particular needs, however.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl"]
-===== `ssl`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Enable or disable SSL.
-Note that by default remote certificate verification is off.
-Specify `ssl_certificate_path` and `ssl_certificate_password` if you need
-certificate verification.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"]
-===== `ssl_certificate_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Password for the encrypted PKCS12 (.p12) certificate file specified in `ssl_certificate_path`.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"]
-===== `ssl_certificate_path`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_version"]
-===== `ssl_version`
-
- * Value type is <>
- * Default value is `"TLSv1.2"`
-
-Version of the SSL protocol to use.
-
-[id="{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds"]
-===== `subscription_retry_interval_seconds`
-
- * This is a required setting.
- * Value type is <>
- * Default value is `5`
-
-Amount of time in seconds to wait after a failed subscription request
-before retrying. Subscriptions can fail if the server goes away and then comes back.
-
-[id="{version}-plugins-{type}s-{plugin}-threads"]
-===== `threads`
-
- * Value type is <>
- * Default value is `1`
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-tls_certificate_password"]
-===== `tls_certificate_password` (DEPRECATED)
-
- * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
- * Value type is <>
- * There is no default value for this setting.
-
-TLS certificate password.
-
-[id="{version}-plugins-{type}s-{plugin}-tls_certificate_path"]
-===== `tls_certificate_path` (DEPRECATED)
-
- * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
- * Value type is <>
- * There is no default value for this setting.
-
-TLS certificate path.
-
-[id="{version}-plugins-{type}s-{plugin}-user"]
-===== `user`
-
- * Value type is <>
- * Default value is `"guest"`
-
-RabbitMQ username.
-
-[id="{version}-plugins-{type}s-{plugin}-vhost"]
-===== `vhost`
-
- * Value type is <>
- * Default value is `"/"`
-
-The vhost (virtual host) to use. If you don't know what this
-is, leave the default. With the exception of the default
-vhost ("/"), names of vhosts should not begin with a forward
-slash.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/rabbitmq-v6.0.0.asciidoc b/docs/versioned-plugins/inputs/rabbitmq-v6.0.0.asciidoc
deleted file mode 100644
index b746ba105..000000000
--- a/docs/versioned-plugins/inputs/rabbitmq-v6.0.0.asciidoc
+++ /dev/null
@@ -1,395 +0,0 @@
-:plugin: rabbitmq
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v6.0.0
-:release_date: 2017-08-02
-:changelog_url: https://github.com/logstash-plugins/logstash-input-rabbitmq/blob/v6.0.0/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Rabbitmq input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Pull events from a http://www.rabbitmq.com/[RabbitMQ] queue.
-
-By default, the plugin creates an entirely transient queue and listens for all messages.
-If you need durability or any other advanced settings, please set the appropriate options.
-
-This plugin uses the http://rubymarchhare.info/[March Hare] library
-for interacting with the RabbitMQ server. Most configuration options
-map directly to standard RabbitMQ and AMQP concepts. The
-https://www.rabbitmq.com/amqp-0-9-1-reference.html[AMQP 0-9-1 reference guide]
-and other parts of the RabbitMQ documentation are useful for deeper
-understanding.
-
-The properties of messages received will be stored in the
-`[@metadata][rabbitmq_properties]` field if the `metadata_enabled` setting is enabled.
-Note that storing metadata may degrade performance.
-The following properties may be available (in most cases dependent on whether -they were set by the sender): - -* app-id -* cluster-id -* consumer-tag -* content-encoding -* content-type -* correlation-id -* delivery-mode -* exchange -* expiration -* message-id -* priority -* redeliver -* reply-to -* routing-key -* timestamp -* type -* user-id - -For example, to get the RabbitMQ message's timestamp property -into the Logstash event's `@timestamp` field, use the date -filter to parse the `[@metadata][rabbitmq_properties][timestamp]` -field: -[source,ruby] - filter { - if [@metadata][rabbitmq_properties][timestamp] { - date { - match => ["[@metadata][rabbitmq_properties][timestamp]", "UNIX"] - } - } - } - -Additionally, any message headers will be saved in the -`[@metadata][rabbitmq_headers]` field. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Rabbitmq Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-ack>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-auto_delete>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclusive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_enabled>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-prefetch_count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-ack"] -===== `ack` - - * Value type is <> - * Default value is `true` - -Enable message acknowledgements. With acknowledgements -messages fetched by Logstash but not yet sent into the -Logstash pipeline will be requeued by the server if Logstash -shuts down. Acknowledgements will however hurt the message -throughput. - -This will only send an ack back every `prefetch_count` messages. -Working in batches provides a performance boost here. 
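-
-A hedged sketch of tuning this batching behavior (the count is
-illustrative):
-
-[source,ruby]
-    input {
-      rabbitmq {
-        host => "localhost"
-        ack => true
-        # larger batches mean fewer acks on the wire
-        prefetch_count => 512
-      }
-    }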
-
-[id="{version}-plugins-{type}s-{plugin}-arguments"]
-===== `arguments`
-
- * Value type is <>
- * Default value is `{}`
-
-Extra queue arguments as an array.
-To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}`
-
-[id="{version}-plugins-{type}s-{plugin}-auto_delete"]
-===== `auto_delete`
-
- * Value type is <>
- * Default value is `false`
-
-Should the queue be deleted on the broker when the last consumer
-disconnects? Set this option to `false` if you want the queue to remain
-on the broker, queueing up messages until a consumer comes along to
-consume them.
-
-[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"]
-===== `automatic_recovery`
-
- * Value type is <>
- * Default value is `true`
-
-Set this to automatically recover from a broken connection. You almost certainly don't want to override this.
-
-[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"]
-===== `connect_retry_interval`
-
- * Value type is <>
- * Default value is `1`
-
-Time in seconds to wait before retrying a connection.
-
-[id="{version}-plugins-{type}s-{plugin}-connection_timeout"]
-===== `connection_timeout`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The default connection timeout in milliseconds. If not specified, the timeout is infinite.
-
-[id="{version}-plugins-{type}s-{plugin}-durable"]
-===== `durable`
-
- * Value type is <>
- * Default value is `false`
-
-Is this queue durable? (That is, should it survive a broker restart?)
-
-[id="{version}-plugins-{type}s-{plugin}-exchange"]
-===== `exchange`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The name of the exchange to bind the queue to. Specify `exchange_type`
-as well to declare the exchange if it does not exist.
-
-[id="{version}-plugins-{type}s-{plugin}-exchange_type"]
-===== `exchange_type`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The type of the exchange to bind to. Specifying this will cause this plugin
-to declare the exchange if it does not exist.
-
-[id="{version}-plugins-{type}s-{plugin}-exclusive"]
-===== `exclusive`
-
- * Value type is <>
- * Default value is `false`
-
-Is the queue exclusive? Exclusive queues can only be used by the connection
-that declared them and will be deleted when it is closed (e.g. due to a Logstash
-restart).
-
-[id="{version}-plugins-{type}s-{plugin}-heartbeat"]
-===== `heartbeat`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Heartbeat delay in seconds. If unspecified, no heartbeats will be sent.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-RabbitMQ server address(es). The value can be a single host or a list
-of hosts, for example:
-
-    host => "localhost"
-
-or
-
-    host => ["host01", "host02"]
-
-If multiple hosts are provided, a host is chosen at random for the
-initial connection and for any subsequent recovery attempts.
-Note that only one host connection is active at a time.
-
-[id="{version}-plugins-{type}s-{plugin}-key"]
-===== `key`
-
- * Value type is <>
- * Default value is `"logstash"`
-
-The routing key to use when binding a queue to the exchange.
-This is only relevant for direct or topic exchanges.
-
-* Routing keys are ignored on fanout exchanges.
-* Wildcards are not valid on direct exchanges.
-
-[id="{version}-plugins-{type}s-{plugin}-metadata_enabled"]
-===== `metadata_enabled`
-
- * Value type is <>
- * Default value is `false`
-
-Enable the storage of message headers and properties in `@metadata`. This may impact performance.
-
-[id="{version}-plugins-{type}s-{plugin}-passive"]
-===== `passive`
-
- * Value type is <>
- * Default value is `false`
-
-If true, the queue will be passively declared, meaning it must
-already exist on the server. To have Logstash create the queue
-if necessary, leave this option as false. If actively declaring
-a queue that already exists, the queue options for this plugin
-(durable, etc.) must match those of the existing queue.
-
-[id="{version}-plugins-{type}s-{plugin}-password"]
-===== `password`
-
- * Value type is <>
- * Default value is `"guest"`
-
-RabbitMQ password.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <>
- * Default value is `5672`
-
-RabbitMQ port to connect on.
-
-[id="{version}-plugins-{type}s-{plugin}-prefetch_count"]
-===== `prefetch_count`
-
- * Value type is <>
- * Default value is `256`
-
-Prefetch count. If acknowledgements are enabled with the `ack`
-option, specifies the number of outstanding unacknowledged
-messages allowed.
-
-[id="{version}-plugins-{type}s-{plugin}-queue"]
-===== `queue`
-
- * Value type is <>
- * Default value is `""`
-
-The name of the queue Logstash will consume events from. If
-left empty, a transient queue with a randomly chosen name
-will be created.
-
-The default codec for this plugin is JSON. You can override this to suit your particular needs, however.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl"]
-===== `ssl`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Enable or disable SSL.
-Note that by default remote certificate verification is off.
-Specify `ssl_certificate_path` and `ssl_certificate_password` if you need
-certificate verification.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"]
-===== `ssl_certificate_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Password for the encrypted PKCS12 (.p12) certificate file specified in `ssl_certificate_path`.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"]
-===== `ssl_certificate_path`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_version"]
-===== `ssl_version`
-
- * Value type is <>
- * Default value is `"TLSv1.2"`
-
-Version of the SSL protocol to use.
-
-[id="{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds"]
-===== `subscription_retry_interval_seconds`
-
- * This is a required setting.
- * Value type is <>
- * Default value is `5`
-
-Amount of time in seconds to wait after a failed subscription request
-before retrying. Subscriptions can fail if the server goes away and then comes back.
- -[id="{version}-plugins-{type}s-{plugin}-threads"] -===== `threads` - - * Value type is <> - * Default value is `1` - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * Default value is `"guest"` - -RabbitMQ username - -[id="{version}-plugins-{type}s-{plugin}-vhost"] -===== `vhost` - - * Value type is <> - * Default value is `"/"` - -The vhost (virtual host) to use. If you don't know what this -is, leave the default. With the exception of the default -vhost ("/"), names of vhosts should not begin with a forward -slash. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/rabbitmq-v6.0.1.asciidoc b/docs/versioned-plugins/inputs/rabbitmq-v6.0.1.asciidoc deleted file mode 100644 index 29eae981e..000000000 --- a/docs/versioned-plugins/inputs/rabbitmq-v6.0.1.asciidoc +++ /dev/null @@ -1,395 +0,0 @@ -:plugin: rabbitmq -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v6.0.1 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-input-rabbitmq/blob/v6.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Rabbitmq input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Pull events from a http://www.rabbitmq.com/[RabbitMQ] queue. - -The default settings will create an entirely transient queue and listen for all messages by default. -If you need durability or any other advanced settings, please set the appropriate options - -This plugin uses the http://rubymarchhare.info/[March Hare] library -for interacting with the RabbitMQ server. Most configuration options -map directly to standard RabbitMQ and AMQP concepts. The -https://www.rabbitmq.com/amqp-0-9-1-reference.html[AMQP 0-9-1 reference guide] -and other parts of the RabbitMQ documentation are useful for deeper -understanding. - -The properties of messages received will be stored in the -`[@metadata][rabbitmq_properties]` field if the `@metadata_enabled` setting is checked. -Note that storing metadata may degrade performance. -The following properties may be available (in most cases dependent on whether -they were set by the sender): - -* app-id -* cluster-id -* consumer-tag -* content-encoding -* content-type -* correlation-id -* delivery-mode -* exchange -* expiration -* message-id -* priority -* redeliver -* reply-to -* routing-key -* timestamp -* type -* user-id - -For example, to get the RabbitMQ message's timestamp property -into the Logstash event's `@timestamp` field, use the date -filter to parse the `[@metadata][rabbitmq_properties][timestamp]` -field: -[source,ruby] - filter { - if [@metadata][rabbitmq_properties][timestamp] { - date { - match => ["[@metadata][rabbitmq_properties][timestamp]", "UNIX"] - } - } - } - -Additionally, any message headers will be saved in the -`[@metadata][rabbitmq_headers]` field. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Rabbitmq Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-ack>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-auto_delete>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclusive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_enabled>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-prefetch_count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-ack"] -===== `ack` - - * Value type is <> - * Default value is `true` - -Enable message acknowledgements. With acknowledgements -messages fetched by Logstash but not yet sent into the -Logstash pipeline will be requeued by the server if Logstash -shuts down. Acknowledgements will however hurt the message -throughput. - -This will only send an ack back every `prefetch_count` messages. -Working in batches provides a performance boost here. - -[id="{version}-plugins-{type}s-{plugin}-arguments"] -===== `arguments` - - * Value type is <> - * Default value is `{}` - -Extra queue arguments as an array. -To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}` - -[id="{version}-plugins-{type}s-{plugin}-auto_delete"] -===== `auto_delete` - - * Value type is <> - * Default value is `false` - -Should the queue be deleted on the broker when the last consumer -disconnects? Set this option to `false` if you want the queue to remain -on the broker, queueing up messages until a consumer comes along to -consume them. - -[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"] -===== `automatic_recovery` - - * Value type is <> - * Default value is `true` - -Set this to automatically recover from a broken connection. You almost certainly don't want to override this!!! 
- -[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"] -===== `connect_retry_interval` - - * Value type is <> - * Default value is `1` - -Time in seconds to wait before retrying a connection - -[id="{version}-plugins-{type}s-{plugin}-connection_timeout"] -===== `connection_timeout` - - * Value type is <> - * There is no default value for this setting. - -The default connection timeout in milliseconds. If not specified the timeout is infinite. - -[id="{version}-plugins-{type}s-{plugin}-durable"] -===== `durable` - - * Value type is <> - * Default value is `false` - -Is this queue durable? (aka; Should it survive a broker restart?) - -[id="{version}-plugins-{type}s-{plugin}-exchange"] -===== `exchange` - - * Value type is <> - * There is no default value for this setting. - -The name of the exchange to bind the queue to. Specify `exchange_type` -as well to declare the exchange if it does not exist - -[id="{version}-plugins-{type}s-{plugin}-exchange_type"] -===== `exchange_type` - - * Value type is <> - * There is no default value for this setting. - -The type of the exchange to bind to. Specifying this will cause this plugin -to declare the exchange if it does not exist. - -[id="{version}-plugins-{type}s-{plugin}-exclusive"] -===== `exclusive` - - * Value type is <> - * Default value is `false` - -Is the queue exclusive? Exclusive queues can only be used by the connection -that declared them and will be deleted when it is closed (e.g. due to a Logstash -restart). - -[id="{version}-plugins-{type}s-{plugin}-heartbeat"] -===== `heartbeat` - - * Value type is <> - * There is no default value for this setting. - -Heartbeat delay in seconds. If unspecified no heartbeats will be sent - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Common functionality for the rabbitmq input/output -RabbitMQ server address(es) -host can either be a single host, or a list of hosts -i.e. - host => "localhost" -or - host => ["host01", "host02] - -if multiple hosts are provided on the initial connection and any subsequent -recovery attempts of the hosts is chosen at random and connected to. -Note that only one host connection is active at a time. - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * Value type is <> - * Default value is `"logstash"` - -The routing key to use when binding a queue to the exchange. -This is only relevant for direct or topic exchanges. - -* Routing keys are ignored on fanout exchanges. -* Wildcards are not valid on direct exchanges. - -[id="{version}-plugins-{type}s-{plugin}-metadata_enabled"] -===== `metadata_enabled` - - * Value type is <> - * Default value is `false` - -Enable the storage of message headers and properties in `@metadata`. This may impact performance - -[id="{version}-plugins-{type}s-{plugin}-passive"] -===== `passive` - - * Value type is <> - * Default value is `false` - -If true the queue will be passively declared, meaning it must -already exist on the server. To have Logstash create the queue -if necessary leave this option as false. If actively declaring -a queue that already exists, the queue options for this plugin -(durable etc) must match those of the existing queue. 
- -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * Default value is `"guest"` - -RabbitMQ password - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `5672` - -RabbitMQ port to connect on - -[id="{version}-plugins-{type}s-{plugin}-prefetch_count"] -===== `prefetch_count` - - * Value type is <> - * Default value is `256` - -Prefetch count. If acknowledgements are enabled with the `ack` -option, specifies the number of outstanding unacknowledged -messages allowed. - -[id="{version}-plugins-{type}s-{plugin}-queue"] -===== `queue` - - * Value type is <> - * Default value is `""` - -The properties to extract from each message and store in a -@metadata field. - -Technically the exchange, redeliver, and routing-key -properties belong to the envelope and not the message but we -ignore that distinction here. However, we extract the -headers separately via get_headers even though the header -table technically is a message property. - -Freezing all strings so that code modifying the event's -@metadata field can't touch them. - -If updating this list, remember to update the documentation -above too. -The default codec for this plugin is JSON. You can override this to suit your particular needs however. -The name of the queue Logstash will consume events from. If -left empty, a transient queue with an randomly chosen name -will be created. - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` - - * Value type is <> - * There is no default value for this setting. - -Enable or disable SSL. -Note that by default remote certificate verification is off. -Specify ssl_certificate_path and ssl_certificate_password if you need -certificate verification - -[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"] -===== `ssl_certificate_password` - - * Value type is <> - * There is no default value for this setting. - -Password for the encrypted PKCS12 (.p12) certificate file specified in ssl_certificate_path - -[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"] -===== `ssl_certificate_path` - - * Value type is <> - * There is no default value for this setting. - -Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host - -[id="{version}-plugins-{type}s-{plugin}-ssl_version"] -===== `ssl_version` - - * Value type is <> - * Default value is `"TLSv1.2"` - -Version of the SSL protocol to use. - -[id="{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds"] -===== `subscription_retry_interval_seconds` - - * This is a required setting. - * Value type is <> - * Default value is `5` - -Amount of time in seconds to wait after a failed subscription request -before retrying. Subscribes can fail if the server goes away and then comes back. - -[id="{version}-plugins-{type}s-{plugin}-threads"] -===== `threads` - - * Value type is <> - * Default value is `1` - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * Default value is `"guest"` - -RabbitMQ username - -[id="{version}-plugins-{type}s-{plugin}-vhost"] -===== `vhost` - - * Value type is <> - * Default value is `"/"` - -The vhost (virtual host) to use. If you don't know what this -is, leave the default. With the exception of the default -vhost ("/"), names of vhosts should not begin with a forward -slash. 
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/rabbitmq-v6.0.2.asciidoc b/docs/versioned-plugins/inputs/rabbitmq-v6.0.2.asciidoc deleted file mode 100644 index 44a58bb02..000000000 --- a/docs/versioned-plugins/inputs/rabbitmq-v6.0.2.asciidoc +++ /dev/null @@ -1,395 +0,0 @@ -:plugin: rabbitmq -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v6.0.2 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-rabbitmq/blob/v6.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Rabbitmq input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Pull events from a http://www.rabbitmq.com/[RabbitMQ] queue. - -The default settings will create an entirely transient queue and listen for all messages by default. -If you need durability or any other advanced settings, please set the appropriate options - -This plugin uses the http://rubymarchhare.info/[March Hare] library -for interacting with the RabbitMQ server. Most configuration options -map directly to standard RabbitMQ and AMQP concepts. The -https://www.rabbitmq.com/amqp-0-9-1-reference.html[AMQP 0-9-1 reference guide] -and other parts of the RabbitMQ documentation are useful for deeper -understanding. - -The properties of messages received will be stored in the -`[@metadata][rabbitmq_properties]` field if the `@metadata_enabled` setting is checked. -Note that storing metadata may degrade performance. -The following properties may be available (in most cases dependent on whether -they were set by the sender): - -* app-id -* cluster-id -* consumer-tag -* content-encoding -* content-type -* correlation-id -* delivery-mode -* exchange -* expiration -* message-id -* priority -* redeliver -* reply-to -* routing-key -* timestamp -* type -* user-id - -For example, to get the RabbitMQ message's timestamp property -into the Logstash event's `@timestamp` field, use the date -filter to parse the `[@metadata][rabbitmq_properties][timestamp]` -field: -[source,ruby] - filter { - if [@metadata][rabbitmq_properties][timestamp] { - date { - match => ["[@metadata][rabbitmq_properties][timestamp]", "UNIX"] - } - } - } - -Additionally, any message headers will be saved in the -`[@metadata][rabbitmq_headers]` field. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Rabbitmq Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-ack>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-auto_delete>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclusive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_enabled>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-prefetch_count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-ack"] -===== `ack` - - * Value type is <> - * Default value is `true` - -Enable message acknowledgements. With acknowledgements -messages fetched by Logstash but not yet sent into the -Logstash pipeline will be requeued by the server if Logstash -shuts down. Acknowledgements will however hurt the message -throughput. - -This will only send an ack back every `prefetch_count` messages. -Working in batches provides a performance boost here. - -[id="{version}-plugins-{type}s-{plugin}-arguments"] -===== `arguments` - - * Value type is <> - * Default value is `{}` - -Extra queue arguments as an array. -To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}` - -[id="{version}-plugins-{type}s-{plugin}-auto_delete"] -===== `auto_delete` - - * Value type is <> - * Default value is `false` - -Should the queue be deleted on the broker when the last consumer -disconnects? Set this option to `false` if you want the queue to remain -on the broker, queueing up messages until a consumer comes along to -consume them. - -[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"] -===== `automatic_recovery` - - * Value type is <> - * Default value is `true` - -Set this to automatically recover from a broken connection. You almost certainly don't want to override this!!! 
- -[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"] -===== `connect_retry_interval` - - * Value type is <> - * Default value is `1` - -Time in seconds to wait before retrying a connection - -[id="{version}-plugins-{type}s-{plugin}-connection_timeout"] -===== `connection_timeout` - - * Value type is <> - * There is no default value for this setting. - -The default connection timeout in milliseconds. If not specified the timeout is infinite. - -[id="{version}-plugins-{type}s-{plugin}-durable"] -===== `durable` - - * Value type is <> - * Default value is `false` - -Is this queue durable? (aka; Should it survive a broker restart?) - -[id="{version}-plugins-{type}s-{plugin}-exchange"] -===== `exchange` - - * Value type is <> - * There is no default value for this setting. - -The name of the exchange to bind the queue to. Specify `exchange_type` -as well to declare the exchange if it does not exist - -[id="{version}-plugins-{type}s-{plugin}-exchange_type"] -===== `exchange_type` - - * Value type is <> - * There is no default value for this setting. - -The type of the exchange to bind to. Specifying this will cause this plugin -to declare the exchange if it does not exist. - -[id="{version}-plugins-{type}s-{plugin}-exclusive"] -===== `exclusive` - - * Value type is <> - * Default value is `false` - -Is the queue exclusive? Exclusive queues can only be used by the connection -that declared them and will be deleted when it is closed (e.g. due to a Logstash -restart). - -[id="{version}-plugins-{type}s-{plugin}-heartbeat"] -===== `heartbeat` - - * Value type is <> - * There is no default value for this setting. - -Heartbeat delay in seconds. If unspecified no heartbeats will be sent - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Common functionality for the rabbitmq input/output -RabbitMQ server address(es) -host can either be a single host, or a list of hosts -i.e. - host => "localhost" -or - host => ["host01", "host02] - -if multiple hosts are provided on the initial connection and any subsequent -recovery attempts of the hosts is chosen at random and connected to. -Note that only one host connection is active at a time. - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * Value type is <> - * Default value is `"logstash"` - -The routing key to use when binding a queue to the exchange. -This is only relevant for direct or topic exchanges. - -* Routing keys are ignored on fanout exchanges. -* Wildcards are not valid on direct exchanges. - -[id="{version}-plugins-{type}s-{plugin}-metadata_enabled"] -===== `metadata_enabled` - - * Value type is <> - * Default value is `false` - -Enable the storage of message headers and properties in `@metadata`. This may impact performance - -[id="{version}-plugins-{type}s-{plugin}-passive"] -===== `passive` - - * Value type is <> - * Default value is `false` - -If true the queue will be passively declared, meaning it must -already exist on the server. To have Logstash create the queue -if necessary leave this option as false. If actively declaring -a queue that already exists, the queue options for this plugin -(durable etc) must match those of the existing queue. 
- -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * Default value is `"guest"` - -RabbitMQ password - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `5672` - -RabbitMQ port to connect on - -[id="{version}-plugins-{type}s-{plugin}-prefetch_count"] -===== `prefetch_count` - - * Value type is <> - * Default value is `256` - -Prefetch count. If acknowledgements are enabled with the `ack` -option, specifies the number of outstanding unacknowledged -messages allowed. - -[id="{version}-plugins-{type}s-{plugin}-queue"] -===== `queue` - - * Value type is <> - * Default value is `""` - -The properties to extract from each message and store in a -@metadata field. - -Technically the exchange, redeliver, and routing-key -properties belong to the envelope and not the message but we -ignore that distinction here. However, we extract the -headers separately via get_headers even though the header -table technically is a message property. - -Freezing all strings so that code modifying the event's -@metadata field can't touch them. - -If updating this list, remember to update the documentation -above too. -The default codec for this plugin is JSON. You can override this to suit your particular needs however. -The name of the queue Logstash will consume events from. If -left empty, a transient queue with an randomly chosen name -will be created. - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` - - * Value type is <> - * There is no default value for this setting. - -Enable or disable SSL. -Note that by default remote certificate verification is off. -Specify ssl_certificate_path and ssl_certificate_password if you need -certificate verification - -[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"] -===== `ssl_certificate_password` - - * Value type is <> - * There is no default value for this setting. - -Password for the encrypted PKCS12 (.p12) certificate file specified in ssl_certificate_path - -[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"] -===== `ssl_certificate_path` - - * Value type is <> - * There is no default value for this setting. - -Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host - -[id="{version}-plugins-{type}s-{plugin}-ssl_version"] -===== `ssl_version` - - * Value type is <> - * Default value is `"TLSv1.2"` - -Version of the SSL protocol to use. - -[id="{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds"] -===== `subscription_retry_interval_seconds` - - * This is a required setting. - * Value type is <> - * Default value is `5` - -Amount of time in seconds to wait after a failed subscription request -before retrying. Subscribes can fail if the server goes away and then comes back. - -[id="{version}-plugins-{type}s-{plugin}-threads"] -===== `threads` - - * Value type is <> - * Default value is `1` - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * Default value is `"guest"` - -RabbitMQ username - -[id="{version}-plugins-{type}s-{plugin}-vhost"] -===== `vhost` - - * Value type is <> - * Default value is `"/"` - -The vhost (virtual host) to use. If you don't know what this -is, leave the default. With the exception of the default -vhost ("/"), names of vhosts should not begin with a forward -slash. 
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/rackspace-index.asciidoc b/docs/versioned-plugins/inputs/rackspace-index.asciidoc deleted file mode 100644 index 3314354f0..000000000 --- a/docs/versioned-plugins/inputs/rackspace-index.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -:plugin: rackspace -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-06-23 -|======================================================================= - -include::rackspace-v3.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/rackspace-v3.0.1.asciidoc b/docs/versioned-plugins/inputs/rackspace-v3.0.1.asciidoc deleted file mode 100644 index 2dce67d9b..000000000 --- a/docs/versioned-plugins/inputs/rackspace-v3.0.1.asciidoc +++ /dev/null @@ -1,102 +0,0 @@ -:plugin: rackspace -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-rackspace/blob/v3.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Rackspace input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Rackspace Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-api_key>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-claim>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-region>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ttl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-username>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-api_key"] -===== `api_key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Rackspace Cloud API Key - -[id="{version}-plugins-{type}s-{plugin}-claim"] -===== `claim` - - * Value type is <> - * Default value is `1` - -number of messages to claim -Min: 1, Max: 10 - -[id="{version}-plugins-{type}s-{plugin}-queue"] -===== `queue` - - * Value type is <> - * Default value is `"logstash"` - -Rackspace Queue Name - -[id="{version}-plugins-{type}s-{plugin}-region"] -===== `region` - - * Value type is <> - * Default value is `"dfw"` - -Rackspace region -`ord, dfw, lon, syd,` etc - -[id="{version}-plugins-{type}s-{plugin}-ttl"] -===== `ttl` - - * Value type is <> - * Default value is `60` - -length of time to hold claim -Min: 60 - -[id="{version}-plugins-{type}s-{plugin}-username"] -===== `username` - - * This is a required setting. 
- * Value type is <> - * There is no default value for this setting. - -Rackspace Cloud Username - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/redis-index.asciidoc b/docs/versioned-plugins/inputs/redis-index.asciidoc deleted file mode 100644 index 71634f4b1..000000000 --- a/docs/versioned-plugins/inputs/redis-index.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -:plugin: redis -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-09-12 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::redis-v3.1.6.asciidoc[] -include::redis-v3.1.5.asciidoc[] -include::redis-v3.1.4.asciidoc[] -include::redis-v3.1.3.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/redis-v3.1.3.asciidoc b/docs/versioned-plugins/inputs/redis-v3.1.3.asciidoc deleted file mode 100644 index 0606e9ede..000000000 --- a/docs/versioned-plugins/inputs/redis-v3.1.3.asciidoc +++ /dev/null @@ -1,139 +0,0 @@ -:plugin: redis -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-redis/blob/v3.1.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Redis input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This input will read events from a Redis instance; it supports both Redis channels and lists. -The list command (BLPOP) used by Logstash is supported in Redis v1.3.1+, and -the channel commands used by Logstash are found in Redis v1.3.8+. -While you may be able to make these Redis versions work, the best performance -and stability will be found in more recent stable versions. Versions 2.6.0+ -are recommended. - -For more information about Redis, see - -`batch_count` note: If you use the `batch_count` setting, you *must* use a Redis version 2.6.0 or -newer. Anything older does not support the operations used by batching. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Redis Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-batch_count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-data_type>> |<>, one of `["list", "channel", "pattern_channel"]`|Yes -| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-batch_count"] -===== `batch_count` - - * Value type is <> - * Default value is `125` - -The number of events to return from Redis using EVAL. - -[id="{version}-plugins-{type}s-{plugin}-data_type"] -===== `data_type` - - * This is a required setting. - * Value can be any of: `list`, `channel`, `pattern_channel` - * There is no default value for this setting. - -Specify either list or channel. If `redis\_type` is `list`, then we will BLPOP the -key. If `redis\_type` is `channel`, then we will SUBSCRIBE to the key. -If `redis\_type` is `pattern_channel`, then we will PSUBSCRIBE to the key. - -[id="{version}-plugins-{type}s-{plugin}-db"] -===== `db` - - * Value type is <> - * Default value is `0` - -The Redis database number. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"127.0.0.1"` - -The hostname of your Redis server. - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The name of a Redis list or channel. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password to authenticate with. There is no authentication by default. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `6379` - -The port to connect on. - -[id="{version}-plugins-{type}s-{plugin}-threads"] -===== `threads` - - * Value type is <> - * Default value is `1` - - - -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * Default value is `5` - -Initial connection timeout in seconds. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/redis-v3.1.4.asciidoc b/docs/versioned-plugins/inputs/redis-v3.1.4.asciidoc deleted file mode 100644 index 18e4636d8..000000000 --- a/docs/versioned-plugins/inputs/redis-v3.1.4.asciidoc +++ /dev/null @@ -1,139 +0,0 @@ -:plugin: redis -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.4 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-input-redis/blob/v3.1.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Redis input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This input will read events from a Redis instance; it supports both Redis channels and lists. -The list command (BLPOP) used by Logstash is supported in Redis v1.3.1+, and -the channel commands used by Logstash are found in Redis v1.3.8+. -While you may be able to make these Redis versions work, the best performance -and stability will be found in more recent stable versions. Versions 2.6.0+ -are recommended. - -For more information about Redis, see - -`batch_count` note: If you use the `batch_count` setting, you *must* use a Redis version 2.6.0 or -newer. Anything older does not support the operations used by batching. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Redis Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-batch_count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-data_type>> |<>, one of `["list", "channel", "pattern_channel"]`|Yes -| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-batch_count"] -===== `batch_count` - - * Value type is <> - * Default value is `125` - -The number of events to return from Redis using EVAL. - -[id="{version}-plugins-{type}s-{plugin}-data_type"] -===== `data_type` - - * This is a required setting. - * Value can be any of: `list`, `channel`, `pattern_channel` - * There is no default value for this setting. - -Specify either list or channel. If `data_type` is `list`, then we will BLPOP the -key. If `data_type` is `channel`, then we will SUBSCRIBE to the key. -If `data_type` is `pattern_channel`, then we will PSUBSCRIBE to the key. - -[id="{version}-plugins-{type}s-{plugin}-db"] -===== `db` - - * Value type is <> - * Default value is `0` - -The Redis database number. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"127.0.0.1"` - -The hostname of your Redis server. - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The name of a Redis list or channel. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password to authenticate with. There is no authentication by default. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `6379` - -The port to connect on.
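For example, a typical list-based configuration that pops batches from a local Redis instance could look like this sketch (the key name is illustrative):

[source,ruby]
    input {
      redis {
        host => "127.0.0.1"
        port => 6379
        data_type => "list"
        key => "logstash"
        # Requires Redis 2.6.0+, as noted above.
        batch_count => 125
      }
    }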
- -[id="{version}-plugins-{type}s-{plugin}-threads"] -===== `threads` - - * Value type is <> - * Default value is `1` - - - -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * Default value is `5` - -Initial connection timeout in seconds. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/redis-v3.1.5.asciidoc b/docs/versioned-plugins/inputs/redis-v3.1.5.asciidoc deleted file mode 100644 index 25ee9a42a..000000000 --- a/docs/versioned-plugins/inputs/redis-v3.1.5.asciidoc +++ /dev/null @@ -1,139 +0,0 @@ -:plugin: redis -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.5 -:release_date: 2017-09-12 -:changelog_url: https://github.com/logstash-plugins/logstash-input-redis/blob/v3.1.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Redis input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This input will read events from a Redis instance; it supports both Redis channels and lists. -The list command (BLPOP) used by Logstash is supported in Redis v1.3.1+, and -the channel commands used by Logstash are found in Redis v1.3.8+. -While you may be able to make these Redis versions work, the best performance -and stability will be found in more recent stable versions. Versions 2.6.0+ -are recommended. - -For more information about Redis, see - -`batch_count` note: If you use the `batch_count` setting, you *must* use a Redis version 2.6.0 or -newer. Anything older does not support the operations used by batching. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Redis Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-batch_count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-data_type>> |<>, one of `["list", "channel", "pattern_channel"]`|Yes -| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-batch_count"] -===== `batch_count` - - * Value type is <> - * Default value is `125` - -The number of events to return from Redis using EVAL. - -[id="{version}-plugins-{type}s-{plugin}-data_type"] -===== `data_type` - - * This is a required setting. - * Value can be any of: `list`, `channel`, `pattern_channel` - * There is no default value for this setting. - -Specify either list or channel. 
If `data_type` is `list`, then we will BLPOP the -key. If `data_type` is `channel`, then we will SUBSCRIBE to the key. -If `data_type` is `pattern_channel`, then we will PSUBSCRIBE to the key. - -[id="{version}-plugins-{type}s-{plugin}-db"] -===== `db` - - * Value type is <> - * Default value is `0` - -The Redis database number. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"127.0.0.1"` - -The hostname of your Redis server. - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The name of a Redis list or channel. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password to authenticate with. There is no authentication by default. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `6379` - -The port to connect on. - -[id="{version}-plugins-{type}s-{plugin}-threads"] -===== `threads` - - * Value type is <> - * Default value is `1` - - - -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * Default value is `5` - -Initial connection timeout in seconds. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/redis-v3.1.6.asciidoc b/docs/versioned-plugins/inputs/redis-v3.1.6.asciidoc deleted file mode 100644 index d7817f32a..000000000 --- a/docs/versioned-plugins/inputs/redis-v3.1.6.asciidoc +++ /dev/null @@ -1,139 +0,0 @@ -:plugin: redis -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.6 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-redis/blob/v3.1.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Redis input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This input will read events from a Redis instance; it supports both Redis channels and lists. -The list command (BLPOP) used by Logstash is supported in Redis v1.3.1+, and -the channel commands used by Logstash are found in Redis v1.3.8+. -While you may be able to make these Redis versions work, the best performance -and stability will be found in more recent stable versions. Versions 2.6.0+ -are recommended. - -For more information about Redis, see - -`batch_count` note: If you use the `batch_count` setting, you *must* use a Redis version 2.6.0 or -newer. Anything older does not support the operations used by batching. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Redis Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-batch_count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-data_type>> |<>, one of `["list", "channel", "pattern_channel"]`|Yes -| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-batch_count"] -===== `batch_count` - - * Value type is <> - * Default value is `125` - -The number of events to return from Redis using EVAL. - -[id="{version}-plugins-{type}s-{plugin}-data_type"] -===== `data_type` - - * This is a required setting. - * Value can be any of: `list`, `channel`, `pattern_channel` - * There is no default value for this setting. - -Specify either list or channel. If `redis\_type` is `list`, then we will BLPOP the -key. If `redis\_type` is `channel`, then we will SUBSCRIBE to the key. -If `redis\_type` is `pattern_channel`, then we will PSUBSCRIBE to the key. - -[id="{version}-plugins-{type}s-{plugin}-db"] -===== `db` - - * Value type is <> - * Default value is `0` - -The Redis database number. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"127.0.0.1"` - -The hostname of your Redis server. - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The name of a Redis list or channel. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password to authenticate with. There is no authentication by default. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `6379` - -The port to connect on. - -[id="{version}-plugins-{type}s-{plugin}-threads"] -===== `threads` - - * Value type is <> - * Default value is `1` - - - -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * Default value is `5` - -Initial connection timeout in seconds. 
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/relp-index.asciidoc b/docs/versioned-plugins/inputs/relp-index.asciidoc deleted file mode 100644 index edd7f331c..000000000 --- a/docs/versioned-plugins/inputs/relp-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: relp -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::relp-v3.0.3.asciidoc[] -include::relp-v3.0.2.asciidoc[] -include::relp-v3.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/relp-v3.0.1.asciidoc b/docs/versioned-plugins/inputs/relp-v3.0.1.asciidoc deleted file mode 100644 index b6624af26..000000000 --- a/docs/versioned-plugins/inputs/relp-v3.0.1.asciidoc +++ /dev/null @@ -1,126 +0,0 @@ -:plugin: relp -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-relp/blob/v3.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Relp input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read RELP events over a TCP socket. - -For more information about RELP, see - - -This protocol implements application-level acknowledgements to help protect -against message loss. - -Message acks only function as far as messages being put into the queue for -filters; anything lost after that point will not be retransmitted - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Relp Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-ssl_cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"0.0.0.0"` - -The address to listen on. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The port to listen on. 
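A minimal listener needs only a port. For example, the following sketch accepts RELP connections from rsyslog on all interfaces; 2514 is an arbitrary port choice:

[source,ruby]
    input {
      relp {
        host => "0.0.0.0"
        port => 2514
      }
    }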
- -[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"] -===== `ssl_cacert` - - * Value type is <> - * There is no default value for this setting. - -The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. - -[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] -===== `ssl_cert` - - * Value type is <> - * There is no default value for this setting. - -SSL certificate path - -[id="{version}-plugins-{type}s-{plugin}-ssl_enable"] -===== `ssl_enable` - - * Value type is <> - * Default value is `false` - -Enable SSL (must be set for other `ssl_` options to take effect). - -[id="{version}-plugins-{type}s-{plugin}-ssl_key"] -===== `ssl_key` - - * Value type is <> - * There is no default value for this setting. - -SSL key path - -[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] -===== `ssl_key_passphrase` - - * Value type is <> - * Default value is `nil` - -SSL key passphrase - -[id="{version}-plugins-{type}s-{plugin}-ssl_verify"] -===== `ssl_verify` - - * Value type is <> - * Default value is `true` - -Verify the identity of the other end of the SSL connection against the CA. -For input, sets the field `sslsubject` to that of the client certificate. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/relp-v3.0.2.asciidoc b/docs/versioned-plugins/inputs/relp-v3.0.2.asciidoc deleted file mode 100644 index f6b536910..000000000 --- a/docs/versioned-plugins/inputs/relp-v3.0.2.asciidoc +++ /dev/null @@ -1,126 +0,0 @@ -:plugin: relp -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.2 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-input-relp/blob/v3.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Relp input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read RELP events over a TCP socket. - -For more information about RELP, see - - -This protocol implements application-level acknowledgements to help protect -against message loss. - -Message acks only function as far as messages being put into the queue for -filters; anything lost after that point will not be retransmitted - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Relp Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-ssl_cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"0.0.0.0"` - -The address to listen on. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The port to listen on. - -[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"] -===== `ssl_cacert` - - * Value type is <> - * There is no default value for this setting. - -The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. - -[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] -===== `ssl_cert` - - * Value type is <> - * There is no default value for this setting. - -SSL certificate path - -[id="{version}-plugins-{type}s-{plugin}-ssl_enable"] -===== `ssl_enable` - - * Value type is <> - * Default value is `false` - -Enable SSL (must be set for other `ssl_` options to take effect). - -[id="{version}-plugins-{type}s-{plugin}-ssl_key"] -===== `ssl_key` - - * Value type is <> - * There is no default value for this setting. - -SSL key path - -[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] -===== `ssl_key_passphrase` - - * Value type is <> - * Default value is `nil` - -SSL key passphrase - -[id="{version}-plugins-{type}s-{plugin}-ssl_verify"] -===== `ssl_verify` - - * Value type is <> - * Default value is `true` - -Verify the identity of the other end of the SSL connection against the CA. -For input, sets the field `sslsubject` to that of the client certificate. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/relp-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/relp-v3.0.3.asciidoc deleted file mode 100644 index d6208e93e..000000000 --- a/docs/versioned-plugins/inputs/relp-v3.0.3.asciidoc +++ /dev/null @@ -1,126 +0,0 @@ -:plugin: relp -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-relp/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Relp input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read RELP events over a TCP socket. 
- -For more information about RELP, see - - -This protocol implements application-level acknowledgements to help protect -against message loss. - -Message acks only function as far as messages being put into the queue for -filters; anything lost after that point will not be retransmitted - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Relp Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-ssl_cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"0.0.0.0"` - -The address to listen on. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The port to listen on. - -[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"] -===== `ssl_cacert` - - * Value type is <> - * There is no default value for this setting. - -The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. - -[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] -===== `ssl_cert` - - * Value type is <> - * There is no default value for this setting. - -SSL certificate path - -[id="{version}-plugins-{type}s-{plugin}-ssl_enable"] -===== `ssl_enable` - - * Value type is <> - * Default value is `false` - -Enable SSL (must be set for other `ssl_` options to take effect). - -[id="{version}-plugins-{type}s-{plugin}-ssl_key"] -===== `ssl_key` - - * Value type is <> - * There is no default value for this setting. - -SSL key path - -[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] -===== `ssl_key_passphrase` - - * Value type is <> - * Default value is `nil` - -SSL key passphrase - -[id="{version}-plugins-{type}s-{plugin}-ssl_verify"] -===== `ssl_verify` - - * Value type is <> - * Default value is `true` - -Verify the identity of the other end of the SSL connection against the CA. -For input, sets the field `sslsubject` to that of the client certificate. 
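Combining the SSL options above, a TLS-enabled listener might be sketched as follows; every path and passphrase here is a placeholder:

[source,ruby]
    input {
      relp {
        port => 2514
        ssl_enable => true
        ssl_cert => "/etc/logstash/relp.crt"
        ssl_key => "/etc/logstash/relp.key"
        ssl_key_passphrase => "changeme"
        ssl_cacert => "/etc/logstash/ca.crt"
        # Verifies the client certificate and sets the sslsubject field.
        ssl_verify => true
      }
    }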
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/rss-index.asciidoc b/docs/versioned-plugins/inputs/rss-index.asciidoc
deleted file mode 100644
index 9dfad7247..000000000
--- a/docs/versioned-plugins/inputs/rss-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: rss
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-16
-| <> | 2017-06-23
-|=======================================================================
-
-include::rss-v3.0.4.asciidoc[]
-include::rss-v3.0.3.asciidoc[]
-include::rss-v3.0.2.asciidoc[]
-
diff --git a/docs/versioned-plugins/inputs/rss-v3.0.2.asciidoc b/docs/versioned-plugins/inputs/rss-v3.0.2.asciidoc
deleted file mode 100644
index ee9a436f3..000000000
--- a/docs/versioned-plugins/inputs/rss-v3.0.2.asciidoc
+++ /dev/null
@@ -1,70 +0,0 @@
-:plugin: rss
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.2
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-input-rss/blob/v3.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="plugins-{type}-{plugin}"]
-
-=== Rss input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Read events from an RSS or Atom feed. The configured feed is polled on a
-fixed interval, and each feed entry is turned into a Logstash event.
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Rss Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-interval>> |<<number,number>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-url>> |<<string,string>>|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-interval"]
-===== `interval`
-
- * This is a required setting.
- * Value type is <<number,number>>
- * There is no default value for this setting.
-
-Interval between consecutive polls of the feed. Value is in seconds.
-
-[id="{version}-plugins-{type}s-{plugin}-url"]
-===== `url`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-RSS/Atom feed URL.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/rss-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/rss-v3.0.3.asciidoc
deleted file mode 100644
index 54d360e8e..000000000
--- a/docs/versioned-plugins/inputs/rss-v3.0.3.asciidoc
+++ /dev/null
@@ -1,70 +0,0 @@
-:plugin: rss
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.3
-:release_date: 2017-08-16
-:changelog_url: https://github.com/logstash-plugins/logstash-input-rss/blob/v3.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Rss input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Read events from an RSS or Atom feed. The configured feed is polled on a
-fixed interval, and each feed entry is turned into a Logstash event.
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Rss Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-interval>> |<<number,number>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-url>> |<<string,string>>|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-interval"]
-===== `interval`
-
- * This is a required setting.
- * Value type is <<number,number>>
- * There is no default value for this setting.
-
-Interval between consecutive polls of the feed. Value is in seconds.
-
-[id="{version}-plugins-{type}s-{plugin}-url"]
-===== `url`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-RSS/Atom feed URL.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/rss-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/rss-v3.0.4.asciidoc
deleted file mode 100644
index 0b8046065..000000000
--- a/docs/versioned-plugins/inputs/rss-v3.0.4.asciidoc
+++ /dev/null
@@ -1,70 +0,0 @@
-:plugin: rss
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.4
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-input-rss/blob/v3.0.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Rss input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Read events from an RSS or Atom feed. The configured feed is polled on a
-fixed interval, and each feed entry is turned into a Logstash event.
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Rss Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
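-
-As a quick illustration before the option reference below, a minimal sketch
-(the feed URL is a placeholder):
-
-[source,ruby]
-----------------------------------
-input {
-  rss {
-    url => "https://example.com/feed.xml"   # placeholder feed URL
-    interval => 3600                         # poll once an hour
-  }
-}
-----------------------------------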
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-interval>> |<<number,number>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-url>> |<<string,string>>|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-interval"]
-===== `interval`
-
- * This is a required setting.
- * Value type is <<number,number>>
- * There is no default value for this setting.
-
-Interval between consecutive polls of the feed. Value is in seconds.
-
-[id="{version}-plugins-{type}s-{plugin}-url"]
-===== `url`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-RSS/Atom feed URL.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/s3sqs-index.asciidoc b/docs/versioned-plugins/inputs/s3sqs-index.asciidoc
deleted file mode 100644
index 6f9f04b8b..000000000
--- a/docs/versioned-plugins/inputs/s3sqs-index.asciidoc
+++ /dev/null
@@ -1,10 +0,0 @@
-:plugin: s3sqs
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-|=======================================================================
-
-
diff --git a/docs/versioned-plugins/inputs/salesforce-index.asciidoc b/docs/versioned-plugins/inputs/salesforce-index.asciidoc
deleted file mode 100644
index 469202798..000000000
--- a/docs/versioned-plugins/inputs/salesforce-index.asciidoc
+++ /dev/null
@@ -1,14 +0,0 @@
-:plugin: salesforce
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-08-16
-| <> | 2017-06-23
-|=======================================================================
-
-include::salesforce-v3.0.3.asciidoc[]
-include::salesforce-v3.0.2.asciidoc[]
-
diff --git a/docs/versioned-plugins/inputs/salesforce-v3.0.2.asciidoc b/docs/versioned-plugins/inputs/salesforce-v3.0.2.asciidoc
deleted file mode 100644
index 3e692140c..000000000
--- a/docs/versioned-plugins/inputs/salesforce-v3.0.2.asciidoc
+++ /dev/null
@@ -1,199 +0,0 @@
-:plugin: salesforce
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.2
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-input-salesforce/blob/v3.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="plugins-{type}-{plugin}"]
-
-=== Salesforce input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This Logstash input plugin allows you to query Salesforce using SOQL and puts the results
-into Logstash, one row per event. You can configure it to pull entire sObjects or only
-specific fields.
-
-NOTE: This input plugin will stop after all the results of the query are processed and will
-need to be re-run to fetch new results. It does not utilize the streaming API.
-
-In order to use this plugin, you will need to create a new SFDC Application using
-OAuth. More details can be found here:
-https://help.salesforce.com/apex/HTViewHelpDoc?id=connected_app_create.htm
-
-You will also need a username, password, and security token for your Salesforce instance.
-More details for generating a token can be found here:
-https://help.salesforce.com/apex/HTViewHelpDoc?id=user_security_token.htm
-
-In addition to specifying an sObject, you can also supply a list of API fields
-that will be used in the SOQL query.
-
-==== Example
-This example prints all the Salesforce Opportunities to standard out:
-
-[source,ruby]
-----------------------------------
-input {
-  salesforce {
-    client_id => 'OAUTH CLIENT ID FROM YOUR SFDC APP'
-    client_secret => 'OAUTH CLIENT SECRET FROM YOUR SFDC APP'
-    username => 'email@example.com'
-    password => 'super-secret'
-    security_token => 'SECURITY TOKEN FOR THIS USER'
-    sfdc_object_name => 'Opportunity'
-  }
-}
-
-output {
-  stdout {
-    codec => rubydebug
-  }
-}
-----------------------------------
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Salesforce Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-api_version>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-client_id>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-client_secret>> |<<password,password>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-password>> |<<password,password>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-security_token>> |<<password,password>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-sfdc_fields>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-sfdc_filters>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-sfdc_object_name>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-to_underscores>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-use_test_sandbox>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-username>> |<<string,string>>|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-api_version"]
-===== `api_version`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-By default, this uses the default Restforce API version.
-To override it, set this to a version string, for example `"32.0"`.
-
-[id="{version}-plugins-{type}s-{plugin}-client_id"]
-===== `client_id`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Consumer Key for authentication. You must set up a new SFDC
-connected app with OAuth to use this plugin. More information
-can be found here:
-https://help.salesforce.com/apex/HTViewHelpDoc?id=connected_app_create.htm
-
-[id="{version}-plugins-{type}s-{plugin}-client_secret"]
-===== `client_secret`
-
- * This is a required setting.
- * Value type is <<password,password>>
- * There is no default value for this setting.
-
-Consumer Secret from your OAuth-enabled connected app.
-
-[id="{version}-plugins-{type}s-{plugin}-password"]
-===== `password`
-
- * This is a required setting.
- * Value type is <<password,password>>
- * There is no default value for this setting.
-
-The password used to log in to Salesforce.
-
-[id="{version}-plugins-{type}s-{plugin}-security_token"]
-===== `security_token`
-
- * This is a required setting.
- * Value type is <<password,password>>
- * There is no default value for this setting.
-
-The security token for this account. For more information about
-generating a security token, see:
-https://help.salesforce.com/apex/HTViewHelpDoc?id=user_security_token.htm
-
-[id="{version}-plugins-{type}s-{plugin}-sfdc_fields"]
-===== `sfdc_fields`
-
- * Value type is <<array,array>>
- * Default value is `[]`
-
-These are the field names to return in the Salesforce query.
-If this is empty, all fields are returned.
-
-[id="{version}-plugins-{type}s-{plugin}-sfdc_filters"]
-===== `sfdc_filters`
-
- * Value type is <<string,string>>
- * Default value is `""`
-
-These options will be added to the WHERE clause in the
-SOQL statement. Additional fields can be filtered on by
-adding `field1 = value1 AND field2 = value2 AND ...`.
-
-[id="{version}-plugins-{type}s-{plugin}-sfdc_object_name"]
-===== `sfdc_object_name`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The name of the Salesforce object you are querying.
-
-[id="{version}-plugins-{type}s-{plugin}-to_underscores"]
-===== `to_underscores`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-Setting this to true will convert SFDC's NamedFields__c to named_fields__c.
-
-[id="{version}-plugins-{type}s-{plugin}-use_test_sandbox"]
-===== `use_test_sandbox`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-Set this to true to connect to a sandbox Salesforce instance,
-logging in through test.salesforce.com.
-
-[id="{version}-plugins-{type}s-{plugin}-username"]
-===== `username`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-A valid Salesforce user name, usually your email address.
-Used for authentication; queries run as this user.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/salesforce-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/salesforce-v3.0.3.asciidoc
deleted file mode 100644
index 0cd53d62d..000000000
--- a/docs/versioned-plugins/inputs/salesforce-v3.0.3.asciidoc
+++ /dev/null
@@ -1,199 +0,0 @@
-:plugin: salesforce
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.3
-:release_date: 2017-08-16
-:changelog_url: https://github.com/logstash-plugins/logstash-input-salesforce/blob/v3.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Salesforce input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This Logstash input plugin allows you to query Salesforce using SOQL and puts the results
-into Logstash, one row per event. You can configure it to pull entire sObjects or only
-specific fields.
-
-NOTE: This input plugin will stop after all the results of the query are processed and will
-need to be re-run to fetch new results. It does not utilize the streaming API.
-
-In order to use this plugin, you will need to create a new SFDC Application using
-OAuth. More details can be found here:
-https://help.salesforce.com/apex/HTViewHelpDoc?id=connected_app_create.htm
-
-You will also need a username, password, and security token for your Salesforce instance.
-More details for generating a token can be found here:
-https://help.salesforce.com/apex/HTViewHelpDoc?id=user_security_token.htm
-
-In addition to specifying an sObject, you can also supply a list of API fields
-that will be used in the SOQL query.
-
-==== Example
-This example prints all the Salesforce Opportunities to standard out:
-
-[source,ruby]
-----------------------------------
-input {
-  salesforce {
-    client_id => 'OAUTH CLIENT ID FROM YOUR SFDC APP'
-    client_secret => 'OAUTH CLIENT SECRET FROM YOUR SFDC APP'
-    username => 'email@example.com'
-    password => 'super-secret'
-    security_token => 'SECURITY TOKEN FOR THIS USER'
-    sfdc_object_name => 'Opportunity'
-  }
-}
-
-output {
-  stdout {
-    codec => rubydebug
-  }
-}
-----------------------------------
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Salesforce Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-api_version>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-client_id>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-client_secret>> |<<password,password>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-password>> |<<password,password>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-security_token>> |<<password,password>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-sfdc_fields>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-sfdc_filters>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-sfdc_object_name>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-to_underscores>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-use_test_sandbox>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-username>> |<<string,string>>|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-api_version"]
-===== `api_version`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-By default, this uses the default Restforce API version.
-To override it, set this to a version string, for example `"32.0"`.
-
-[id="{version}-plugins-{type}s-{plugin}-client_id"]
-===== `client_id`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Consumer Key for authentication. You must set up a new SFDC
-connected app with OAuth to use this plugin. More information
-can be found here:
-https://help.salesforce.com/apex/HTViewHelpDoc?id=connected_app_create.htm
-
-[id="{version}-plugins-{type}s-{plugin}-client_secret"]
-===== `client_secret`
-
- * This is a required setting.
- * Value type is <<password,password>>
- * There is no default value for this setting.
-
-Consumer Secret from your OAuth-enabled connected app.
-
-[id="{version}-plugins-{type}s-{plugin}-password"]
-===== `password`
-
- * This is a required setting.
- * Value type is <<password,password>>
- * There is no default value for this setting.
-
-The password used to log in to Salesforce.
-
-[id="{version}-plugins-{type}s-{plugin}-security_token"]
-===== `security_token`
-
- * This is a required setting.
- * Value type is <<password,password>>
- * There is no default value for this setting.
-
-The security token for this account. For more information about
-generating a security token, see:
-https://help.salesforce.com/apex/HTViewHelpDoc?id=user_security_token.htm
-
-[id="{version}-plugins-{type}s-{plugin}-sfdc_fields"]
-===== `sfdc_fields`
-
- * Value type is <<array,array>>
- * Default value is `[]`
-
-These are the field names to return in the Salesforce query.
-If this is empty, all fields are returned.
-
-[id="{version}-plugins-{type}s-{plugin}-sfdc_filters"]
-===== `sfdc_filters`
-
- * Value type is <<string,string>>
- * Default value is `""`
-
-These options will be added to the WHERE clause in the
-SOQL statement. Additional fields can be filtered on by
-adding `field1 = value1 AND field2 = value2 AND ...`.
-
-[id="{version}-plugins-{type}s-{plugin}-sfdc_object_name"]
-===== `sfdc_object_name`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The name of the Salesforce object you are querying.
-
-[id="{version}-plugins-{type}s-{plugin}-to_underscores"]
-===== `to_underscores`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-Setting this to true will convert SFDC's NamedFields__c to named_fields__c.
-
-[id="{version}-plugins-{type}s-{plugin}-use_test_sandbox"]
-===== `use_test_sandbox`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-Set this to true to connect to a sandbox Salesforce instance,
-logging in through test.salesforce.com.
-
-[id="{version}-plugins-{type}s-{plugin}-username"]
-===== `username`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-A valid Salesforce user name, usually your email address.
-Used for authentication; queries run as this user.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/snmptrap-index.asciidoc b/docs/versioned-plugins/inputs/snmptrap-index.asciidoc
deleted file mode 100644
index 847b27946..000000000
--- a/docs/versioned-plugins/inputs/snmptrap-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: snmptrap
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-16
-| <> | 2017-06-23
-|=======================================================================
-
-include::snmptrap-v3.0.5.asciidoc[]
-include::snmptrap-v3.0.4.asciidoc[]
-include::snmptrap-v3.0.3.asciidoc[]
-
diff --git a/docs/versioned-plugins/inputs/snmptrap-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/snmptrap-v3.0.3.asciidoc
deleted file mode 100644
index 6184ba8b2..000000000
--- a/docs/versioned-plugins/inputs/snmptrap-v3.0.3.asciidoc
+++ /dev/null
@@ -1,88 +0,0 @@
-:plugin: snmptrap
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.3
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-input-snmptrap/blob/v3.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="plugins-{type}-{plugin}"]
-
-=== Snmptrap input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Read SNMP trap messages as events.
-
-The resulting `@message` looks like:
-[source,ruby]
-  #<SNMP::SNMPv1_Trap:0x... @varbind_list=[#<SNMP::VarBind ...>], @timestamp=#<SNMP::TimeTicks ...>, @generic_trap=6,
-  @enterprise=[1.2.3.4.5.6], @source_ip="127.0.0.1", @agent_addr=#<SNMP::IpAddress ...>,
-  @specific_trap=99>
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Snmptrap Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-community>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-yamlmibdir>> |<<string,string>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-community"]
-===== `community`
-
- * Value type is <<string,string>>
- * Default value is `"public"`
-
-SNMP Community String to listen for.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * Value type is <<string,string>>
- * Default value is `"0.0.0.0"`
-
-The address to listen on.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <<number,number>>
- * Default value is `1062`
-
-The port to listen on. Remember that ports less than 1024 (privileged
-ports) may require root to use, hence the default of 1062.
-
-[id="{version}-plugins-{type}s-{plugin}-yamlmibdir"]
-===== `yamlmibdir`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Directory of YAML MIB maps (same format ruby-snmp uses).
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/snmptrap-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/snmptrap-v3.0.4.asciidoc
deleted file mode 100644
index 799f83190..000000000
--- a/docs/versioned-plugins/inputs/snmptrap-v3.0.4.asciidoc
+++ /dev/null
@@ -1,88 +0,0 @@
-:plugin: snmptrap
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.4
-:release_date: 2017-08-16
-:changelog_url: https://github.com/logstash-plugins/logstash-input-snmptrap/blob/v3.0.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Snmptrap input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Read SNMP trap messages as events.
-
-The resulting `@message` looks like:
-[source,ruby]
-  #<SNMP::SNMPv1_Trap:0x... @varbind_list=[#<SNMP::VarBind ...>], @timestamp=#<SNMP::TimeTicks ...>, @generic_trap=6,
-  @enterprise=[1.2.3.4.5.6], @source_ip="127.0.0.1", @agent_addr=#<SNMP::IpAddress ...>,
-  @specific_trap=99>
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Snmptrap Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-community>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-yamlmibdir>> |<<string,string>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-community"]
-===== `community`
-
- * Value type is <<string,string>>
- * Default value is `"public"`
-
-SNMP Community String to listen for.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * Value type is <<string,string>>
- * Default value is `"0.0.0.0"`
-
-The address to listen on.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <<number,number>>
- * Default value is `1062`
-
-The port to listen on. Remember that ports less than 1024 (privileged
-ports) may require root to use, hence the default of 1062.
-
-[id="{version}-plugins-{type}s-{plugin}-yamlmibdir"]
-===== `yamlmibdir`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Directory of YAML MIB maps (same format ruby-snmp uses).
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/snmptrap-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/snmptrap-v3.0.5.asciidoc
deleted file mode 100644
index 10f0dfabf..000000000
--- a/docs/versioned-plugins/inputs/snmptrap-v3.0.5.asciidoc
+++ /dev/null
@@ -1,88 +0,0 @@
-:plugin: snmptrap
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.5
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-input-snmptrap/blob/v3.0.5/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Snmptrap input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Read SNMP trap messages as events.
-
-The resulting `@message` looks like:
-[source,ruby]
-  #<SNMP::SNMPv1_Trap:0x... @varbind_list=[#<SNMP::VarBind ...>], @timestamp=#<SNMP::TimeTicks ...>, @generic_trap=6,
-  @enterprise=[1.2.3.4.5.6], @source_ip="127.0.0.1", @agent_addr=#<SNMP::IpAddress ...>,
-  @specific_trap=99>
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Snmptrap Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-community>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-yamlmibdir>> |<<string,string>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
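-
-For orientation, a minimal sketch that simply mirrors the documented
-defaults shown below:
-
-[source,ruby]
-----------------------------------
-input {
-  snmptrap {
-    host => "0.0.0.0"        # listen on all interfaces (the default)
-    port => 1062             # unprivileged default port
-    community => "public"    # community string to listen for (the default)
-  }
-}
-----------------------------------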
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-community"]
-===== `community`
-
- * Value type is <<string,string>>
- * Default value is `"public"`
-
-SNMP Community String to listen for.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * Value type is <<string,string>>
- * Default value is `"0.0.0.0"`
-
-The address to listen on.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <<number,number>>
- * Default value is `1062`
-
-The port to listen on. Remember that ports less than 1024 (privileged
-ports) may require root to use, hence the default of 1062.
-
-[id="{version}-plugins-{type}s-{plugin}-yamlmibdir"]
-===== `yamlmibdir`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Directory of YAML MIB maps (same format ruby-snmp uses).
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/sqlite-index.asciidoc b/docs/versioned-plugins/inputs/sqlite-index.asciidoc
deleted file mode 100644
index e07d34307..000000000
--- a/docs/versioned-plugins/inputs/sqlite-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: sqlite
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-16
-| <> | 2017-06-23
-|=======================================================================
-
-include::sqlite-v3.0.3.asciidoc[]
-include::sqlite-v3.0.2.asciidoc[]
-include::sqlite-v3.0.1.asciidoc[]
-
diff --git a/docs/versioned-plugins/inputs/sqlite-v3.0.1.asciidoc b/docs/versioned-plugins/inputs/sqlite-v3.0.1.asciidoc
deleted file mode 100644
index 805744363..000000000
--- a/docs/versioned-plugins/inputs/sqlite-v3.0.1.asciidoc
+++ /dev/null
@@ -1,124 +0,0 @@
-:plugin: sqlite
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.1
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-input-sqlite/blob/v3.0.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="plugins-{type}-{plugin}"]
-
-=== Sqlite input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Read rows from an SQLite database.
-
-This is most useful in cases where you are logging directly to a table.
-Any tables being watched must have an `id` column that is monotonically
-increasing.
-
-All tables are read by default except:
-
-* ones matching `sqlite_%` - these are internal/administrative tables for SQLite
-* `since_table` - this is used by this plugin to track state.
-
-Example
-[source,sql]
-    % sqlite /tmp/example.db
-    sqlite> CREATE TABLE weblogs (
-        id INTEGER PRIMARY KEY AUTOINCREMENT,
-        ip STRING,
-        request STRING,
-        response INTEGER);
-    sqlite> INSERT INTO weblogs (ip, request, response)
-            VALUES ("1.2.3.4", "/index.html", 200);
-
-Then with this Logstash config:
-[source,ruby]
-    input {
-      sqlite {
-        path => "/tmp/example.db"
-        type => "weblogs"
-      }
-    }
-    output {
-      stdout {
-        codec => rubydebug
-      }
-    }
-
-Sample output:
-[source,ruby]
-    {
-      "@source" => "sqlite://sadness/tmp/x.db",
-      "@tags" => [],
-      "@fields" => {
-        "ip" => "1.2.3.4",
-        "request" => "/index.html",
-        "response" => 200
-      },
-      "@timestamp" => "2013-05-29T06:16:30.850Z",
-      "@source_host" => "sadness",
-      "@source_path" => "/tmp/x.db",
-      "@message" => "",
-      "@type" => "foo"
-    }
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Sqlite Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-batch>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-exclude_tables>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-path>> |<<string,string>>|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-batch"]
-===== `batch`
-
- * Value type is <<number,number>>
- * Default value is `5`
-
-How many rows to fetch at a time from each `SELECT` call.
-
-[id="{version}-plugins-{type}s-{plugin}-exclude_tables"]
-===== `exclude_tables`
-
- * Value type is <<array,array>>
- * Default value is `[]`
-
-Any tables to exclude by name.
-By default all tables are followed.
-
-[id="{version}-plugins-{type}s-{plugin}-path"]
-===== `path`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The path to the sqlite database file.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/sqlite-v3.0.2.asciidoc b/docs/versioned-plugins/inputs/sqlite-v3.0.2.asciidoc
deleted file mode 100644
index 78cbe4ebf..000000000
--- a/docs/versioned-plugins/inputs/sqlite-v3.0.2.asciidoc
+++ /dev/null
@@ -1,124 +0,0 @@
-:plugin: sqlite
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.2
-:release_date: 2017-08-16
-:changelog_url: https://github.com/logstash-plugins/logstash-input-sqlite/blob/v3.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Sqlite input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Read rows from an SQLite database.
-
-This is most useful in cases where you are logging directly to a table.
-Any tables being watched must have an `id` column that is monotonically
-increasing.
-
-All tables are read by default except:
-
-* ones matching `sqlite_%` - these are internal/administrative tables for SQLite
-* `since_table` - this is used by this plugin to track state.
-
-Example
-[source,sql]
-    % sqlite /tmp/example.db
-    sqlite> CREATE TABLE weblogs (
-        id INTEGER PRIMARY KEY AUTOINCREMENT,
-        ip STRING,
-        request STRING,
-        response INTEGER);
-    sqlite> INSERT INTO weblogs (ip, request, response)
-            VALUES ("1.2.3.4", "/index.html", 200);
-
-Then with this Logstash config:
-[source,ruby]
-    input {
-      sqlite {
-        path => "/tmp/example.db"
-        type => "weblogs"
-      }
-    }
-    output {
-      stdout {
-        codec => rubydebug
-      }
-    }
-
-Sample output:
-[source,ruby]
-    {
-      "@source" => "sqlite://sadness/tmp/x.db",
-      "@tags" => [],
-      "@fields" => {
-        "ip" => "1.2.3.4",
-        "request" => "/index.html",
-        "response" => 200
-      },
-      "@timestamp" => "2013-05-29T06:16:30.850Z",
-      "@source_host" => "sadness",
-      "@source_path" => "/tmp/x.db",
-      "@message" => "",
-      "@type" => "foo"
-    }
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Sqlite Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-batch>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-exclude_tables>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-path>> |<<string,string>>|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-batch"]
-===== `batch`
-
- * Value type is <<number,number>>
- * Default value is `5`
-
-How many rows to fetch at a time from each `SELECT` call.
-
-[id="{version}-plugins-{type}s-{plugin}-exclude_tables"]
-===== `exclude_tables`
-
- * Value type is <<array,array>>
- * Default value is `[]`
-
-Any tables to exclude by name.
-By default all tables are followed.
-
-[id="{version}-plugins-{type}s-{plugin}-path"]
-===== `path`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The path to the sqlite database file.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/sqlite-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/sqlite-v3.0.3.asciidoc
deleted file mode 100644
index 84e4522db..000000000
--- a/docs/versioned-plugins/inputs/sqlite-v3.0.3.asciidoc
+++ /dev/null
@@ -1,124 +0,0 @@
-:plugin: sqlite
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.3
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-input-sqlite/blob/v3.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Sqlite input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Read rows from an SQLite database.
-
-This is most useful in cases where you are logging directly to a table.
-Any tables being watched must have an `id` column that is monotonically
-increasing.
-
-All tables are read by default except:
-
-* ones matching `sqlite_%` - these are internal/administrative tables for SQLite
-* `since_table` - this is used by this plugin to track state.
-
-Example
-[source,sql]
-    % sqlite /tmp/example.db
-    sqlite> CREATE TABLE weblogs (
-        id INTEGER PRIMARY KEY AUTOINCREMENT,
-        ip STRING,
-        request STRING,
-        response INTEGER);
-    sqlite> INSERT INTO weblogs (ip, request, response)
-            VALUES ("1.2.3.4", "/index.html", 200);
-
-Then with this Logstash config:
-[source,ruby]
-    input {
-      sqlite {
-        path => "/tmp/example.db"
-        type => "weblogs"
-      }
-    }
-    output {
-      stdout {
-        codec => rubydebug
-      }
-    }
-
-Sample output:
-[source,ruby]
-    {
-      "@source" => "sqlite://sadness/tmp/x.db",
-      "@tags" => [],
-      "@fields" => {
-        "ip" => "1.2.3.4",
-        "request" => "/index.html",
-        "response" => 200
-      },
-      "@timestamp" => "2013-05-29T06:16:30.850Z",
-      "@source_host" => "sadness",
-      "@source_path" => "/tmp/x.db",
-      "@message" => "",
-      "@type" => "foo"
-    }
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Sqlite Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-batch>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-exclude_tables>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-path>> |<<string,string>>|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-batch"]
-===== `batch`
-
- * Value type is <<number,number>>
- * Default value is `5`
-
-How many rows to fetch at a time from each `SELECT` call.
-
-[id="{version}-plugins-{type}s-{plugin}-exclude_tables"]
-===== `exclude_tables`
-
- * Value type is <<array,array>>
- * Default value is `[]`
-
-Any tables to exclude by name.
-By default all tables are followed.
-
-[id="{version}-plugins-{type}s-{plugin}-path"]
-===== `path`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The path to the sqlite database file.
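-
-A minimal sketch combining the options above (the database path and the
-excluded table name are placeholders):
-
-[source,ruby]
-----------------------------------
-input {
-  sqlite {
-    path => "/var/data/app.db"      # placeholder database file
-    batch => 50                     # rows fetched per SELECT call
-    exclude_tables => ["audit"]     # placeholder table to skip
-  }
-}
-----------------------------------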
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/sqs-index.asciidoc b/docs/versioned-plugins/inputs/sqs-index.asciidoc
deleted file mode 100644
index 560388a05..000000000
--- a/docs/versioned-plugins/inputs/sqs-index.asciidoc
+++ /dev/null
@@ -1,18 +0,0 @@
-:plugin: sqs
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-16
-| <> | 2017-06-23
-| <> | 2017-05-04
-|=======================================================================
-
-include::sqs-v3.0.6.asciidoc[]
-include::sqs-v3.0.5.asciidoc[]
-include::sqs-v3.0.4.asciidoc[]
-include::sqs-v3.0.3.asciidoc[]
-
diff --git a/docs/versioned-plugins/inputs/sqs-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/sqs-v3.0.3.asciidoc
deleted file mode 100644
index 3665311f0..000000000
--- a/docs/versioned-plugins/inputs/sqs-v3.0.3.asciidoc
+++ /dev/null
@@ -1,219 +0,0 @@
-:plugin: sqs
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.3
-:release_date: 2017-05-04
-:changelog_url: https://github.com/logstash-plugins/logstash-input-sqs/blob/v3.0.3/CHANGELOG.md
-:include_path: ../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="plugins-{type}-{plugin}"]
-
-=== Sqs
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-
-Pull events from an Amazon Web Services Simple Queue Service (SQS) queue.
-
-SQS is a simple, scalable queue system that is part of the
-Amazon Web Services suite of tools.
-
-Although SQS is similar to other queuing systems like AMQP, it
-uses a custom API and requires that you have an AWS account.
-See http://aws.amazon.com/sqs/ for more details on how SQS works,
-what the pricing schedule looks like, and how to set up a queue.
-
-To use this plugin, you *must*:
-
- * Have an AWS account
- * Set up an SQS queue
- * Create an identity that has access to consume messages from the queue.
-
-The "consumer" identity must have the following permissions on the queue:
-
- * `sqs:ChangeMessageVisibility`
- * `sqs:ChangeMessageVisibilityBatch`
- * `sqs:DeleteMessage`
- * `sqs:DeleteMessageBatch`
- * `sqs:GetQueueAttributes`
- * `sqs:GetQueueUrl`
- * `sqs:ListQueues`
- * `sqs:ReceiveMessage`
-
-Typically, you should set up an IAM policy, create a user, and apply the IAM policy to the user.
-A sample policy is as follows:
-[source,json]
-    {
-      "Statement": [
-        {
-          "Action": [
-            "sqs:ChangeMessageVisibility",
-            "sqs:ChangeMessageVisibilityBatch",
-            "sqs:DeleteMessage",
-            "sqs:DeleteMessageBatch",
-            "sqs:GetQueueAttributes",
-            "sqs:GetQueueUrl",
-            "sqs:ListQueues",
-            "sqs:ReceiveMessage"
-          ],
-          "Effect": "Allow",
-          "Resource": [
-            "arn:aws:sqs:us-east-1:123456789012:Logstash"
-          ]
-        }
-      ]
-    }
-
-See http://aws.amazon.com/iam/ for more details on setting up AWS identities.
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Sqs Input Configuration Options
-
-This plugin supports the following configuration options plus the <<plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-id_field>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-md5_field>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-polling_frequency>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-queue>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-region>> |<<string,string>>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No
-| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-sent_timestamp_field>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-session_token>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-threads>> |<<number,number>>|No
-|=======================================================================
-
-Also see <<plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-access_key_id"]
-===== `access_key_id`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
-
-1. Static configuration, using `access_key_id` and `secret_access_key` params in the Logstash plugin config
-2. External credentials file specified by `aws_credentials_file`
-3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
-4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
-5. IAM Instance Profile (available when running inside EC2)
-
-[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"]
-===== `aws_credentials_file`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Path to a YAML file containing a hash of AWS credentials.
-This file will only be loaded if `access_key_id` and
-`secret_access_key` aren't set. The contents of the
-file should look like this:
-
-[source,ruby]
-----------------------------------
-    :access_key_id: "12345"
-    :secret_access_key: "54321"
-----------------------------------
-
-
-[id="{version}-plugins-{type}s-{plugin}-id_field"]
-===== `id_field`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Name of the event field in which to store the SQS message ID.
-
-[id="{version}-plugins-{type}s-{plugin}-md5_field"]
-===== `md5_field`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Name of the event field in which to store the SQS message MD5 checksum.
-
-[id="{version}-plugins-{type}s-{plugin}-polling_frequency"]
-===== `polling_frequency`
-
- * Value type is <<number,number>>
- * Default value is `20`
-
-Polling frequency, in seconds. The default is 20.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy_uri"]
-===== `proxy_uri`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-URI of a proxy server, if required.
-
-[id="{version}-plugins-{type}s-{plugin}-queue"]
-===== `queue`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Name of the SQS queue to pull messages from. Note that this is just the name of the queue, not the URL or ARN.
-
-[id="{version}-plugins-{type}s-{plugin}-region"]
-===== `region`
-
- * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
- * Default value is `"us-east-1"`
-
-The AWS Region.
-
-[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
-===== `secret_access_key`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The AWS Secret Access Key.
-
-[id="{version}-plugins-{type}s-{plugin}-sent_timestamp_field"]
-===== `sent_timestamp_field`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Name of the event field in which to store the SQS message Sent Timestamp.
-
-[id="{version}-plugins-{type}s-{plugin}-session_token"]
-===== `session_token`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The AWS session token for temporary credentials.
-
-[id="{version}-plugins-{type}s-{plugin}-threads"]
-===== `threads`
-
- * Value type is <<number,number>>
- * Default value is `1`
-
-
-
-
-
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/inputs/sqs-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/sqs-v3.0.4.asciidoc
deleted file mode 100644
index 51d7a3b8f..000000000
--- a/docs/versioned-plugins/inputs/sqs-v3.0.4.asciidoc
+++ /dev/null
@@ -1,220 +0,0 @@
-:plugin: sqs
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.4
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-input-sqs/blob/v3.0.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="plugins-{type}-{plugin}"]
-
-=== Sqs input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-
-Pull events from an Amazon Web Services Simple Queue Service (SQS) queue.
-
-SQS is a simple, scalable queue system that is part of the
-Amazon Web Services suite of tools.
-
-Although SQS is similar to other queuing systems like AMQP, it
-uses a custom API and requires that you have an AWS account.
-See http://aws.amazon.com/sqs/ for more details on how SQS works,
-what the pricing schedule looks like, and how to set up a queue.
-
-To use this plugin, you *must*:
-
- * Have an AWS account
- * Set up an SQS queue
- * Create an identity that has access to consume messages from the queue.
-
-The "consumer" identity must have the following permissions on the queue:
-
- * `sqs:ChangeMessageVisibility`
- * `sqs:ChangeMessageVisibilityBatch`
- * `sqs:DeleteMessage`
- * `sqs:DeleteMessageBatch`
- * `sqs:GetQueueAttributes`
- * `sqs:GetQueueUrl`
- * `sqs:ListQueues`
- * `sqs:ReceiveMessage`
-
-Typically, you should set up an IAM policy, create a user, and apply the IAM policy to the user.
-A sample policy is as follows:
-[source,json]
-    {
-      "Statement": [
-        {
-          "Action": [
-            "sqs:ChangeMessageVisibility",
-            "sqs:ChangeMessageVisibilityBatch",
-            "sqs:DeleteMessage",
-            "sqs:DeleteMessageBatch",
-            "sqs:GetQueueAttributes",
-            "sqs:GetQueueUrl",
-            "sqs:ListQueues",
-            "sqs:ReceiveMessage"
-          ],
-          "Effect": "Allow",
-          "Resource": [
-            "arn:aws:sqs:us-east-1:123456789012:Logstash"
-          ]
-        }
-      ]
-    }
-
-See http://aws.amazon.com/iam/ for more details on setting up AWS identities.
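-
-As a minimal sketch, assuming credentials are resolved through the chain
-described under `access_key_id` below (the queue name is the placeholder
-from the sample policy above):
-
-[source,ruby]
-----------------------------------
-input {
-  sqs {
-    queue => "Logstash"     # queue name only, not a URL or ARN
-    region => "us-east-1"
-  }
-}
-----------------------------------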
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Sqs Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-id_field>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-md5_field>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-polling_frequency>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-queue>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-region>> |<<string,string>>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No
-| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-sent_timestamp_field>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-session_token>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-threads>> |<<number,number>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-access_key_id"]
-===== `access_key_id`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
-
-1. Static configuration, using `access_key_id` and `secret_access_key` params in the Logstash plugin config
-2. External credentials file specified by `aws_credentials_file`
-3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
-4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
-5. IAM Instance Profile (available when running inside EC2)
-
-[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"]
-===== `aws_credentials_file`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Path to a YAML file containing a hash of AWS credentials.
-This file will only be loaded if `access_key_id` and
-`secret_access_key` aren't set. The contents of the
-file should look like this:
-
-[source,ruby]
-----------------------------------
-    :access_key_id: "12345"
-    :secret_access_key: "54321"
-----------------------------------
-
-
-[id="{version}-plugins-{type}s-{plugin}-id_field"]
-===== `id_field`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Name of the event field in which to store the SQS message ID.
-
-[id="{version}-plugins-{type}s-{plugin}-md5_field"]
-===== `md5_field`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Name of the event field in which to store the SQS message MD5 checksum.
-
-[id="{version}-plugins-{type}s-{plugin}-polling_frequency"]
-===== `polling_frequency`
-
- * Value type is <<number,number>>
- * Default value is `20`
-
-Polling frequency, in seconds. The default is 20.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy_uri"]
-===== `proxy_uri`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-URI to proxy server if required
-
-[id="{version}-plugins-{type}s-{plugin}-queue"]
-===== `queue`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Name of the SQS queue to pull messages from. Note that this is just the name of the queue, not the URL or ARN.
-
-[id="{version}-plugins-{type}s-{plugin}-region"]
-===== `region`
-
- * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
- * Default value is `"us-east-1"`
-
-The AWS Region
-
-[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
-===== `secret_access_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The AWS Secret Access Key
-
-[id="{version}-plugins-{type}s-{plugin}-sent_timestamp_field"]
-===== `sent_timestamp_field`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Name of the event field in which to store the SQS message Sent Timestamp
-
-[id="{version}-plugins-{type}s-{plugin}-session_token"]
-===== `session_token`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The AWS Session token for temporary credentials
-
-[id="{version}-plugins-{type}s-{plugin}-threads"]
-===== `threads`
-
- * Value type is <>
- * Default value is `1`
-
-
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/sqs-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/sqs-v3.0.5.asciidoc
deleted file mode 100644
index 4a42a2a26..000000000
--- a/docs/versioned-plugins/inputs/sqs-v3.0.5.asciidoc
+++ /dev/null
@@ -1,220 +0,0 @@
-:plugin: sqs
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.5
-:release_date: 2017-08-16
-:changelog_url: https://github.com/logstash-plugins/logstash-input-sqs/blob/v3.0.5/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Sqs input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-
-Pull events from an Amazon Web Services Simple Queue Service (SQS) queue.
-
-SQS is a simple, scalable queue system that is part of the
-Amazon Web Services suite of tools.
-
-Although SQS is similar to other queuing systems like AMQP, it
-uses a custom API and requires that you have an AWS account.
-See http://aws.amazon.com/sqs/ for more details on how SQS works,
-what the pricing schedule looks like, and how to set up a queue.
-
-To use this plugin, you *must*:
-
- * Have an AWS account
- * Set up an SQS queue
- * Create an identity that has access to consume messages from the queue.
-
-The "consumer" identity must have the following permissions on the queue:
-
- * `sqs:ChangeMessageVisibility`
- * `sqs:ChangeMessageVisibilityBatch`
- * `sqs:DeleteMessage`
- * `sqs:DeleteMessageBatch`
- * `sqs:GetQueueAttributes`
- * `sqs:GetQueueUrl`
- * `sqs:ListQueues`
- * `sqs:ReceiveMessage`
-
-Typically, you should set up an IAM policy, create a user, and apply the IAM policy to the user.
-A sample policy is as follows: -[source,json] - { - "Statement": [ - { - "Action": [ - "sqs:ChangeMessageVisibility", - "sqs:ChangeMessageVisibilityBatch", - "sqs:GetQueueAttributes", - "sqs:GetQueueUrl", - "sqs:ListQueues", - "sqs:SendMessage", - "sqs:SendMessageBatch" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws:sqs:us-east-1:123456789012:Logstash" - ] - } - ] - } - -See http://aws.amazon.com/iam/ for more details on setting up AWS identities. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Sqs Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-id_field>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-md5_field>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-polling_frequency>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No -| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sent_timestamp_field>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-access_key_id"] -===== `access_key_id` - - * Value type is <> - * There is no default value for this setting. - -This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: - -1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config -2. External credentials file specified by `aws_credentials_file` -3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` -4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` -5. IAM Instance Profile (available when running inside EC2) - -[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] -===== `aws_credentials_file` - - * Value type is <> - * There is no default value for this setting. - -Path to YAML file containing a hash of AWS credentials. -This file will only be loaded if `access_key_id` and -`secret_access_key` aren't set. The contents of the -file should look like this: - -[source,ruby] ----------------------------------- - :access_key_id: "12345" - :secret_access_key: "54321" ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-id_field"] -===== `id_field` - - * Value type is <> - * There is no default value for this setting. - -Name of the event field in which to store the SQS message ID - -[id="{version}-plugins-{type}s-{plugin}-md5_field"] -===== `md5_field` - - * Value type is <> - * There is no default value for this setting. 
-
-Name of the event field in which to store the SQS message MD5 checksum
-
-[id="{version}-plugins-{type}s-{plugin}-polling_frequency"]
-===== `polling_frequency`
-
- * Value type is <>
- * Default value is `20`
-
-Polling frequency, default is 20 seconds
-
-[id="{version}-plugins-{type}s-{plugin}-proxy_uri"]
-===== `proxy_uri`
-
- * Value type is <>
- * There is no default value for this setting.
-
-URI to proxy server if required
-
-[id="{version}-plugins-{type}s-{plugin}-queue"]
-===== `queue`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Name of the SQS queue to pull messages from. Note that this is just the name of the queue, not the URL or ARN.
-
-[id="{version}-plugins-{type}s-{plugin}-region"]
-===== `region`
-
- * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
- * Default value is `"us-east-1"`
-
-The AWS Region
-
-[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
-===== `secret_access_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The AWS Secret Access Key
-
-[id="{version}-plugins-{type}s-{plugin}-sent_timestamp_field"]
-===== `sent_timestamp_field`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Name of the event field in which to store the SQS message Sent Timestamp
-
-[id="{version}-plugins-{type}s-{plugin}-session_token"]
-===== `session_token`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The AWS Session token for temporary credentials
-
-[id="{version}-plugins-{type}s-{plugin}-threads"]
-===== `threads`
-
- * Value type is <>
- * Default value is `1`
-
-
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/sqs-v3.0.6.asciidoc b/docs/versioned-plugins/inputs/sqs-v3.0.6.asciidoc
deleted file mode 100644
index e9334aebf..000000000
--- a/docs/versioned-plugins/inputs/sqs-v3.0.6.asciidoc
+++ /dev/null
@@ -1,220 +0,0 @@
-:plugin: sqs
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.6
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-input-sqs/blob/v3.0.6/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Sqs input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-
-Pull events from an Amazon Web Services Simple Queue Service (SQS) queue.
-
-SQS is a simple, scalable queue system that is part of the
-Amazon Web Services suite of tools.
-
-Although SQS is similar to other queuing systems like AMQP, it
-uses a custom API and requires that you have an AWS account.
-See http://aws.amazon.com/sqs/ for more details on how SQS works,
-what the pricing schedule looks like, and how to set up a queue.
-
-To use this plugin, you *must*:
-
- * Have an AWS account
- * Set up an SQS queue
- * Create an identity that has access to consume messages from the queue.
- -The "consumer" identity must have the following permissions on the queue: - - * `sqs:ChangeMessageVisibility` - * `sqs:ChangeMessageVisibilityBatch` - * `sqs:DeleteMessage` - * `sqs:DeleteMessageBatch` - * `sqs:GetQueueAttributes` - * `sqs:GetQueueUrl` - * `sqs:ListQueues` - * `sqs:ReceiveMessage` - -Typically, you should setup an IAM policy, create a user and apply the IAM policy to the user. -A sample policy is as follows: -[source,json] - { - "Statement": [ - { - "Action": [ - "sqs:ChangeMessageVisibility", - "sqs:ChangeMessageVisibilityBatch", - "sqs:GetQueueAttributes", - "sqs:GetQueueUrl", - "sqs:ListQueues", - "sqs:SendMessage", - "sqs:SendMessageBatch" - ], - "Effect": "Allow", - "Resource": [ - "arn:aws:sqs:us-east-1:123456789012:Logstash" - ] - } - ] - } - -See http://aws.amazon.com/iam/ for more details on setting up AWS identities. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Sqs Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-id_field>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-md5_field>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-polling_frequency>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No -| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sent_timestamp_field>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-access_key_id"] -===== `access_key_id` - - * Value type is <> - * There is no default value for this setting. - -This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: - -1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config -2. External credentials file specified by `aws_credentials_file` -3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` -4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` -5. IAM Instance Profile (available when running inside EC2) - -[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] -===== `aws_credentials_file` - - * Value type is <> - * There is no default value for this setting. - -Path to YAML file containing a hash of AWS credentials. -This file will only be loaded if `access_key_id` and -`secret_access_key` aren't set. 
-file should look like this:
-
-[source,ruby]
----------------------------------
-    :access_key_id: "12345"
-    :secret_access_key: "54321"
----------------------------------
-
-
-[id="{version}-plugins-{type}s-{plugin}-id_field"]
-===== `id_field`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Name of the event field in which to store the SQS message ID
-
-[id="{version}-plugins-{type}s-{plugin}-md5_field"]
-===== `md5_field`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Name of the event field in which to store the SQS message MD5 checksum
-
-[id="{version}-plugins-{type}s-{plugin}-polling_frequency"]
-===== `polling_frequency`
-
- * Value type is <>
- * Default value is `20`
-
-Polling frequency, default is 20 seconds
-
-[id="{version}-plugins-{type}s-{plugin}-proxy_uri"]
-===== `proxy_uri`
-
- * Value type is <>
- * There is no default value for this setting.
-
-URI to proxy server if required
-
-[id="{version}-plugins-{type}s-{plugin}-queue"]
-===== `queue`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Name of the SQS queue to pull messages from. Note that this is just the name of the queue, not the URL or ARN.
-
-[id="{version}-plugins-{type}s-{plugin}-region"]
-===== `region`
-
- * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
- * Default value is `"us-east-1"`
-
-The AWS Region
-
-[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
-===== `secret_access_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The AWS Secret Access Key
-
-[id="{version}-plugins-{type}s-{plugin}-sent_timestamp_field"]
-===== `sent_timestamp_field`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Name of the event field in which to store the SQS message Sent Timestamp
-
-[id="{version}-plugins-{type}s-{plugin}-session_token"]
-===== `session_token`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The AWS Session token for temporary credentials
-
-[id="{version}-plugins-{type}s-{plugin}-threads"]
-===== `threads`
-
- * Value type is <>
- * Default value is `1`
-
-
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/stdin-index.asciidoc b/docs/versioned-plugins/inputs/stdin-index.asciidoc
deleted file mode 100644
index 18bb84826..000000000
--- a/docs/versioned-plugins/inputs/stdin-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: stdin
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-16
-| <> | 2017-06-23
-|=======================================================================
-
-include::stdin-v3.2.5.asciidoc[]
-include::stdin-v3.2.4.asciidoc[]
-include::stdin-v3.2.3.asciidoc[]
-
diff --git a/docs/versioned-plugins/inputs/stdin-v3.2.3.asciidoc b/docs/versioned-plugins/inputs/stdin-v3.2.3.asciidoc
deleted file mode 100644
index 895ecac1f..000000000
--- a/docs/versioned-plugins/inputs/stdin-v3.2.3.asciidoc
+++ /dev/null
@@ -1,39 +0,0 @@
-:plugin: stdin
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.2.3
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-input-stdin/blob/v3.2.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="plugins-{type}-{plugin}"]
-
-=== Stdin input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Read events from standard input.
-
-By default, each event is assumed to be one line. If you
-want to join lines, you'll want to use the multiline codec.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Stdin Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-|=======================================================================
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/stdin-v3.2.4.asciidoc b/docs/versioned-plugins/inputs/stdin-v3.2.4.asciidoc
deleted file mode 100644
index ab31da691..000000000
--- a/docs/versioned-plugins/inputs/stdin-v3.2.4.asciidoc
+++ /dev/null
@@ -1,35 +0,0 @@
-:plugin: stdin
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.2.4
-:release_date: 2017-08-16
-:changelog_url: https://github.com/logstash-plugins/logstash-input-stdin/blob/v3.2.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Stdin input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read events from standard input. - -By default, each event is assumed to be one line. If you -want to join lines, you'll want to use the multiline codec. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Stdin Input Configuration Options - -There are no special configuration options for this plugin, -but it does support the <<{version}-plugins-{type}s-{plugin}-common-options>>. - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/stdin-v3.2.5.asciidoc b/docs/versioned-plugins/inputs/stdin-v3.2.5.asciidoc deleted file mode 100644 index 33cc8917b..000000000 --- a/docs/versioned-plugins/inputs/stdin-v3.2.5.asciidoc +++ /dev/null @@ -1,35 +0,0 @@ -:plugin: stdin -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.2.5 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-stdin/blob/v3.2.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Stdin input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read events from standard input. - -By default, each event is assumed to be one line. If you -want to join lines, you'll want to use the multiline codec. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Stdin Input Configuration Options - -There are no special configuration options for this plugin, -but it does support the <<{version}-plugins-{type}s-{plugin}-common-options>>. - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/stomp-index.asciidoc b/docs/versioned-plugins/inputs/stomp-index.asciidoc deleted file mode 100644 index 2ecffd94b..000000000 --- a/docs/versioned-plugins/inputs/stomp-index.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -:plugin: stomp -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-09-07 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::stomp-v3.0.7.asciidoc[] -include::stomp-v3.0.6.asciidoc[] -include::stomp-v3.0.5.asciidoc[] -include::stomp-v3.0.4.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/stomp-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/stomp-v3.0.4.asciidoc deleted file mode 100644 index 4677dff6e..000000000 --- a/docs/versioned-plugins/inputs/stomp-v3.0.4.asciidoc +++ /dev/null @@ -1,119 +0,0 @@ -:plugin: stomp -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-stomp/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-///////////////////////////////////////////
-
-[id="plugins-{type}-{plugin}"]
-
-=== Stomp input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Creates events received with the STOMP protocol.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Stomp Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-reconnect>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-destination"]
-===== `destination`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The destination to read events from.
-
-Example: `/topic/logstash`
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * This is a required setting.
- * Value type is <>
- * Default value is `"localhost"`
-
-The address of the STOMP server.
-
-[id="{version}-plugins-{type}s-{plugin}-password"]
-===== `password`
-
- * Value type is <>
- * Default value is `""`
-
-The password to authenticate with.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <>
- * Default value is `61613`
-
-The port to connect to on your STOMP server.
-
-[id="{version}-plugins-{type}s-{plugin}-reconnect"]
-===== `reconnect`
-
- * Value type is <>
- * Default value is `true`
-
-Whether to reconnect automatically.
-
-[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"]
-===== `reconnect_interval`
-
- * Value type is <>
- * Default value is `30`
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-user"]
-===== `user`
-
- * Value type is <>
- * Default value is `""`
-
-The username to authenticate with.
-
-[id="{version}-plugins-{type}s-{plugin}-vhost"]
-===== `vhost`
-
- * Value type is <>
- * Default value is `nil`
-
-The vhost to use
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/stomp-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/stomp-v3.0.5.asciidoc
deleted file mode 100644
index 65ae97efd..000000000
--- a/docs/versioned-plugins/inputs/stomp-v3.0.5.asciidoc
+++ /dev/null
@@ -1,119 +0,0 @@
-:plugin: stomp
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.5
-:release_date: 2017-08-16
-:changelog_url: https://github.com/logstash-plugins/logstash-input-stomp/blob/v3.0.5/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Stomp input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Creates events received with the STOMP protocol.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Stomp Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-reconnect>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-destination"]
-===== `destination`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The destination to read events from.
-
-Example: `/topic/logstash`
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * This is a required setting.
- * Value type is <>
- * Default value is `"localhost"`
-
-The address of the STOMP server.
-
-[id="{version}-plugins-{type}s-{plugin}-password"]
-===== `password`
-
- * Value type is <>
- * Default value is `""`
-
-The password to authenticate with.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <>
- * Default value is `61613`
-
-The port to connect to on your STOMP server.
-
-[id="{version}-plugins-{type}s-{plugin}-reconnect"]
-===== `reconnect`
-
- * Value type is <>
- * Default value is `true`
-
-Whether to reconnect automatically.
-
-[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"]
-===== `reconnect_interval`
-
- * Value type is <>
- * Default value is `30`
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-user"]
-===== `user`
-
- * Value type is <>
- * Default value is `""`
-
-The username to authenticate with.
-
-[id="{version}-plugins-{type}s-{plugin}-vhost"]
-===== `vhost`
-
- * Value type is <>
- * Default value is `nil`
-
-The vhost to use
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/stomp-v3.0.6.asciidoc b/docs/versioned-plugins/inputs/stomp-v3.0.6.asciidoc
deleted file mode 100644
index 27abce30b..000000000
--- a/docs/versioned-plugins/inputs/stomp-v3.0.6.asciidoc
+++ /dev/null
@@ -1,119 +0,0 @@
-:plugin: stomp
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.6
-:release_date: 2017-09-07
-:changelog_url: https://github.com/logstash-plugins/logstash-input-stomp/blob/v3.0.6/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Stomp input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Creates events received with the STOMP protocol.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Stomp Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-reconnect>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-destination"]
-===== `destination`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The destination to read events from.
-
-Example: `/topic/logstash`
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * This is a required setting.
- * Value type is <>
- * Default value is `"localhost"`
-
-The address of the STOMP server.
-
-[id="{version}-plugins-{type}s-{plugin}-password"]
-===== `password`
-
- * Value type is <>
- * Default value is `""`
-
-The password to authenticate with.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <>
- * Default value is `61613`
-
-The port to connect to on your STOMP server.
-
-[id="{version}-plugins-{type}s-{plugin}-reconnect"]
-===== `reconnect`
-
- * Value type is <>
- * Default value is `true`
-
-Whether to reconnect automatically.
-
-[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"]
-===== `reconnect_interval`
-
- * Value type is <>
- * Default value is `30`
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-user"]
-===== `user`
-
- * Value type is <>
- * Default value is `""`
-
-The username to authenticate with.
-
-[id="{version}-plugins-{type}s-{plugin}-vhost"]
-===== `vhost`
-
- * Value type is <>
- * Default value is `nil`
-
-The vhost to use
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/stomp-v3.0.7.asciidoc b/docs/versioned-plugins/inputs/stomp-v3.0.7.asciidoc
deleted file mode 100644
index 1443b72f6..000000000
--- a/docs/versioned-plugins/inputs/stomp-v3.0.7.asciidoc
+++ /dev/null
@@ -1,119 +0,0 @@
-:plugin: stomp
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.7
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-input-stomp/blob/v3.0.7/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Stomp input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Creates events received with the STOMP protocol.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Stomp Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-reconnect>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-destination"]
-===== `destination`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The destination to read events from.
-
-Example: `/topic/logstash`
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * This is a required setting.
- * Value type is <>
- * Default value is `"localhost"`
-
-The address of the STOMP server.
-
-[id="{version}-plugins-{type}s-{plugin}-password"]
-===== `password`
-
- * Value type is <>
- * Default value is `""`
-
-The password to authenticate with.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <>
- * Default value is `61613`
-
-The port to connect to on your STOMP server.
-
-[id="{version}-plugins-{type}s-{plugin}-reconnect"]
-===== `reconnect`
-
- * Value type is <>
- * Default value is `true`
-
-Whether to reconnect automatically.
-
-[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"]
-===== `reconnect_interval`
-
- * Value type is <>
- * Default value is `30`
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-user"]
-===== `user`
-
- * Value type is <>
- * Default value is `""`
-
-The username to authenticate with.
- -[id="{version}-plugins-{type}s-{plugin}-vhost"] -===== `vhost` - - * Value type is <> - * Default value is `nil` - -The vhost to use - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/syslog-index.asciidoc b/docs/versioned-plugins/inputs/syslog-index.asciidoc deleted file mode 100644 index 6cb1c6ec2..000000000 --- a/docs/versioned-plugins/inputs/syslog-index.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -:plugin: syslog -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-12-06 -| <> | 2017-11-07 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::syslog-v3.2.4.asciidoc[] -include::syslog-v3.2.3.asciidoc[] -include::syslog-v3.2.2.asciidoc[] -include::syslog-v3.2.1.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/syslog-v3.2.1.asciidoc b/docs/versioned-plugins/inputs/syslog-v3.2.1.asciidoc deleted file mode 100644 index 99c01fe15..000000000 --- a/docs/versioned-plugins/inputs/syslog-v3.2.1.asciidoc +++ /dev/null @@ -1,144 +0,0 @@ -:plugin: syslog -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.2.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-syslog/blob/v3.2.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Syslog input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read syslog messages as events over the network. - -This input is a good choice if you already use syslog today. -It is also a good choice if you want to receive logs from -appliances and network devices where you cannot run your own -log collector. - -Of course, 'syslog' is a very muddy term. This input only supports `RFC3164` -syslog with some small modifications. The date format is allowed to be -`RFC3164` style or `ISO8601`. Otherwise the rest of `RFC3164` must be obeyed. -If you do not use `RFC3164`, do not use this input. - -For more information see the http://www.ietf.org/rfc/rfc3164.txt[RFC3164 page]. - -Note: This input will start listeners on both TCP and UDP. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Syslog Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-facility_labels>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-locale>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-severity_labels>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-timezone>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-use_labels>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-facility_labels"]
-===== `facility_labels`
-
- * Value type is <>
- * Default value is `["kernel", "user-level", "mail", "system", "security/authorization", "syslogd", "line printer", "network news", "UUCP", "clock", "security/authorization", "FTP", "NTP", "log audit", "log alert", "clock", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7"]`
-
-Labels for facility levels. These are defined in RFC3164.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * Value type is <>
- * Default value is `"0.0.0.0"`
-
-The address to listen on.
-
-[id="{version}-plugins-{type}s-{plugin}-locale"]
-===== `locale`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Specify a locale to be used for date parsing, using either an IETF BCP 47 or POSIX language tag.
-Simple examples are `en`, `en-US` for BCP 47, or `en_US` for POSIX.
-If not specified, the platform default will be used.
-
-Setting the locale is mostly necessary for parsing month names (pattern with MMM) and
-weekday names (pattern with EEE).
-
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <>
- * Default value is `514`
-
-The port to listen on. Remember that ports less than 1024 (privileged
-ports) may require root to use.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"]
-===== `proxy_protocol`
-
- * Value type is <>
- * Default value is `false`
-
-Proxy protocol support. Only v1 is supported at this time:
-http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt
-
-[id="{version}-plugins-{type}s-{plugin}-severity_labels"]
-===== `severity_labels`
-
- * Value type is <>
- * Default value is `["Emergency", "Alert", "Critical", "Error", "Warning", "Notice", "Informational", "Debug"]`
-
-Labels for severity levels. These are defined in RFC3164.
-
-[id="{version}-plugins-{type}s-{plugin}-timezone"]
-===== `timezone`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Specify a time zone canonical ID to be used for date parsing.
-The valid IDs are listed on the http://joda-time.sourceforge.net/timezones.html[Joda.org available time zones page].
-This is useful in case the time zone cannot be extracted from the value
-and is not the platform default.
-If this is not specified, the platform default will be used.
-A canonical ID is preferable because it takes care of daylight saving time for you.
-For example, `America/Los_Angeles` and `Europe/Paris` are valid IDs.
-
-[id="{version}-plugins-{type}s-{plugin}-use_labels"]
-===== `use_labels`
-
- * Value type is <>
- * Default value is `true`
-
-Use label parsing for severity and facility levels.
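-
-As a minimal sketch of this input in a pipeline (the port value below is
-a placeholder; 5514 simply avoids the privileged default of 514):
-
-[source,ruby]
-----------------------------------
-input {
-  syslog {
-    port => 5514        # unprivileged alternative to the default 514
-    host => "0.0.0.0"   # listen on all interfaces (the default)
-  }
-}
-----------------------------------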
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/syslog-v3.2.2.asciidoc b/docs/versioned-plugins/inputs/syslog-v3.2.2.asciidoc deleted file mode 100644 index bd41dc3e5..000000000 --- a/docs/versioned-plugins/inputs/syslog-v3.2.2.asciidoc +++ /dev/null @@ -1,144 +0,0 @@ -:plugin: syslog -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.2.2 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-input-syslog/blob/v3.2.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Syslog input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read syslog messages as events over the network. - -This input is a good choice if you already use syslog today. -It is also a good choice if you want to receive logs from -appliances and network devices where you cannot run your own -log collector. - -Of course, 'syslog' is a very muddy term. This input only supports `RFC3164` -syslog with some small modifications. The date format is allowed to be -`RFC3164` style or `ISO8601`. Otherwise the rest of `RFC3164` must be obeyed. -If you do not use `RFC3164`, do not use this input. - -For more information see the http://www.ietf.org/rfc/rfc3164.txt[RFC3164 page]. - -Note: This input will start listeners on both TCP and UDP. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Syslog Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-facility_labels>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-locale>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-severity_labels>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timezone>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-use_labels>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-facility_labels"] -===== `facility_labels` - - * Value type is <> - * Default value is `["kernel", "user-level", "mail", "system", "security/authorization", "syslogd", "line printer", "network news", "UUCP", "clock", "security/authorization", "FTP", "NTP", "log audit", "log alert", "clock", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7"]` - -Labels for facility levels. These are defined in RFC3164. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"0.0.0.0"` - -The address to listen on. - -[id="{version}-plugins-{type}s-{plugin}-locale"] -===== `locale` - - * Value type is <> - * There is no default value for this setting. 
-
-Specify a locale to be used for date parsing, using either an IETF BCP 47 or POSIX language tag.
-Simple examples are `en`, `en-US` for BCP 47, or `en_US` for POSIX.
-If not specified, the platform default will be used.
-
-Setting the locale is mostly necessary for parsing month names (pattern with MMM) and
-weekday names (pattern with EEE).
-
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <>
- * Default value is `514`
-
-The port to listen on. Remember that ports less than 1024 (privileged
-ports) may require root to use.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"]
-===== `proxy_protocol`
-
- * Value type is <>
- * Default value is `false`
-
-Proxy protocol support. Only v1 is supported at this time:
-http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt
-
-[id="{version}-plugins-{type}s-{plugin}-severity_labels"]
-===== `severity_labels`
-
- * Value type is <>
- * Default value is `["Emergency", "Alert", "Critical", "Error", "Warning", "Notice", "Informational", "Debug"]`
-
-Labels for severity levels. These are defined in RFC3164.
-
-[id="{version}-plugins-{type}s-{plugin}-timezone"]
-===== `timezone`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Specify a time zone canonical ID to be used for date parsing.
-The valid IDs are listed on the http://joda-time.sourceforge.net/timezones.html[Joda.org available time zones page].
-This is useful in case the time zone cannot be extracted from the value
-and is not the platform default.
-If this is not specified, the platform default will be used.
-A canonical ID is preferable because it takes care of daylight saving time for you.
-For example, `America/Los_Angeles` and `Europe/Paris` are valid IDs.
-
-[id="{version}-plugins-{type}s-{plugin}-use_labels"]
-===== `use_labels`
-
- * Value type is <>
- * Default value is `true`
-
-Use label parsing for severity and facility levels.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/syslog-v3.2.3.asciidoc b/docs/versioned-plugins/inputs/syslog-v3.2.3.asciidoc
deleted file mode 100644
index 02f0b3195..000000000
--- a/docs/versioned-plugins/inputs/syslog-v3.2.3.asciidoc
+++ /dev/null
@@ -1,144 +0,0 @@
-:plugin: syslog
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.2.3
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-input-syslog/blob/v3.2.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Syslog input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Read syslog messages as events over the network.
-
-This input is a good choice if you already use syslog today.
-It is also a good choice if you want to receive logs from
-appliances and network devices where you cannot run your own
-log collector.
-
-Of course, 'syslog' is a very muddy term. This input only supports `RFC3164`
-syslog with some small modifications. The date format is allowed to be
-`RFC3164` style or `ISO8601`. Otherwise the rest of `RFC3164` must be obeyed.
-If you do not use `RFC3164`, do not use this input.
-
-For more information see the http://www.ietf.org/rfc/rfc3164.txt[RFC3164 page].
-
-Note: This input will start listeners on both TCP and UDP.
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Syslog Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-facility_labels>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-locale>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-severity_labels>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-timezone>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-use_labels>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-facility_labels"]
-===== `facility_labels`
-
- * Value type is <>
- * Default value is `["kernel", "user-level", "mail", "system", "security/authorization", "syslogd", "line printer", "network news", "UUCP", "clock", "security/authorization", "FTP", "NTP", "log audit", "log alert", "clock", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7"]`
-
-Labels for facility levels. These are defined in RFC3164.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * Value type is <>
- * Default value is `"0.0.0.0"`
-
-The address to listen on.
-
-[id="{version}-plugins-{type}s-{plugin}-locale"]
-===== `locale`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Specify a locale to be used for date parsing, using either an IETF BCP 47 or POSIX language tag.
-Simple examples are `en`, `en-US` for BCP 47, or `en_US` for POSIX.
-If not specified, the platform default will be used.
-
-Setting the locale is mostly necessary for parsing month names (pattern with MMM) and
-weekday names (pattern with EEE).
-
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <>
- * Default value is `514`
-
-The port to listen on. Remember that ports less than 1024 (privileged
-ports) may require root to use.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"]
-===== `proxy_protocol`
-
- * Value type is <>
- * Default value is `false`
-
-Proxy protocol support. Only v1 is supported at this time:
-http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt
-
-[id="{version}-plugins-{type}s-{plugin}-severity_labels"]
-===== `severity_labels`
-
- * Value type is <>
- * Default value is `["Emergency", "Alert", "Critical", "Error", "Warning", "Notice", "Informational", "Debug"]`
-
-Labels for severity levels. These are defined in RFC3164.
-
-[id="{version}-plugins-{type}s-{plugin}-timezone"]
-===== `timezone`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Specify a time zone canonical ID to be used for date parsing.
-The valid IDs are listed on the http://joda-time.sourceforge.net/timezones.html[Joda.org available time zones page].
-This is useful in case the time zone cannot be extracted from the value
-and is not the platform default.
-If this is not specified, the platform default will be used.
-A canonical ID is preferable because it takes care of daylight saving time for you.
-For example, `America/Los_Angeles` and `Europe/Paris` are valid IDs.
-
-[id="{version}-plugins-{type}s-{plugin}-use_labels"]
-===== `use_labels`
-
- * Value type is <>
- * Default value is `true`
-
-Use label parsing for severity and facility levels.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/syslog-v3.2.4.asciidoc b/docs/versioned-plugins/inputs/syslog-v3.2.4.asciidoc
deleted file mode 100644
index faa8a559f..000000000
--- a/docs/versioned-plugins/inputs/syslog-v3.2.4.asciidoc
+++ /dev/null
@@ -1,144 +0,0 @@
-:plugin: syslog
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.2.4
-:release_date: 2017-12-06
-:changelog_url: https://github.com/logstash-plugins/logstash-input-syslog/blob/v3.2.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Syslog input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Read syslog messages as events over the network.
-
-This input is a good choice if you already use syslog today.
-It is also a good choice if you want to receive logs from
-appliances and network devices where you cannot run your own
-log collector.
-
-Of course, 'syslog' is a very muddy term. This input only supports `RFC3164`
-syslog with some small modifications. The date format is allowed to be
-`RFC3164` style or `ISO8601`. Otherwise the rest of `RFC3164` must be obeyed.
-If you do not use `RFC3164`, do not use this input.
-
-For more information see the http://www.ietf.org/rfc/rfc3164.txt[RFC3164 page].
-
-Note: This input will start listeners on both TCP and UDP.
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Syslog Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-facility_labels>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-locale>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-severity_labels>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-timezone>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-use_labels>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-facility_labels"]
-===== `facility_labels`
-
- * Value type is <>
- * Default value is `["kernel", "user-level", "mail", "system", "security/authorization", "syslogd", "line printer", "network news", "UUCP", "clock", "security/authorization", "FTP", "NTP", "log audit", "log alert", "clock", "local0", "local1", "local2", "local3", "local4", "local5", "local6", "local7"]`
-
-Labels for facility levels. These are defined in RFC3164.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * Value type is <>
- * Default value is `"0.0.0.0"`
-
-The address to listen on.
-
-[id="{version}-plugins-{type}s-{plugin}-locale"]
-===== `locale`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Specify a locale to be used for date parsing, using either an IETF BCP 47 or POSIX language tag.
-Simple examples are `en`, `en-US` for BCP 47, or `en_US` for POSIX.
-If not specified, the platform default will be used.
-
-Setting the locale is mostly necessary for parsing month names (pattern with MMM) and
-weekday names (pattern with EEE).
-
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <>
- * Default value is `514`
-
-The port to listen on. Remember that ports less than 1024 (privileged
-ports) may require root to use.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"]
-===== `proxy_protocol`
-
- * Value type is <>
- * Default value is `false`
-
-Proxy protocol support. Only v1 is supported at this time:
-http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt
-
-[id="{version}-plugins-{type}s-{plugin}-severity_labels"]
-===== `severity_labels`
-
- * Value type is <>
- * Default value is `["Emergency", "Alert", "Critical", "Error", "Warning", "Notice", "Informational", "Debug"]`
-
-Labels for severity levels. These are defined in RFC3164.
-
-[id="{version}-plugins-{type}s-{plugin}-timezone"]
-===== `timezone`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Specify a time zone canonical ID to be used for date parsing.
-The valid IDs are listed on the http://joda-time.sourceforge.net/timezones.html[Joda.org available time zones page].
-This is useful in case the time zone cannot be extracted from the value
-and is not the platform default.
-If this is not specified, the platform default will be used.
-A canonical ID is preferable because it takes care of daylight saving time for you.
-For example, `America/Los_Angeles` and `Europe/Paris` are valid IDs.
-
-[id="{version}-plugins-{type}s-{plugin}-use_labels"]
-===== `use_labels`
-
- * Value type is <>
- * Default value is `true`
-
-Use label parsing for severity and facility levels.
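-
-For example, a sketch that pins date parsing to an explicit time zone
-and locale (both values below are placeholders):
-
-[source,ruby]
-----------------------------------
-input {
-  syslog {
-    port     => 5514
-    timezone => "America/Los_Angeles"   # canonical ID, handles DST
-    locale   => "en"                    # used when parsing month and weekday names
-  }
-}
-----------------------------------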
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/twitter-index.asciidoc b/docs/versioned-plugins/inputs/twitter-index.asciidoc deleted file mode 100644 index e2387b6f8..000000000 --- a/docs/versioned-plugins/inputs/twitter-index.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -:plugin: twitter -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-16 -| <> | 2017-06-23 -| <> | 2017-05-08 -|======================================================================= - -include::twitter-v3.0.7.asciidoc[] -include::twitter-v3.0.6.asciidoc[] -include::twitter-v3.0.5.asciidoc[] -include::twitter-v3.0.4.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/twitter-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/twitter-v3.0.4.asciidoc deleted file mode 100644 index dc5207e0b..000000000 --- a/docs/versioned-plugins/inputs/twitter-v3.0.4.asciidoc +++ /dev/null @@ -1,225 +0,0 @@ -:plugin: twitter -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-05-08 -:changelog_url: https://github.com/logstash-plugins/logstash-input-twitter/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Twitter - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Ingest events from the Twitter Streaming API. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Twitter Input Configuration Options - -This plugin supports the following configuration options plus the <> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-consumer_key>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-consumer_secret>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-follows>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-full_tweet>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ignore_retweets>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keywords>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-languages>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-locations>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-oauth_token>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-oauth_token_secret>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-proxy_address>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-rate_limit_reset_in>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-use_proxy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-use_samples>> |<>|No -|======================================================================= - -Also see <> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-consumer_key"] -===== `consumer_key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Your Twitter App's consumer key - -Don't know what this is? 
You need to create an "application" -on Twitter, see this url: - -[id="{version}-plugins-{type}s-{plugin}-consumer_secret"] -===== `consumer_secret` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Your Twitter App's consumer secret - -If you don't have one of these, you can create one by -registering a new application with Twitter: - - -[id="{version}-plugins-{type}s-{plugin}-follows"] -===== `follows` - - * Value type is <> - * There is no default value for this setting. - -A comma-separated list of user IDs, indicating the users to -return statuses for in the Twitter stream. -See https://dev.twitter.com/streaming/overview/request-parameters#follow -for more details. - -[id="{version}-plugins-{type}s-{plugin}-full_tweet"] -===== `full_tweet` - - * Value type is <> - * Default value is `false` - -Record the full tweet object as given to us by the Twitter Streaming API. - -[id="{version}-plugins-{type}s-{plugin}-ignore_retweets"] -===== `ignore_retweets` - - * Value type is <> - * Default value is `false` - -Lets you ignore the retweets coming out of the Twitter API. - -[id="{version}-plugins-{type}s-{plugin}-keywords"] -===== `keywords` - - * Value type is <> - * There is no default value for this setting. - -Any keywords to track in the Twitter stream. For multiple keywords, use -the syntax ["foo", "bar"]. There's a logical OR between each keyword -string listed and a logical AND between words separated by spaces per -keyword string. -See https://dev.twitter.com/streaming/overview/request-parameters#track -for more details. - -The wildcard "*" option is not supported. To ingest a sample stream of -all tweets, the use_samples option is recommended. - -[id="{version}-plugins-{type}s-{plugin}-languages"] -===== `languages` - - * Value type is <> - * There is no default value for this setting. - -A list of BCP 47 language identifiers, corresponding to any of the languages listed -on Twitter’s advanced search page. When set, only tweets detected as being written -in the specified languages are returned. - -[id="{version}-plugins-{type}s-{plugin}-locations"] -===== `locations` - - * Value type is <> - * There is no default value for this setting. - -A comma-separated list of longitude, latitude pairs specifying a set -of bounding boxes to filter tweets by. -See https://dev.twitter.com/streaming/overview/request-parameters#locations -for more details. - -[id="{version}-plugins-{type}s-{plugin}-oauth_token"] -===== `oauth_token` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Your oauth token. - -To get this, log in to Twitter with whatever account you want, -then visit - -Click on your app (used with the consumer_key and consumer_secret settings). -Then, at the bottom of the page, click 'Create my access token', which -will create an oauth token and secret bound to your account and that -application. - -[id="{version}-plugins-{type}s-{plugin}-oauth_token_secret"] -===== `oauth_token_secret` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Your oauth token secret. - -To get this, log in to Twitter with whatever account you want, -then visit - -Click on your app (used with the consumer_key and consumer_secret settings). -Then, at the bottom of the page, click 'Create my access token', which -will create an oauth token and secret bound to your account and that -application.
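The four required credential settings travel together. A minimal sketch, with hypothetical placeholder values standing in for the keys generated for your Twitter application:

[source,ruby]
----
input {
  twitter {
    consumer_key       => "YOUR_CONSUMER_KEY"        # placeholder values; substitute
    consumer_secret    => "YOUR_CONSUMER_SECRET"     # the credentials from your app
    oauth_token        => "YOUR_OAUTH_TOKEN"
    oauth_token_secret => "YOUR_OAUTH_TOKEN_SECRET"
    keywords           => ["logstash"]               # illustrative tracked keyword
  }
}
----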
- -[id="{version}-plugins-{type}s-{plugin}-proxy_address"] -===== `proxy_address` - - * Value type is <> - * Default value is `"127.0.0.1"` - -Location of the proxy, by default the same machine as the one running this LS instance - -[id="{version}-plugins-{type}s-{plugin}-proxy_port"] -===== `proxy_port` - - * Value type is <> - * Default value is `3128` - -Port where the proxy is listening, by default 3128 (squid) - -[id="{version}-plugins-{type}s-{plugin}-rate_limit_reset_in"] -===== `rate_limit_reset_in` - - * Value type is <> - * Default value is `300` - -Duration in seconds to wait before retrying a connection when twitter responds with a 429 TooManyRequests -In some cases the 'x-rate-limit-reset' header is not set in the response and .rate_limit.reset_in -is nil. If this occurs then we use the integer specified here. The default is 5 minutes. - -[id="{version}-plugins-{type}s-{plugin}-use_proxy"] -===== `use_proxy` - - * Value type is <> - * Default value is `false` - -When to use a proxy to handle the connections - -[id="{version}-plugins-{type}s-{plugin}-use_samples"] -===== `use_samples` - - * Value type is <> - * Default value is `false` - -Returns a small random sample of all public statuses. The tweets returned -by the default access level are the same, so if two different clients connect -to this endpoint, they will see the same tweets. If set to true, the keywords, -follows, locations, and languages options will be ignored. Default => false - - - -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/twitter-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/twitter-v3.0.5.asciidoc deleted file mode 100644 index 92caaee25..000000000 --- a/docs/versioned-plugins/inputs/twitter-v3.0.5.asciidoc +++ /dev/null @@ -1,226 +0,0 @@ -:plugin: twitter -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-twitter/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Twitter input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Ingest events from the Twitter Streaming API. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Twitter Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-consumer_key>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-consumer_secret>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-follows>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-full_tweet>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ignore_retweets>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keywords>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-languages>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-locations>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-oauth_token>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-oauth_token_secret>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-proxy_address>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-rate_limit_reset_in>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-use_proxy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-use_samples>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -&nbsp; - -[id="{version}-plugins-{type}s-{plugin}-consumer_key"] -===== `consumer_key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Your Twitter App's consumer key - -Don't know what this is? You need to create an "application" -on Twitter, see this url: - -[id="{version}-plugins-{type}s-{plugin}-consumer_secret"] -===== `consumer_secret` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Your Twitter App's consumer secret - -If you don't have one of these, you can create one by -registering a new application with Twitter: - - -[id="{version}-plugins-{type}s-{plugin}-follows"] -===== `follows` - - * Value type is <> - * There is no default value for this setting. - -A comma-separated list of user IDs, indicating the users to -return statuses for in the Twitter stream. -See https://dev.twitter.com/streaming/overview/request-parameters#follow -for more details. - -[id="{version}-plugins-{type}s-{plugin}-full_tweet"] -===== `full_tweet` - - * Value type is <> - * Default value is `false` - -Record the full tweet object as given to us by the Twitter Streaming API. - -[id="{version}-plugins-{type}s-{plugin}-ignore_retweets"] -===== `ignore_retweets` - - * Value type is <> - * Default value is `false` - -Lets you ignore the retweets coming out of the Twitter API. - -[id="{version}-plugins-{type}s-{plugin}-keywords"] -===== `keywords` - - * Value type is <> - * There is no default value for this setting. - -Any keywords to track in the Twitter stream. For multiple keywords, use -the syntax ["foo", "bar"]. There's a logical OR between each keyword -string listed and a logical AND between words separated by spaces per -keyword string. -See https://dev.twitter.com/streaming/overview/request-parameters#track -for more details. - -The wildcard "*" option is not supported. To ingest a sample stream of -all tweets, the use_samples option is recommended. - -[id="{version}-plugins-{type}s-{plugin}-languages"] -===== `languages` - - * Value type is <> - * There is no default value for this setting.
- -A list of BCP 47 language identifiers corresponding to any of the languages listed -on Twitter’s advanced search page will only return tweets that have been detected -as being written in the specified languages. - -[id="{version}-plugins-{type}s-{plugin}-locations"] -===== `locations` - - * Value type is <> - * There is no default value for this setting. - -A comma-separated list of longitude, latitude pairs specifying a set -of bounding boxes to filter tweets by. -See https://dev.twitter.com/streaming/overview/request-parameters#locations -for more details. - -[id="{version}-plugins-{type}s-{plugin}-oauth_token"] -===== `oauth_token` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Your oauth token. - -To get this, login to Twitter with whatever account you want, -then visit - -Click on your app (used with the consumer_key and consumer_secret settings) -Then at the bottom of the page, click 'Create my access token' which -will create an oauth token and secret bound to your account and that -application. - -[id="{version}-plugins-{type}s-{plugin}-oauth_token_secret"] -===== `oauth_token_secret` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Your oauth token secret. - -To get this, login to Twitter with whatever account you want, -then visit - -Click on your app (used with the consumer_key and consumer_secret settings) -Then at the bottom of the page, click 'Create my access token' which -will create an oauth token and secret bound to your account and that -application. - -[id="{version}-plugins-{type}s-{plugin}-proxy_address"] -===== `proxy_address` - - * Value type is <> - * Default value is `"127.0.0.1"` - -Location of the proxy, by default the same machine as the one running this LS instance - -[id="{version}-plugins-{type}s-{plugin}-proxy_port"] -===== `proxy_port` - - * Value type is <> - * Default value is `3128` - -Port where the proxy is listening, by default 3128 (squid) - -[id="{version}-plugins-{type}s-{plugin}-rate_limit_reset_in"] -===== `rate_limit_reset_in` - - * Value type is <> - * Default value is `300` - -Duration in seconds to wait before retrying a connection when twitter responds with a 429 TooManyRequests -In some cases the 'x-rate-limit-reset' header is not set in the response and .rate_limit.reset_in -is nil. If this occurs then we use the integer specified here. The default is 5 minutes. - -[id="{version}-plugins-{type}s-{plugin}-use_proxy"] -===== `use_proxy` - - * Value type is <> - * Default value is `false` - -When to use a proxy to handle the connections - -[id="{version}-plugins-{type}s-{plugin}-use_samples"] -===== `use_samples` - - * Value type is <> - * Default value is `false` - -Returns a small random sample of all public statuses. The tweets returned -by the default access level are the same, so if two different clients connect -to this endpoint, they will see the same tweets. If set to true, the keywords, -follows, locations, and languages options will be ignored. 
Default => false - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/twitter-v3.0.6.asciidoc b/docs/versioned-plugins/inputs/twitter-v3.0.6.asciidoc deleted file mode 100644 index ecddbfc70..000000000 --- a/docs/versioned-plugins/inputs/twitter-v3.0.6.asciidoc +++ /dev/null @@ -1,226 +0,0 @@ -:plugin: twitter -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.6 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-input-twitter/blob/v3.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Twitter input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Ingest events from the Twitter Streaming API. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Twitter Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-consumer_key>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-consumer_secret>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-follows>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-full_tweet>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ignore_retweets>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keywords>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-languages>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-locations>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-oauth_token>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-oauth_token_secret>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-proxy_address>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-rate_limit_reset_in>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-use_proxy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-use_samples>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-consumer_key"] -===== `consumer_key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Your Twitter App's consumer key - -Don't know what this is? You need to create an "application" -on Twitter, see this url: - -[id="{version}-plugins-{type}s-{plugin}-consumer_secret"] -===== `consumer_secret` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Your Twitter App's consumer secret - -If you don't have one of these, you can create one by -registering a new application with Twitter: - - -[id="{version}-plugins-{type}s-{plugin}-follows"] -===== `follows` - - * Value type is <> - * There is no default value for this setting. - -A comma separated list of user IDs, indicating the users to -return statuses for in the Twitter stream. 
-See https://dev.twitter.com/streaming/overview/request-parameters#follow -for more details. - -[id="{version}-plugins-{type}s-{plugin}-full_tweet"] -===== `full_tweet` - - * Value type is <> - * Default value is `false` - -Record full tweet object as given to us by the Twitter Streaming API. - -[id="{version}-plugins-{type}s-{plugin}-ignore_retweets"] -===== `ignore_retweets` - - * Value type is <> - * Default value is `false` - -Lets you ignore the retweets coming out of the Twitter API. Default => false - -[id="{version}-plugins-{type}s-{plugin}-keywords"] -===== `keywords` - - * Value type is <> - * There is no default value for this setting. - -Any keywords to track in the Twitter stream. For multiple keywords, use -the syntax ["foo", "bar"]. There's a logical OR between each keyword -string listed and a logical AND between words separated by spaces per -keyword string. -See https://dev.twitter.com/streaming/overview/request-parameters#track -for more details. - -The wildcard "*" option is not supported. To ingest a sample stream of -all tweets, the use_samples option is recommended. - -[id="{version}-plugins-{type}s-{plugin}-languages"] -===== `languages` - - * Value type is <> - * There is no default value for this setting. - -A list of BCP 47 language identifiers corresponding to any of the languages listed -on Twitter’s advanced search page will only return tweets that have been detected -as being written in the specified languages. - -[id="{version}-plugins-{type}s-{plugin}-locations"] -===== `locations` - - * Value type is <> - * There is no default value for this setting. - -A comma-separated list of longitude, latitude pairs specifying a set -of bounding boxes to filter tweets by. -See https://dev.twitter.com/streaming/overview/request-parameters#locations -for more details. - -[id="{version}-plugins-{type}s-{plugin}-oauth_token"] -===== `oauth_token` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Your oauth token. - -To get this, login to Twitter with whatever account you want, -then visit - -Click on your app (used with the consumer_key and consumer_secret settings) -Then at the bottom of the page, click 'Create my access token' which -will create an oauth token and secret bound to your account and that -application. - -[id="{version}-plugins-{type}s-{plugin}-oauth_token_secret"] -===== `oauth_token_secret` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Your oauth token secret. - -To get this, login to Twitter with whatever account you want, -then visit - -Click on your app (used with the consumer_key and consumer_secret settings) -Then at the bottom of the page, click 'Create my access token' which -will create an oauth token and secret bound to your account and that -application. 
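To make the `keywords` matching rules described above concrete, here is a hedged sketch (credentials and keyword strings are illustrative placeholders):

[source,ruby]
----
input {
  twitter {
    consumer_key       => "YOUR_CONSUMER_KEY"
    consumer_secret    => "YOUR_CONSUMER_SECRET"
    oauth_token        => "YOUR_OAUTH_TOKEN"
    oauth_token_secret => "YOUR_OAUTH_TOKEN_SECRET"
    # Matches tweets containing both "error" and "logstash" (AND within a string),
    # or tweets containing "elasticsearch" (OR between strings).
    keywords           => ["error logstash", "elasticsearch"]
  }
}
----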
- -[id="{version}-plugins-{type}s-{plugin}-proxy_address"] -===== `proxy_address` - - * Value type is <> - * Default value is `"127.0.0.1"` - -Location of the proxy, by default the same machine as the one running this LS instance - -[id="{version}-plugins-{type}s-{plugin}-proxy_port"] -===== `proxy_port` - - * Value type is <> - * Default value is `3128` - -Port where the proxy is listening, by default 3128 (squid) - -[id="{version}-plugins-{type}s-{plugin}-rate_limit_reset_in"] -===== `rate_limit_reset_in` - - * Value type is <> - * Default value is `300` - -Duration in seconds to wait before retrying a connection when twitter responds with a 429 TooManyRequests -In some cases the 'x-rate-limit-reset' header is not set in the response and .rate_limit.reset_in -is nil. If this occurs then we use the integer specified here. The default is 5 minutes. - -[id="{version}-plugins-{type}s-{plugin}-use_proxy"] -===== `use_proxy` - - * Value type is <> - * Default value is `false` - -When to use a proxy to handle the connections - -[id="{version}-plugins-{type}s-{plugin}-use_samples"] -===== `use_samples` - - * Value type is <> - * Default value is `false` - -Returns a small random sample of all public statuses. The tweets returned -by the default access level are the same, so if two different clients connect -to this endpoint, they will see the same tweets. If set to true, the keywords, -follows, locations, and languages options will be ignored. Default => false - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/twitter-v3.0.7.asciidoc b/docs/versioned-plugins/inputs/twitter-v3.0.7.asciidoc deleted file mode 100644 index db4273926..000000000 --- a/docs/versioned-plugins/inputs/twitter-v3.0.7.asciidoc +++ /dev/null @@ -1,226 +0,0 @@ -:plugin: twitter -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.7 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-twitter/blob/v3.0.7/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Twitter input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Ingest events from the Twitter Streaming API. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Twitter Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-consumer_key>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-consumer_secret>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-follows>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-full_tweet>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ignore_retweets>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keywords>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-languages>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-locations>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-oauth_token>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-oauth_token_secret>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-proxy_address>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-rate_limit_reset_in>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-use_proxy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-use_samples>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-consumer_key"] -===== `consumer_key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Your Twitter App's consumer key - -Don't know what this is? You need to create an "application" -on Twitter, see this url: - -[id="{version}-plugins-{type}s-{plugin}-consumer_secret"] -===== `consumer_secret` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Your Twitter App's consumer secret - -If you don't have one of these, you can create one by -registering a new application with Twitter: - - -[id="{version}-plugins-{type}s-{plugin}-follows"] -===== `follows` - - * Value type is <> - * There is no default value for this setting. - -A comma separated list of user IDs, indicating the users to -return statuses for in the Twitter stream. -See https://dev.twitter.com/streaming/overview/request-parameters#follow -for more details. - -[id="{version}-plugins-{type}s-{plugin}-full_tweet"] -===== `full_tweet` - - * Value type is <> - * Default value is `false` - -Record full tweet object as given to us by the Twitter Streaming API. - -[id="{version}-plugins-{type}s-{plugin}-ignore_retweets"] -===== `ignore_retweets` - - * Value type is <> - * Default value is `false` - -Lets you ignore the retweets coming out of the Twitter API. Default => false - -[id="{version}-plugins-{type}s-{plugin}-keywords"] -===== `keywords` - - * Value type is <> - * There is no default value for this setting. - -Any keywords to track in the Twitter stream. For multiple keywords, use -the syntax ["foo", "bar"]. There's a logical OR between each keyword -string listed and a logical AND between words separated by spaces per -keyword string. -See https://dev.twitter.com/streaming/overview/request-parameters#track -for more details. - -The wildcard "*" option is not supported. To ingest a sample stream of -all tweets, the use_samples option is recommended. - -[id="{version}-plugins-{type}s-{plugin}-languages"] -===== `languages` - - * Value type is <> - * There is no default value for this setting. 
- -A list of BCP 47 language identifiers corresponding to any of the languages listed -on Twitter’s advanced search page will only return tweets that have been detected -as being written in the specified languages. - -[id="{version}-plugins-{type}s-{plugin}-locations"] -===== `locations` - - * Value type is <> - * There is no default value for this setting. - -A comma-separated list of longitude, latitude pairs specifying a set -of bounding boxes to filter tweets by. -See https://dev.twitter.com/streaming/overview/request-parameters#locations -for more details. - -[id="{version}-plugins-{type}s-{plugin}-oauth_token"] -===== `oauth_token` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Your oauth token. - -To get this, login to Twitter with whatever account you want, -then visit - -Click on your app (used with the consumer_key and consumer_secret settings) -Then at the bottom of the page, click 'Create my access token' which -will create an oauth token and secret bound to your account and that -application. - -[id="{version}-plugins-{type}s-{plugin}-oauth_token_secret"] -===== `oauth_token_secret` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Your oauth token secret. - -To get this, login to Twitter with whatever account you want, -then visit - -Click on your app (used with the consumer_key and consumer_secret settings) -Then at the bottom of the page, click 'Create my access token' which -will create an oauth token and secret bound to your account and that -application. - -[id="{version}-plugins-{type}s-{plugin}-proxy_address"] -===== `proxy_address` - - * Value type is <> - * Default value is `"127.0.0.1"` - -Location of the proxy, by default the same machine as the one running this LS instance - -[id="{version}-plugins-{type}s-{plugin}-proxy_port"] -===== `proxy_port` - - * Value type is <> - * Default value is `3128` - -Port where the proxy is listening, by default 3128 (squid) - -[id="{version}-plugins-{type}s-{plugin}-rate_limit_reset_in"] -===== `rate_limit_reset_in` - - * Value type is <> - * Default value is `300` - -Duration in seconds to wait before retrying a connection when twitter responds with a 429 TooManyRequests -In some cases the 'x-rate-limit-reset' header is not set in the response and .rate_limit.reset_in -is nil. If this occurs then we use the integer specified here. The default is 5 minutes. - -[id="{version}-plugins-{type}s-{plugin}-use_proxy"] -===== `use_proxy` - - * Value type is <> - * Default value is `false` - -When to use a proxy to handle the connections - -[id="{version}-plugins-{type}s-{plugin}-use_samples"] -===== `use_samples` - - * Value type is <> - * Default value is `false` - -Returns a small random sample of all public statuses. The tweets returned -by the default access level are the same, so if two different clients connect -to this endpoint, they will see the same tweets. If set to true, the keywords, -follows, locations, and languages options will be ignored. 
Default => false - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/udp-index.asciidoc b/docs/versioned-plugins/inputs/udp-index.asciidoc deleted file mode 100644 index 298415f5e..000000000 --- a/docs/versioned-plugins/inputs/udp-index.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -:plugin: udp -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-12-19 -| <> | 2017-12-15 -| <> | 2017-11-07 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::udp-v3.2.1.asciidoc[] -include::udp-v3.2.0.asciidoc[] -include::udp-v3.1.3.asciidoc[] -include::udp-v3.1.2.asciidoc[] -include::udp-v3.1.1.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/udp-v3.1.1.asciidoc b/docs/versioned-plugins/inputs/udp-v3.1.1.asciidoc deleted file mode 100644 index 2d7f97091..000000000 --- a/docs/versioned-plugins/inputs/udp-v3.1.1.asciidoc +++ /dev/null @@ -1,106 +0,0 @@ -:plugin: udp -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-udp/blob/v3.1.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Udp input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read messages as events over the network via udp. The only required -configuration item is `port`, which specifies the udp port logstash -will listen on for event streams. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Udp Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-buffer_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-queue_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-workers>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-buffer_size"] -===== `buffer_size` - - * Value type is <> - * Default value is `65536` - -The maximum packet size to read from the network - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"0.0.0.0"` - -The address which logstash will listen on. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The port which logstash will listen on. Remember that ports less -than 1024 (privileged ports) may require root or elevated privileges to use. 
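Because `port` is the only required setting, a minimal configuration sketch looks like this (5514 is an illustrative unprivileged port, not a default):

[source,ruby]
----
input {
  udp {
    port => 5514  # illustrative; ports below 1024 may need elevated privileges
  }
}
----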
- -[id="{version}-plugins-{type}s-{plugin}-queue_size"] -===== `queue_size` - - * Value type is <> - * Default value is `2000` - -This is the number of unprocessed UDP packets you can hold in memory -before packets will start dropping. - -[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] -===== `receive_buffer_bytes` - - * Value type is <> - * There is no default value for this setting. - -The socket receive buffer size in bytes. -If option is not set, the operating system default is used. -The operating system will use the max allowed value if receive_buffer_bytes is larger than allowed. -Consult your operating system documentation if you need to increase this max allowed value. - -[id="{version}-plugins-{type}s-{plugin}-workers"] -===== `workers` - - * Value type is <> - * Default value is `2` - -Number of threads processing packets - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/udp-v3.1.2.asciidoc b/docs/versioned-plugins/inputs/udp-v3.1.2.asciidoc deleted file mode 100644 index c5ddc7554..000000000 --- a/docs/versioned-plugins/inputs/udp-v3.1.2.asciidoc +++ /dev/null @@ -1,106 +0,0 @@ -:plugin: udp -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.2 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-input-udp/blob/v3.1.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Udp input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read messages as events over the network via udp. The only required -configuration item is `port`, which specifies the udp port logstash -will listen on for event streams. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Udp Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-buffer_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-queue_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-workers>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-buffer_size"] -===== `buffer_size` - - * Value type is <> - * Default value is `65536` - -The maximum packet size to read from the network - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"0.0.0.0"` - -The address which logstash will listen on. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The port which logstash will listen on. 
Remember that ports less -than 1024 (privileged ports) may require root or elevated privileges to use. - -[id="{version}-plugins-{type}s-{plugin}-queue_size"] -===== `queue_size` - - * Value type is <> - * Default value is `2000` - -This is the number of unprocessed UDP packets you can hold in memory -before packets will start dropping. - -[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] -===== `receive_buffer_bytes` - - * Value type is <> - * There is no default value for this setting. - -The socket receive buffer size in bytes. -If option is not set, the operating system default is used. -The operating system will use the max allowed value if receive_buffer_bytes is larger than allowed. -Consult your operating system documentation if you need to increase this max allowed value. - -[id="{version}-plugins-{type}s-{plugin}-workers"] -===== `workers` - - * Value type is <> - * Default value is `2` - -Number of threads processing packets - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/udp-v3.1.3.asciidoc b/docs/versioned-plugins/inputs/udp-v3.1.3.asciidoc deleted file mode 100644 index 3cab2a60f..000000000 --- a/docs/versioned-plugins/inputs/udp-v3.1.3.asciidoc +++ /dev/null @@ -1,106 +0,0 @@ -:plugin: udp -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-udp/blob/v3.1.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Udp input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read messages as events over the network via udp. The only required -configuration item is `port`, which specifies the udp port logstash -will listen on for event streams. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Udp Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-buffer_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-queue_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-workers>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-buffer_size"] -===== `buffer_size` - - * Value type is <> - * Default value is `65536` - -The maximum packet size to read from the network - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"0.0.0.0"` - -The address which logstash will listen on. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * This is a required setting. 
- * Value type is <> - * There is no default value for this setting. - -The port which logstash will listen on. Remember that ports less -than 1024 (privileged ports) may require root or elevated privileges to use. - -[id="{version}-plugins-{type}s-{plugin}-queue_size"] -===== `queue_size` - - * Value type is <> - * Default value is `2000` - -This is the number of unprocessed UDP packets you can hold in memory -before packets will start dropping. - -[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] -===== `receive_buffer_bytes` - - * Value type is <> - * There is no default value for this setting. - -The socket receive buffer size in bytes. -If option is not set, the operating system default is used. -The operating system will use the max allowed value if receive_buffer_bytes is larger than allowed. -Consult your operating system documentation if you need to increase this max allowed value. - -[id="{version}-plugins-{type}s-{plugin}-workers"] -===== `workers` - - * Value type is <> - * Default value is `2` - -Number of threads processing packets - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/udp-v3.2.0.asciidoc b/docs/versioned-plugins/inputs/udp-v3.2.0.asciidoc deleted file mode 100644 index b1c2cea0e..000000000 --- a/docs/versioned-plugins/inputs/udp-v3.2.0.asciidoc +++ /dev/null @@ -1,106 +0,0 @@ -:plugin: udp -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.2.0 -:release_date: 2017-12-15 -:changelog_url: https://github.com/logstash-plugins/logstash-input-udp/blob/v3.2.0/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Udp input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read messages as events over the network via udp. The only required -configuration item is `port`, which specifies the udp port logstash -will listen on for event streams. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Udp Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-buffer_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-queue_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-workers>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. 
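As a quick sketch of how the settings in the table above fit together, the values below are the documented defaults except `port`, which is illustrative:

[source,ruby]
----
input {
  udp {
    port        => 5514   # required; illustrative unprivileged port
    buffer_size => 65536  # default; maximum packet size read from the network
    queue_size  => 2000   # default; unprocessed packets held before dropping
    workers     => 2      # default; threads processing packets
  }
}
----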
- -  - -[id="{version}-plugins-{type}s-{plugin}-buffer_size"] -===== `buffer_size` - - * Value type is <> - * Default value is `65536` - -The maximum packet size to read from the network - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"0.0.0.0"` - -The address which logstash will listen on. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The port which logstash will listen on. Remember that ports less -than 1024 (privileged ports) may require root or elevated privileges to use. - -[id="{version}-plugins-{type}s-{plugin}-queue_size"] -===== `queue_size` - - * Value type is <> - * Default value is `2000` - -This is the number of unprocessed UDP packets you can hold in memory -before packets will start dropping. - -[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] -===== `receive_buffer_bytes` - - * Value type is <> - * There is no default value for this setting. - -The socket receive buffer size in bytes. -If option is not set, the operating system default is used. -The operating system will use the max allowed value if receive_buffer_bytes is larger than allowed. -Consult your operating system documentation if you need to increase this max allowed value. - -[id="{version}-plugins-{type}s-{plugin}-workers"] -===== `workers` - - * Value type is <> - * Default value is `2` - -Number of threads processing packets - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/udp-v3.2.1.asciidoc b/docs/versioned-plugins/inputs/udp-v3.2.1.asciidoc deleted file mode 100644 index 39707b01e..000000000 --- a/docs/versioned-plugins/inputs/udp-v3.2.1.asciidoc +++ /dev/null @@ -1,106 +0,0 @@ -:plugin: udp -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.2.1 -:release_date: 2017-12-19 -:changelog_url: https://github.com/logstash-plugins/logstash-input-udp/blob/v3.2.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Udp input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read messages as events over the network via udp. The only required -configuration item is `port`, which specifies the udp port logstash -will listen on for event streams. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Udp Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-buffer_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-queue_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-workers>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-buffer_size"] -===== `buffer_size` - - * Value type is <> - * Default value is `65536` - -The maximum packet size to read from the network - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"0.0.0.0"` - -The address which logstash will listen on. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The port which logstash will listen on. Remember that ports less -than 1024 (privileged ports) may require root or elevated privileges to use. - -[id="{version}-plugins-{type}s-{plugin}-queue_size"] -===== `queue_size` - - * Value type is <> - * Default value is `2000` - -This is the number of unprocessed UDP packets you can hold in memory -before packets will start dropping. - -[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] -===== `receive_buffer_bytes` - - * Value type is <> - * There is no default value for this setting. - -The socket receive buffer size in bytes. -If option is not set, the operating system default is used. -The operating system will use the max allowed value if receive_buffer_bytes is larger than allowed. -Consult your operating system documentation if you need to increase this max allowed value. - -[id="{version}-plugins-{type}s-{plugin}-workers"] -===== `workers` - - * Value type is <> - * Default value is `2` - -Number of threads processing packets - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/unix-index.asciidoc b/docs/versioned-plugins/inputs/unix-index.asciidoc deleted file mode 100644 index 69c5c0aba..000000000 --- a/docs/versioned-plugins/inputs/unix-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: unix -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::unix-v3.0.6.asciidoc[] -include::unix-v3.0.5.asciidoc[] -include::unix-v3.0.4.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/unix-v3.0.4.asciidoc b/docs/versioned-plugins/inputs/unix-v3.0.4.asciidoc deleted file mode 100644 index 5e4f10445..000000000 --- a/docs/versioned-plugins/inputs/unix-v3.0.4.asciidoc +++ /dev/null @@ -1,103 +0,0 @@ -:plugin: unix -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-unix/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Unix input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read events over a UNIX socket. - -Like `stdin` and `file` inputs, each event is assumed to be one line of text. - -Can either accept connections from clients or connect to a server, -depending on `mode`. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Unix Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-data_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-force_unlink>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-socket_not_present_retry_interval_seconds>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-data_timeout"] -===== `data_timeout` - - * Value type is <> - * Default value is `-1` - -The 'read' timeout in seconds. If a particular connection is idle for -more than this timeout period, we will assume it is dead and close it. - -If you never want to timeout, use -1. - -[id="{version}-plugins-{type}s-{plugin}-force_unlink"] -===== `force_unlink` - - * Value type is <> - * Default value is `false` - -Remove socket file in case of EADDRINUSE failure - -[id="{version}-plugins-{type}s-{plugin}-mode"] -===== `mode` - - * Value can be any of: `server`, `client` - * Default value is `"server"` - -Mode to operate in. `server` listens for client connections, -`client` connects to a server. - -[id="{version}-plugins-{type}s-{plugin}-path"] -===== `path` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -When mode is `server`, the path to listen on. -When mode is `client`, the path to connect to. - -[id="{version}-plugins-{type}s-{plugin}-socket_not_present_retry_interval_seconds"] -===== `socket_not_present_retry_interval_seconds` - - * This is a required setting. - * Value type is <> - * Default value is `5` - -Amount of time in seconds to wait if the socket file is not present, before retrying. -Only positive values are allowed. - -This setting is only used if `mode` is `client`. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/unix-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/unix-v3.0.5.asciidoc deleted file mode 100644 index 66a633053..000000000 --- a/docs/versioned-plugins/inputs/unix-v3.0.5.asciidoc +++ /dev/null @@ -1,103 +0,0 @@ -:plugin: unix -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-input-unix/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Unix input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read events over a UNIX socket. - -Like `stdin` and `file` inputs, each event is assumed to be one line of text. - -Can either accept connections from clients or connect to a server, -depending on `mode`. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Unix Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-data_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-force_unlink>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-socket_not_present_retry_interval_seconds>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-data_timeout"] -===== `data_timeout` - - * Value type is <> - * Default value is `-1` - -The 'read' timeout in seconds. If a particular connection is idle for -more than this timeout period, we will assume it is dead and close it. - -If you never want to timeout, use -1. - -[id="{version}-plugins-{type}s-{plugin}-force_unlink"] -===== `force_unlink` - - * Value type is <> - * Default value is `false` - -Remove socket file in case of EADDRINUSE failure - -[id="{version}-plugins-{type}s-{plugin}-mode"] -===== `mode` - - * Value can be any of: `server`, `client` - * Default value is `"server"` - -Mode to operate in. `server` listens for client connections, -`client` connects to a server. - -[id="{version}-plugins-{type}s-{plugin}-path"] -===== `path` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -When mode is `server`, the path to listen on. -When mode is `client`, the path to connect to. - -[id="{version}-plugins-{type}s-{plugin}-socket_not_present_retry_interval_seconds"] -===== `socket_not_present_retry_interval_seconds` - - * This is a required setting. - * Value type is <> - * Default value is `5` - -Amount of time in seconds to wait if the socket file is not present, before retrying. -Only positive values are allowed. - -This setting is only used if `mode` is `client`. 
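Combining the settings above, here is a sketch of `client` mode; the socket path is an illustrative placeholder:

[source,ruby]
----
input {
  unix {
    mode => "client"              # connect to a server instead of listening
    path => "/var/run/app.sock"   # required; illustrative socket to connect to
    socket_not_present_retry_interval_seconds => 5  # default; client mode only
  }
}
----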
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/unix-v3.0.6.asciidoc b/docs/versioned-plugins/inputs/unix-v3.0.6.asciidoc deleted file mode 100644 index f54e49fcf..000000000 --- a/docs/versioned-plugins/inputs/unix-v3.0.6.asciidoc +++ /dev/null @@ -1,103 +0,0 @@ -:plugin: unix -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.6 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-unix/blob/v3.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Unix input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read events over a UNIX socket. - -Like `stdin` and `file` inputs, each event is assumed to be one line of text. - -Can either accept connections from clients or connect to a server, -depending on `mode`. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Unix Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-data_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-force_unlink>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-socket_not_present_retry_interval_seconds>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-data_timeout"] -===== `data_timeout` - - * Value type is <> - * Default value is `-1` - -The 'read' timeout in seconds. If a particular connection is idle for -more than this timeout period, we will assume it is dead and close it. - -If you never want to timeout, use -1. - -[id="{version}-plugins-{type}s-{plugin}-force_unlink"] -===== `force_unlink` - - * Value type is <> - * Default value is `false` - -Remove socket file in case of EADDRINUSE failure - -[id="{version}-plugins-{type}s-{plugin}-mode"] -===== `mode` - - * Value can be any of: `server`, `client` - * Default value is `"server"` - -Mode to operate in. `server` listens for client connections, -`client` connects to a server. - -[id="{version}-plugins-{type}s-{plugin}-path"] -===== `path` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -When mode is `server`, the path to listen on. -When mode is `client`, the path to connect to. - -[id="{version}-plugins-{type}s-{plugin}-socket_not_present_retry_interval_seconds"] -===== `socket_not_present_retry_interval_seconds` - - * This is a required setting. - * Value type is <> - * Default value is `5` - -Amount of time in seconds to wait if the socket file is not present, before retrying. -Only positive values are allowed. 
- -This setting is only used if `mode` is `client`. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/varnishlog-index.asciidoc b/docs/versioned-plugins/inputs/varnishlog-index.asciidoc deleted file mode 100644 index 3cc768a03..000000000 --- a/docs/versioned-plugins/inputs/varnishlog-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: varnishlog -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::varnishlog-v3.0.3.asciidoc[] -include::varnishlog-v3.0.2.asciidoc[] -include::varnishlog-v3.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/varnishlog-v3.0.1.asciidoc b/docs/versioned-plugins/inputs/varnishlog-v3.0.1.asciidoc deleted file mode 100644 index 54139c101..000000000 --- a/docs/versioned-plugins/inputs/varnishlog-v3.0.1.asciidoc +++ /dev/null @@ -1,52 +0,0 @@ -:plugin: varnishlog -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-varnishlog/blob/v3.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Varnishlog input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read from varnish cache's shared memory log - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Varnishlog Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-threads"] -===== `threads` - - * Value type is <> - * Default value is `1` - - - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/varnishlog-v3.0.2.asciidoc b/docs/versioned-plugins/inputs/varnishlog-v3.0.2.asciidoc deleted file mode 100644 index a0e44363d..000000000 --- a/docs/versioned-plugins/inputs/varnishlog-v3.0.2.asciidoc +++ /dev/null @@ -1,52 +0,0 @@ -:plugin: varnishlog -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.2 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-input-varnishlog/blob/v3.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Varnishlog input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read from varnish cache's shared memory log - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Varnishlog Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-threads"] -===== `threads` - - * Value type is <> - * Default value is `1` - - - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/varnishlog-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/varnishlog-v3.0.3.asciidoc deleted file mode 100644 index 72847a673..000000000 --- a/docs/versioned-plugins/inputs/varnishlog-v3.0.3.asciidoc +++ /dev/null @@ -1,52 +0,0 @@ -:plugin: varnishlog -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-varnishlog/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Varnishlog input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read from varnish cache's shared memory log - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Varnishlog Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. 
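-
-Since `threads` (described next) is the only plugin-specific option, a minimal
-configuration is short. The sketch below assumes Logstash runs on the Varnish host
-itself, and the thread count is purely illustrative:
-[source,ruby]
-    input {
-      varnishlog {
-        threads => 2 # illustrative; the default is 1
-      }
-    }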
- -  - -[id="{version}-plugins-{type}s-{plugin}-threads"] -===== `threads` - - * Value type is <> - * Default value is `1` - - - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/websocket-index.asciidoc b/docs/versioned-plugins/inputs/websocket-index.asciidoc deleted file mode 100644 index 265130c2f..000000000 --- a/docs/versioned-plugins/inputs/websocket-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: websocket -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::websocket-v4.0.3.asciidoc[] -include::websocket-v4.0.2.asciidoc[] -include::websocket-v4.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/websocket-v4.0.1.asciidoc b/docs/versioned-plugins/inputs/websocket-v4.0.1.asciidoc deleted file mode 100644 index cca9d95da..000000000 --- a/docs/versioned-plugins/inputs/websocket-v4.0.1.asciidoc +++ /dev/null @@ -1,64 +0,0 @@ -:plugin: websocket -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-websocket/blob/v4.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Websocket input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read events over the websocket protocol. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Websocket Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["client"]`|No -| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-mode"] -===== `mode` - - * Value can be any of: `client` - * Default value is `"client"` - -Select the plugin's mode of operation. Right now only client mode -is supported, i.e. this plugin connects to a websocket server and -receives events from the server as websocket messages. - -[id="{version}-plugins-{type}s-{plugin}-url"] -===== `url` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The URL to connect to. 
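-
-For illustration, a minimal client configuration needs only `url`; the server address
-below is hypothetical, and `mode` is shown even though `client` is its only (and default) value:
-[source,ruby]
-    input {
-      websocket {
-        mode => "client"
-        url => "ws://127.0.0.1:3232/" # hypothetical websocket server
-      }
-    }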
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/websocket-v4.0.2.asciidoc b/docs/versioned-plugins/inputs/websocket-v4.0.2.asciidoc deleted file mode 100644 index 6035637fd..000000000 --- a/docs/versioned-plugins/inputs/websocket-v4.0.2.asciidoc +++ /dev/null @@ -1,64 +0,0 @@ -:plugin: websocket -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.0.2 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-input-websocket/blob/v4.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Websocket input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read events over the websocket protocol. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Websocket Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["client"]`|No -| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-mode"] -===== `mode` - - * Value can be any of: `client` - * Default value is `"client"` - -Select the plugin's mode of operation. Right now only client mode -is supported, i.e. this plugin connects to a websocket server and -receives events from the server as websocket messages. - -[id="{version}-plugins-{type}s-{plugin}-url"] -===== `url` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The URL to connect to. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/websocket-v4.0.3.asciidoc b/docs/versioned-plugins/inputs/websocket-v4.0.3.asciidoc deleted file mode 100644 index ec144ad32..000000000 --- a/docs/versioned-plugins/inputs/websocket-v4.0.3.asciidoc +++ /dev/null @@ -1,64 +0,0 @@ -:plugin: websocket -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.0.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-websocket/blob/v4.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Websocket input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read events over the websocket protocol. 
- -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Websocket Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["client"]`|No -| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-mode"] -===== `mode` - - * Value can be any of: `client` - * Default value is `"client"` - -Select the plugin's mode of operation. Right now only client mode -is supported, i.e. this plugin connects to a websocket server and -receives events from the server as websocket messages. - -[id="{version}-plugins-{type}s-{plugin}-url"] -===== `url` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The URL to connect to. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/wmi-index.asciidoc b/docs/versioned-plugins/inputs/wmi-index.asciidoc deleted file mode 100644 index 8587aba71..000000000 --- a/docs/versioned-plugins/inputs/wmi-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: wmi -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::wmi-v3.0.3.asciidoc[] -include::wmi-v3.0.2.asciidoc[] -include::wmi-v3.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/wmi-v3.0.1.asciidoc b/docs/versioned-plugins/inputs/wmi-v3.0.1.asciidoc deleted file mode 100644 index 9ead53012..000000000 --- a/docs/versioned-plugins/inputs/wmi-v3.0.1.asciidoc +++ /dev/null @@ -1,119 +0,0 @@ -:plugin: wmi -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-wmi/blob/v3.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Wmi input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Collect data from WMI query - -This is useful for collecting performance metrics and other data -which is accessible via WMI on a Windows host - -Example: -[source,ruby] - input { - wmi { - query => "select * from Win32_Process" - interval => 10 - } - wmi { - query => "select PercentProcessorTime from Win32_PerfFormattedData_PerfOS_Processor where name = '_Total'" - } - wmi { # Connect to a remote host - query => "select * from Win32_Process" - host => "MyRemoteHost" - user => "mydomain\myuser" - password => "Password" - } - } - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Wmi Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-namespace>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-query>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"localhost"` - -Host to connect to ( Defaults to localhost ) - -[id="{version}-plugins-{type}s-{plugin}-interval"] -===== `interval` - - * Value type is <> - * Default value is `10` - -Polling interval - -[id="{version}-plugins-{type}s-{plugin}-namespace"] -===== `namespace` - - * Value type is <> - * Default value is `"root\\cimv2"` - -Namespace when doing remote connections - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password when doing remote connections - -[id="{version}-plugins-{type}s-{plugin}-query"] -===== `query` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -WMI query - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * There is no default value for this setting. - -Username when doing remote connections - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/wmi-v3.0.2.asciidoc b/docs/versioned-plugins/inputs/wmi-v3.0.2.asciidoc deleted file mode 100644 index 8405a91cf..000000000 --- a/docs/versioned-plugins/inputs/wmi-v3.0.2.asciidoc +++ /dev/null @@ -1,119 +0,0 @@ -:plugin: wmi -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.2 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-input-wmi/blob/v3.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Wmi input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Collect data from WMI query - -This is useful for collecting performance metrics and other data -which is accessible via WMI on a Windows host - -Example: -[source,ruby] - input { - wmi { - query => "select * from Win32_Process" - interval => 10 - } - wmi { - query => "select PercentProcessorTime from Win32_PerfFormattedData_PerfOS_Processor where name = '_Total'" - } - wmi { # Connect to a remote host - query => "select * from Win32_Process" - host => "MyRemoteHost" - user => "mydomain\myuser" - password => "Password" - } - } - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Wmi Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-namespace>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-query>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"localhost"` - -Host to connect to ( Defaults to localhost ) - -[id="{version}-plugins-{type}s-{plugin}-interval"] -===== `interval` - - * Value type is <> - * Default value is `10` - -Polling interval - -[id="{version}-plugins-{type}s-{plugin}-namespace"] -===== `namespace` - - * Value type is <> - * Default value is `"root\\cimv2"` - -Namespace when doing remote connections - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password when doing remote connections - -[id="{version}-plugins-{type}s-{plugin}-query"] -===== `query` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -WMI query - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * There is no default value for this setting. - -Username when doing remote connections - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/wmi-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/wmi-v3.0.3.asciidoc deleted file mode 100644 index a16498907..000000000 --- a/docs/versioned-plugins/inputs/wmi-v3.0.3.asciidoc +++ /dev/null @@ -1,119 +0,0 @@ -:plugin: wmi -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-wmi/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Wmi input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Collect data from WMI query - -This is useful for collecting performance metrics and other data -which is accessible via WMI on a Windows host - -Example: -[source,ruby] - input { - wmi { - query => "select * from Win32_Process" - interval => 10 - } - wmi { - query => "select PercentProcessorTime from Win32_PerfFormattedData_PerfOS_Processor where name = '_Total'" - } - wmi { # Connect to a remote host - query => "select * from Win32_Process" - host => "MyRemoteHost" - user => "mydomain\myuser" - password => "Password" - } - } - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Wmi Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-namespace>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-query>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"localhost"` - -Host to connect to ( Defaults to localhost ) - -[id="{version}-plugins-{type}s-{plugin}-interval"] -===== `interval` - - * Value type is <> - * Default value is `10` - -Polling interval - -[id="{version}-plugins-{type}s-{plugin}-namespace"] -===== `namespace` - - * Value type is <> - * Default value is `"root\\cimv2"` - -Namespace when doing remote connections - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password when doing remote connections - -[id="{version}-plugins-{type}s-{plugin}-query"] -===== `query` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -WMI query - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * There is no default value for this setting. 
- -Username when doing remote connections - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/xmpp-index.asciidoc b/docs/versioned-plugins/inputs/xmpp-index.asciidoc deleted file mode 100644 index 32f60ecf4..000000000 --- a/docs/versioned-plugins/inputs/xmpp-index.asciidoc +++ /dev/null @@ -1,18 +0,0 @@ -:plugin: xmpp -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-16 -| <> | 2017-06-27 -| <> | 2017-06-23 -|======================================================================= - -include::xmpp-v3.1.6.asciidoc[] -include::xmpp-v3.1.5.asciidoc[] -include::xmpp-v3.1.4.asciidoc[] -include::xmpp-v3.1.3.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/xmpp-v3.1.3.asciidoc b/docs/versioned-plugins/inputs/xmpp-v3.1.3.asciidoc deleted file mode 100644 index c12466803..000000000 --- a/docs/versioned-plugins/inputs/xmpp-v3.1.3.asciidoc +++ /dev/null @@ -1,87 +0,0 @@ -:plugin: xmpp -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-xmpp/blob/v3.1.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Xmpp input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This input allows you to receive events over XMPP/Jabber. - -This plugin can be used for accepting events from humans or applications -XMPP, or you can use it for PubSub or general message passing for logstash to -logstash. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Xmpp Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-rooms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * There is no default value for this setting. - -The xmpp server to connect to. This is optional. If you omit this setting, -the host on the user/identity is used. (`foo.com` for `user@foo.com`) - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The xmpp password for the user/identity. - -[id="{version}-plugins-{type}s-{plugin}-rooms"] -===== `rooms` - - * Value type is <> - * There is no default value for this setting. 
- -if muc/multi-user-chat required, give the name of the room that -you want to join: `room@conference.domain/nick` - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The user or resource ID, like `foo@example.com`. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/xmpp-v3.1.4.asciidoc b/docs/versioned-plugins/inputs/xmpp-v3.1.4.asciidoc deleted file mode 100644 index e32b0cb01..000000000 --- a/docs/versioned-plugins/inputs/xmpp-v3.1.4.asciidoc +++ /dev/null @@ -1,87 +0,0 @@ -:plugin: xmpp -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.4 -:release_date: 2017-06-27 -:changelog_url: https://github.com/logstash-plugins/logstash-input-xmpp/blob/v3.1.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Xmpp input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This input allows you to receive events over XMPP/Jabber. - -This plugin can be used for accepting events from humans or applications -XMPP, or you can use it for PubSub or general message passing for logstash to -logstash. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Xmpp Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-rooms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * There is no default value for this setting. - -The xmpp server to connect to. This is optional. If you omit this setting, -the host on the user/identity is used. (`foo.com` for `user@foo.com`) - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The xmpp password for the user/identity. - -[id="{version}-plugins-{type}s-{plugin}-rooms"] -===== `rooms` - - * Value type is <> - * There is no default value for this setting. - -if muc/multi-user-chat required, give the name of the room that -you want to join: `room@conference.domain/nick` - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The user or resource ID, like `foo@example.com`. 
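-
-To make these settings concrete, here is a sketch that joins a multi-user chat room;
-the account, password, and room are all hypothetical values:
-[source,ruby]
-    input {
-      xmpp {
-        user => "logstash@example.com" # hypothetical identity
-        password => "secret" # hypothetical password
-        rooms => ["logs@conference.example.com/logstash"] # optional MUC room to join
-      }
-    }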
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/xmpp-v3.1.5.asciidoc b/docs/versioned-plugins/inputs/xmpp-v3.1.5.asciidoc deleted file mode 100644 index 813db700f..000000000 --- a/docs/versioned-plugins/inputs/xmpp-v3.1.5.asciidoc +++ /dev/null @@ -1,87 +0,0 @@ -:plugin: xmpp -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.5 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-input-xmpp/blob/v3.1.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Xmpp input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This input allows you to receive events over XMPP/Jabber. - -This plugin can be used for accepting events from humans or applications -XMPP, or you can use it for PubSub or general message passing for logstash to -logstash. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Xmpp Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-rooms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * There is no default value for this setting. - -The xmpp server to connect to. This is optional. If you omit this setting, -the host on the user/identity is used. (`foo.com` for `user@foo.com`) - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The xmpp password for the user/identity. - -[id="{version}-plugins-{type}s-{plugin}-rooms"] -===== `rooms` - - * Value type is <> - * There is no default value for this setting. - -if muc/multi-user-chat required, give the name of the room that -you want to join: `room@conference.domain/nick` - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The user or resource ID, like `foo@example.com`. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/xmpp-v3.1.6.asciidoc b/docs/versioned-plugins/inputs/xmpp-v3.1.6.asciidoc deleted file mode 100644 index 6dafde59c..000000000 --- a/docs/versioned-plugins/inputs/xmpp-v3.1.6.asciidoc +++ /dev/null @@ -1,87 +0,0 @@ -:plugin: xmpp -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v3.1.6 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-input-xmpp/blob/v3.1.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Xmpp input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This input allows you to receive events over XMPP/Jabber. - -This plugin can be used for accepting events from humans or applications -XMPP, or you can use it for PubSub or general message passing for logstash to -logstash. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Xmpp Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-rooms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * There is no default value for this setting. - -The xmpp server to connect to. This is optional. If you omit this setting, -the host on the user/identity is used. (`foo.com` for `user@foo.com`) - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The xmpp password for the user/identity. - -[id="{version}-plugins-{type}s-{plugin}-rooms"] -===== `rooms` - - * Value type is <> - * There is no default value for this setting. - -if muc/multi-user-chat required, give the name of the room that -you want to join: `room@conference.domain/nick` - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The user or resource ID, like `foo@example.com`. 
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/zenoss-index.asciidoc b/docs/versioned-plugins/inputs/zenoss-index.asciidoc deleted file mode 100644 index 1a283f2e5..000000000 --- a/docs/versioned-plugins/inputs/zenoss-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: zenoss -:type: input - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::zenoss-v2.0.6.asciidoc[] -include::zenoss-v2.0.5.asciidoc[] - diff --git a/docs/versioned-plugins/inputs/zenoss-v2.0.5.asciidoc b/docs/versioned-plugins/inputs/zenoss-v2.0.5.asciidoc deleted file mode 100644 index 5fa3a3778..000000000 --- a/docs/versioned-plugins/inputs/zenoss-v2.0.5.asciidoc +++ /dev/null @@ -1,398 +0,0 @@ -:plugin: zenoss -:type: input - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.0.5 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-input-zenoss/blob/v2.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Zenoss input plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Read Zenoss events from the zenoss.zenevents fanout exchange. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Zenoss Input Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-ack>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-auto_delete>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclusive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_enabled>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-prefetch_count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-ack"] -===== `ack` - - * Value type is <> - * Default value is `true` - -Enable message acknowledgements. With acknowledgements -messages fetched by Logstash but not yet sent into the -Logstash pipeline will be requeued by the server if Logstash -shuts down. Acknowledgements will however hurt the message -throughput. - -This will only send an ack back every `prefetch_count` messages. -Working in batches provides a performance boost here. - -[id="{version}-plugins-{type}s-{plugin}-arguments"] -===== `arguments` - - * Value type is <> - * Default value is `{}` - -Extra queue arguments as an array. -To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}` - -[id="{version}-plugins-{type}s-{plugin}-auto_delete"] -===== `auto_delete` - - * Value type is <> - * Default value is `false` - -Should the queue be deleted on the broker when the last consumer -disconnects? Set this option to `false` if you want the queue to remain -on the broker, queueing up messages until a consumer comes along to -consume them. - -[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"] -===== `automatic_recovery` - - * Value type is <> - * Default value is `true` - -Set this to automatically recover from a broken connection. You almost certainly don't want to override this!!! 
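-
-As a quick orientation before the remaining connection settings, the sketch below
-shows a minimal consumer; the broker host is hypothetical and the credentials
-restate the plugin defaults:
-[source,ruby]
-    input {
-      zenoss {
-        host => "rabbitmq.example.com" # hypothetical broker address
-        user => "zenoss" # default username
-        password => "zenoss" # default password
-      }
-    }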
- -[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"] -===== `connect_retry_interval` - - * Value type is <> - * Default value is `1` - -Time in seconds to wait before retrying a connection - -[id="{version}-plugins-{type}s-{plugin}-connection_timeout"] -===== `connection_timeout` - - * Value type is <> - * There is no default value for this setting. - -The default connection timeout in milliseconds. If not specified the timeout is infinite. - -[id="{version}-plugins-{type}s-{plugin}-durable"] -===== `durable` - - * Value type is <> - * Default value is `false` - -Is this queue durable? (aka; Should it survive a broker restart?) - -[id="{version}-plugins-{type}s-{plugin}-exchange"] -===== `exchange` - - * Value type is <> - * Default value is `"zenoss.zenevents"` - -The name of the exchange to bind the queue. This is analogous to the 'rabbitmq -output' [config 'name'](../outputs/rabbitmq) - -[id="{version}-plugins-{type}s-{plugin}-exchange_type"] -===== `exchange_type` - - * Value type is <> - * There is no default value for this setting. - -The type of the exchange to bind to. Specifying this will cause this plugin -to declare the exchange if it does not exist. - -[id="{version}-plugins-{type}s-{plugin}-exclusive"] -===== `exclusive` - - * Value type is <> - * Default value is `false` - -Is the queue exclusive? Exclusive queues can only be used by the connection -that declared them and will be deleted when it is closed (e.g. due to a Logstash -restart). - -[id="{version}-plugins-{type}s-{plugin}-heartbeat"] -===== `heartbeat` - - * Value type is <> - * There is no default value for this setting. - -Heartbeat delay in seconds. If unspecified no heartbeats will be sent - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"localhost"` - -Your rabbitmq server address - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * Value type is <> - * Default value is `"zenoss.zenevent.#"` - -The routing key to use. This is only valid for direct or fanout exchanges - -* Routing keys are ignored on topic exchanges. -* Wildcards are not valid on direct exchanges. - -[id="{version}-plugins-{type}s-{plugin}-metadata_enabled"] -===== `metadata_enabled` - - * Value type is <> - * Default value is `false` - -Enable the storage of message headers and properties in `@metadata`. This may impact performance - -[id="{version}-plugins-{type}s-{plugin}-passive"] -===== `passive` - - * Value type is <> - * Default value is `false` - -Passive queue creation? Useful for checking queue existance without modifying server state - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * Default value is `"zenoss"` - -Your rabbitmq password - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `5672` - -RabbitMQ port to connect on - -[id="{version}-plugins-{type}s-{plugin}-prefetch_count"] -===== `prefetch_count` - - * Value type is <> - * Default value is `256` - -Prefetch count. If acknowledgements are enabled with the `ack` -option, specifies the number of outstanding unacknowledged -messages allowed. - -[id="{version}-plugins-{type}s-{plugin}-queue"] -===== `queue` - - * Value type is <> - * Default value is `""` - -Pull events from a http://www.rabbitmq.com/[RabbitMQ] queue. - -The default settings will create an entirely transient queue and listen for all messages by default. 
-If you need durability or any other advanced settings, please set the appropriate options.
-
-This plugin uses the http://rubymarchhare.info/[March Hare] library
-for interacting with the RabbitMQ server. Most configuration options
-map directly to standard RabbitMQ and AMQP concepts. The
-https://www.rabbitmq.com/amqp-0-9-1-reference.html[AMQP 0-9-1 reference guide]
-and other parts of the RabbitMQ documentation are useful for deeper
-understanding.
-
-The properties of messages received will be stored in the
-`[@metadata][rabbitmq_properties]` field if the `metadata_enabled` setting is enabled.
-Note that storing metadata may degrade performance.
-The following properties may be available (in most cases dependent on whether
-they were set by the sender):
-
-* app-id
-* cluster-id
-* consumer-tag
-* content-encoding
-* content-type
-* correlation-id
-* delivery-mode
-* exchange
-* expiration
-* message-id
-* priority
-* redeliver
-* reply-to
-* routing-key
-* timestamp
-* type
-* user-id
-
-For example, to get the RabbitMQ message's timestamp property
-into the Logstash event's `@timestamp` field, use the date
-filter to parse the `[@metadata][rabbitmq_properties][timestamp]`
-field:
-[source,ruby]
-    filter {
-      if [@metadata][rabbitmq_properties][timestamp] {
-        date {
-          match => ["[@metadata][rabbitmq_properties][timestamp]", "UNIX"]
-        }
-      }
-    }
-
-Additionally, any message headers will be saved in the
-`[@metadata][rabbitmq_headers]` field.
-
-Technically the exchange, redeliver, and routing-key
-properties belong to the envelope and not the message, but we
-ignore that distinction here. We do, however, extract the
-headers separately via `get_headers`, even though the header
-table technically is a message property.
-
-The default codec for this plugin is JSON, but you can override it to suit your needs.
-The `queue` value itself is the name of the queue Logstash will consume events from.
-If left empty, a transient queue with a randomly chosen name
-will be created.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl"]
-===== `ssl`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Enable or disable SSL.
-Note that by default remote certificate verification is off.
-Specify `ssl_certificate_path` and `ssl_certificate_password` if you need
-certificate verification.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"]
-===== `ssl_certificate_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Password for the encrypted PKCS12 (.p12) certificate file specified in `ssl_certificate_path`.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"]
-===== `ssl_certificate_path`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_version"]
-===== `ssl_version`
-
- * Value type is <>
- * Default value is `"TLSv1.2"`
-
-Version of the SSL protocol to use.
-
-[id="{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds"]
-===== `subscription_retry_interval_seconds`
-
- * This is a required setting.
- * Value type is <>
- * Default value is `5`
-
-Amount of time in seconds to wait after a failed subscription request
-before retrying.
Subscriptions can fail if the server goes away and then comes back.
-
-[id="{version}-plugins-{type}s-{plugin}-threads"]
-===== `threads`
-
- * Value type is <>
- * Default value is `1`
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-tls_certificate_password"]
-===== `tls_certificate_password` (DEPRECATED)
-
- * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
- * Value type is <>
- * There is no default value for this setting.
-
-TLS certificate password
-
-[id="{version}-plugins-{type}s-{plugin}-tls_certificate_path"]
-===== `tls_certificate_path` (DEPRECATED)
-
- * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
- * Value type is <>
- * There is no default value for this setting.
-
-TLS certificate path
-
-[id="{version}-plugins-{type}s-{plugin}-user"]
-===== `user`
-
- * Value type is <>
- * Default value is `"zenoss"`
-
-Your rabbitmq username
-
-[id="{version}-plugins-{type}s-{plugin}-vhost"]
-===== `vhost`
-
- * Value type is <>
- * Default value is `"/zenoss"`
-
-The vhost to use. If you don't know what this is, leave the default.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/zenoss-v2.0.6.asciidoc b/docs/versioned-plugins/inputs/zenoss-v2.0.6.asciidoc
deleted file mode 100644
index 8a7f9609d..000000000
--- a/docs/versioned-plugins/inputs/zenoss-v2.0.6.asciidoc
+++ /dev/null
@@ -1,398 +0,0 @@
-:plugin: zenoss
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v2.0.6
-:release_date: 2017-08-16
-:changelog_url: https://github.com/logstash-plugins/logstash-input-zenoss/blob/v2.0.6/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-ack>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-auto_delete>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclusive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_enabled>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-prefetch_count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -input plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-ack"] -===== `ack` - - * Value type is <> - * Default value is `true` - -Enable message acknowledgements. With acknowledgements -messages fetched by Logstash but not yet sent into the -Logstash pipeline will be requeued by the server if Logstash -shuts down. Acknowledgements will however hurt the message -throughput. - -This will only send an ack back every `prefetch_count` messages. -Working in batches provides a performance boost here. - -[id="{version}-plugins-{type}s-{plugin}-arguments"] -===== `arguments` - - * Value type is <> - * Default value is `{}` - -Extra queue arguments as an array. -To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}` - -[id="{version}-plugins-{type}s-{plugin}-auto_delete"] -===== `auto_delete` - - * Value type is <> - * Default value is `false` - -Should the queue be deleted on the broker when the last consumer -disconnects? Set this option to `false` if you want the queue to remain -on the broker, queueing up messages until a consumer comes along to -consume them. - -[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"] -===== `automatic_recovery` - - * Value type is <> - * Default value is `true` - -Set this to automatically recover from a broken connection. You almost certainly don't want to override this!!! 
- -[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"] -===== `connect_retry_interval` - - * Value type is <> - * Default value is `1` - -Time in seconds to wait before retrying a connection - -[id="{version}-plugins-{type}s-{plugin}-connection_timeout"] -===== `connection_timeout` - - * Value type is <> - * There is no default value for this setting. - -The default connection timeout in milliseconds. If not specified the timeout is infinite. - -[id="{version}-plugins-{type}s-{plugin}-durable"] -===== `durable` - - * Value type is <> - * Default value is `false` - -Is this queue durable? (aka; Should it survive a broker restart?) - -[id="{version}-plugins-{type}s-{plugin}-exchange"] -===== `exchange` - - * Value type is <> - * Default value is `"zenoss.zenevents"` - -The name of the exchange to bind the queue. This is analogous to the 'rabbitmq -output' [config 'name'](../outputs/rabbitmq) - -[id="{version}-plugins-{type}s-{plugin}-exchange_type"] -===== `exchange_type` - - * Value type is <> - * There is no default value for this setting. - -The type of the exchange to bind to. Specifying this will cause this plugin -to declare the exchange if it does not exist. - -[id="{version}-plugins-{type}s-{plugin}-exclusive"] -===== `exclusive` - - * Value type is <> - * Default value is `false` - -Is the queue exclusive? Exclusive queues can only be used by the connection -that declared them and will be deleted when it is closed (e.g. due to a Logstash -restart). - -[id="{version}-plugins-{type}s-{plugin}-heartbeat"] -===== `heartbeat` - - * Value type is <> - * There is no default value for this setting. - -Heartbeat delay in seconds. If unspecified no heartbeats will be sent - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"localhost"` - -Your rabbitmq server address - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * Value type is <> - * Default value is `"zenoss.zenevent.#"` - -The routing key to use. This is only valid for direct or fanout exchanges - -* Routing keys are ignored on topic exchanges. -* Wildcards are not valid on direct exchanges. - -[id="{version}-plugins-{type}s-{plugin}-metadata_enabled"] -===== `metadata_enabled` - - * Value type is <> - * Default value is `false` - -Enable the storage of message headers and properties in `@metadata`. This may impact performance - -[id="{version}-plugins-{type}s-{plugin}-passive"] -===== `passive` - - * Value type is <> - * Default value is `false` - -Passive queue creation? Useful for checking queue existance without modifying server state - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * Default value is `"zenoss"` - -Your rabbitmq password - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `5672` - -RabbitMQ port to connect on - -[id="{version}-plugins-{type}s-{plugin}-prefetch_count"] -===== `prefetch_count` - - * Value type is <> - * Default value is `256` - -Prefetch count. If acknowledgements are enabled with the `ack` -option, specifies the number of outstanding unacknowledged -messages allowed. - -[id="{version}-plugins-{type}s-{plugin}-queue"] -===== `queue` - - * Value type is <> - * Default value is `""` - -Pull events from a http://www.rabbitmq.com/[RabbitMQ] queue. - -The default settings will create an entirely transient queue and listen for all messages by default. 
-If you need durability or any other advanced settings, please set the appropriate options.
-
-This plugin uses the http://rubymarchhare.info/[March Hare] library
-for interacting with the RabbitMQ server. Most configuration options
-map directly to standard RabbitMQ and AMQP concepts. The
-https://www.rabbitmq.com/amqp-0-9-1-reference.html[AMQP 0-9-1 reference guide]
-and other parts of the RabbitMQ documentation are useful for deeper
-understanding.
-
-The properties of messages received will be stored in the
-`[@metadata][rabbitmq_properties]` field if the `metadata_enabled` setting is enabled.
-Note that storing metadata may degrade performance.
-The following properties may be available (in most cases dependent on whether
-they were set by the sender):
-
-* app-id
-* cluster-id
-* consumer-tag
-* content-encoding
-* content-type
-* correlation-id
-* delivery-mode
-* exchange
-* expiration
-* message-id
-* priority
-* redeliver
-* reply-to
-* routing-key
-* timestamp
-* type
-* user-id
-
-For example, to get the RabbitMQ message's timestamp property
-into the Logstash event's `@timestamp` field, use the date
-filter to parse the `[@metadata][rabbitmq_properties][timestamp]`
-field:
-[source,ruby]
-    filter {
-      if [@metadata][rabbitmq_properties][timestamp] {
-        date {
-          match => ["[@metadata][rabbitmq_properties][timestamp]", "UNIX"]
-        }
-      }
-    }
-
-Additionally, any message headers will be saved in the
-`[@metadata][rabbitmq_headers]` field.
-Technically, the exchange, redeliver, and routing-key
-properties belong to the envelope and not the message, but we
-ignore that distinction here.
-
-The default codec for this plugin is JSON; you can override this
-to suit your particular needs, however.
-
-The name of the queue Logstash will consume events from. If
-left empty, a transient queue with a randomly chosen name
-will be created.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl"]
-===== `ssl`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Enable or disable SSL.
-Note that by default remote certificate verification is off.
-Specify `ssl_certificate_path` and `ssl_certificate_password` if you need
-certificate verification.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"]
-===== `ssl_certificate_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Password for the encrypted PKCS12 (.p12) certificate file specified in `ssl_certificate_path`
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"]
-===== `ssl_certificate_path`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_version"]
-===== `ssl_version`
-
- * Value type is <>
- * Default value is `"TLSv1.2"`
-
-Version of the SSL protocol to use.
-
-[id="{version}-plugins-{type}s-{plugin}-subscription_retry_interval_seconds"]
-===== `subscription_retry_interval_seconds`
-
- * This is a required setting.
- * Value type is <>
- * Default value is `5`
-
-Amount of time in seconds to wait after a failed subscription request
-before retrying.
Subscriptions can fail if the server goes away and then comes back.
-
-[id="{version}-plugins-{type}s-{plugin}-threads"]
-===== `threads`
-
- * Value type is <>
- * Default value is `1`
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-tls_certificate_password"]
-===== `tls_certificate_password` (DEPRECATED)
-
- * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
- * Value type is <>
- * There is no default value for this setting.
-
-TLS certificate password
-
-[id="{version}-plugins-{type}s-{plugin}-tls_certificate_path"]
-===== `tls_certificate_path` (DEPRECATED)
-
- * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
- * Value type is <>
- * There is no default value for this setting.
-
-TLS certificate path
-
-[id="{version}-plugins-{type}s-{plugin}-user"]
-===== `user`
-
- * Value type is <>
- * Default value is `"zenoss"`
-
-Your RabbitMQ username
-
-[id="{version}-plugins-{type}s-{plugin}-vhost"]
-===== `vhost`
-
- * Value type is <>
- * Default value is `"/zenoss"`
-
-The vhost to use. If you don't know what this is, leave the default.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/inputs/zeromq-index.asciidoc b/docs/versioned-plugins/inputs/zeromq-index.asciidoc
deleted file mode 100644
index 703501002..000000000
--- a/docs/versioned-plugins/inputs/zeromq-index.asciidoc
+++ /dev/null
@@ -1,12 +0,0 @@
-:plugin: zeromq
-:type: input
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-06-23
-|=======================================================================
-
-include::zeromq-v3.0.3.asciidoc[]
-
diff --git a/docs/versioned-plugins/inputs/zeromq-v3.0.3.asciidoc b/docs/versioned-plugins/inputs/zeromq-v3.0.3.asciidoc
deleted file mode 100644
index d4416b778..000000000
--- a/docs/versioned-plugins/inputs/zeromq-v3.0.3.asciidoc
+++ /dev/null
@@ -1,159 +0,0 @@
-:plugin: zeromq
-:type: input
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.3
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-input-zeromq/blob/v3.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Zeromq input plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Read events over a 0MQ SUB socket.
-
-You need to have the 0mq 2.1.x library installed to be able to use
-this input plugin.
-
-The default settings will create a subscriber binding to `tcp://127.0.0.1:2120`
-waiting for connecting publishers.
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Zeromq Input Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-address>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No
-| <<{version}-plugins-{type}s-{plugin}-sender>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-sockopt>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-topic>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-topic_field>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-topology>> |<>, one of `["pushpull", "pubsub", "pair"]`|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-input plugins.
-
-  
-
-[id="{version}-plugins-{type}s-{plugin}-address"]
-===== `address`
-
- * Value type is <>
- * Default value is `["tcp://*:2120"]`
-
-The 0mq socket address to connect or bind to.
-Please note that `inproc://` will not work with Logstash,
-as we use a context per thread.
-By default, inputs bind/listen
-and outputs connect.
-
-[id="{version}-plugins-{type}s-{plugin}-mode"]
-===== `mode`
-
- * Value can be any of: `server`, `client`
- * Default value is `"server"`
-
-The mode: server mode binds/listens,
-and client mode connects.
-
-[id="{version}-plugins-{type}s-{plugin}-sender"]
-===== `sender`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Overrides the sender to
-set the source of the event.
-The default is `zmq+topology://type/`
-
-[id="{version}-plugins-{type}s-{plugin}-sockopt"]
-===== `sockopt`
-
- * Value type is <>
- * Default value is `{"ZMQ::RCVTIMEO"=>"1000"}`
-
-0mq socket options.
-This exposes `zmq_setsockopt`
-for advanced tuning;
-see http://api.zeromq.org/2-1:zmq-setsockopt for details.
-
-This is where you would set values like:
-
- * `ZMQ::HWM` - high water mark
- * `ZMQ::IDENTITY` - named queues
- * `ZMQ::SWAP_SIZE` - space for disk overflow
-
-Example:
-[source,ruby]
-    sockopt => {
-      "ZMQ::HWM" => 50
-      "ZMQ::IDENTITY" => "my_named_queue"
-    }
-
-Defaults to `sockopt => { "ZMQ::RCVTIMEO" => "1000" }`, which has the effect of "interrupting"
-the recv operation at least once every second to allow for proper shutdown handling.
-
-[id="{version}-plugins-{type}s-{plugin}-topic"]
-===== `topic`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The 0mq topic.
-This is used for the `pubsub` topology only.
-On inputs, this allows you to filter messages by topic.
-On outputs, this allows you to tag a message for routing.
-NOTE: ZeroMQ does subscriber-side filtering.
-NOTE: All topics have an implicit wildcard at the end.
-You can specify multiple topics here.
-
-[id="{version}-plugins-{type}s-{plugin}-topic_field"]
-===== `topic_field`
-
- * Value type is <>
- * Default value is `"topic"`
-
-Event topic field.
-This is used for the `pubsub` topology only.
-When a message is received on a topic, the topic name on which
-the message was received will be saved in this field.
-
-[id="{version}-plugins-{type}s-{plugin}-topology"]
-===== `topology`
-
- * This is a required setting.
- * Value can be any of: `pushpull`, `pubsub`, `pair`
- * There is no default value for this setting.
-
-The 0mq topology.
-The default Logstash topologies work as follows:
-
-* pushpull - inputs are pull, outputs are push
-* pubsub - inputs are subscribers, outputs are publishers
-* pair - inputs are clients, outputs are servers
-
-If the predefined topology flows don't work for you,
-you can change the `mode` setting.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs-index.asciidoc b/docs/versioned-plugins/outputs-index.asciidoc
index 91a197d6e..b1f495dc3 100644
--- a/docs/versioned-plugins/outputs-index.asciidoc
+++ b/docs/versioned-plugins/outputs-index.asciidoc
@@ -3,74 +3,8 @@
 include::include/plugin-intro.asciidoc[]
 
-include::outputs/beats-index.asciidoc[]
-include::outputs/boundary-index.asciidoc[]
-include::outputs/circonus-index.asciidoc[]
-include::outputs/cloudwatch-index.asciidoc[]
-include::outputs/csv-index.asciidoc[]
-include::outputs/datadog-index.asciidoc[]
-include::outputs/datadog_metrics-index.asciidoc[]
 include::outputs/elasticsearch-index.asciidoc[]
-include::outputs/elasticsearch_java-index.asciidoc[]
 include::outputs/email-index.asciidoc[]
-include::outputs/example-index.asciidoc[]
-include::outputs/exec-index.asciidoc[]
 include::outputs/file-index.asciidoc[]
-include::outputs/firehose-index.asciidoc[]
-include::outputs/ganglia-index.asciidoc[]
-include::outputs/gelf-index.asciidoc[]
-include::outputs/gemfire-index.asciidoc[]
-include::outputs/google_bigquery-index.asciidoc[]
-include::outputs/google_cloud_storage-index.asciidoc[]
 include::outputs/graphite-index.asciidoc[]
-include::outputs/graphtastic-index.asciidoc[]
-include::outputs/hipchat-index.asciidoc[]
-include::outputs/http-index.asciidoc[]
-include::outputs/icinga-index.asciidoc[]
-include::outputs/influxdb-index.asciidoc[]
-include::outputs/irc-index.asciidoc[]
-include::outputs/jira-index.asciidoc[]
-include::outputs/jms-index.asciidoc[]
-include::outputs/juggernaut-index.asciidoc[]
-include::outputs/kafka-index.asciidoc[]
-include::outputs/librato-index.asciidoc[]
-include::outputs/logentries-index.asciidoc[]
-include::outputs/loggly-index.asciidoc[]
-include::outputs/lumberjack-index.asciidoc[]
-include::outputs/metriccatcher-index.asciidoc[]
-include::outputs/monasca_log_api-index.asciidoc[]
-include::outputs/mongodb-index.asciidoc[]
-include::outputs/nagios-index.asciidoc[]
-include::outputs/nagios_nsca-index.asciidoc[]
-include::outputs/neo4j-index.asciidoc[]
-include::outputs/newrelic-index.asciidoc[]
-include::outputs/null-index.asciidoc[]
-include::outputs/opentsdb-index.asciidoc[]
-include::outputs/pagerduty-index.asciidoc[]
-include::outputs/pipe-index.asciidoc[]
-include::outputs/rabbitmq-index.asciidoc[]
-include::outputs/rackspace-index.asciidoc[]
-include::outputs/rados-index.asciidoc[]
-include::outputs/redis-index.asciidoc[]
-include::outputs/redmine-index.asciidoc[]
-include::outputs/riak-index.asciidoc[]
-include::outputs/riemann-index.asciidoc[]
-include::outputs/s3-index.asciidoc[]
-include::outputs/slack-index.asciidoc[]
-include::outputs/sns-index.asciidoc[]
-include::outputs/solr_http-index.asciidoc[]
-include::outputs/sqs-index.asciidoc[]
-include::outputs/statsd-index.asciidoc[]
-include::outputs/stdout-index.asciidoc[]
-include::outputs/stomp-index.asciidoc[]
-include::outputs/syslog-index.asciidoc[]
-include::outputs/tcp-index.asciidoc[]
-include::outputs/timber-index.asciidoc[]
-include::outputs/udp-index.asciidoc[]
-include::outputs/webhdfs-index.asciidoc[] -include::outputs/websocket-index.asciidoc[] -include::outputs/xmpp-index.asciidoc[] -include::outputs/zabbix-index.asciidoc[] -include::outputs/zeromq-index.asciidoc[] -include::outputs/zookeeper-index.asciidoc[] diff --git a/docs/versioned-plugins/outputs/beats-index.asciidoc b/docs/versioned-plugins/outputs/beats-index.asciidoc deleted file mode 100644 index 1083feaf5..000000000 --- a/docs/versioned-plugins/outputs/beats-index.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -:plugin: beats -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -|======================================================================= - - diff --git a/docs/versioned-plugins/outputs/boundary-index.asciidoc b/docs/versioned-plugins/outputs/boundary-index.asciidoc deleted file mode 100644 index b05e4a56d..000000000 --- a/docs/versioned-plugins/outputs/boundary-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: boundary -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::boundary-v3.0.4.asciidoc[] -include::boundary-v3.0.3.asciidoc[] -include::boundary-v3.0.2.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/boundary-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/boundary-v3.0.2.asciidoc deleted file mode 100644 index f69f950a6..000000000 --- a/docs/versioned-plugins/outputs/boundary-v3.0.2.asciidoc +++ /dev/null @@ -1,139 +0,0 @@ -:plugin: boundary -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.2 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-boundary/blob/v3.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Boundary output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output lets you send annotations to -Boundary based on Logstash events - -Note that since Logstash maintains no state -these will be one-shot events - -By default the start and stop time will be -the event timestamp - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Boundary Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-api_key>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-auto>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bsubtype>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-btags>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-btype>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-end_time>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-org_id>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-start_time>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-api_key"] -===== `api_key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Your Boundary API key - -[id="{version}-plugins-{type}s-{plugin}-auto"] -===== `auto` - - * Value type is <> - * Default value is `false` - -Auto -If set to true, logstash will try to pull boundary fields out -of the event. Any field explicitly set by config options will -override these. -`['type', 'subtype', 'creation_time', 'end_time', 'links', 'tags', 'loc']` - -[id="{version}-plugins-{type}s-{plugin}-bsubtype"] -===== `bsubtype` - - * Value type is <> - * There is no default value for this setting. - -Sub-Type - -[id="{version}-plugins-{type}s-{plugin}-btags"] -===== `btags` - - * Value type is <> - * There is no default value for this setting. - -Tags -Set any custom tags for this event -Default are the Logstash tags if any - -[id="{version}-plugins-{type}s-{plugin}-btype"] -===== `btype` - - * Value type is <> - * There is no default value for this setting. - -Type - -[id="{version}-plugins-{type}s-{plugin}-end_time"] -===== `end_time` - - * Value type is <> - * There is no default value for this setting. - -End time -Override the stop time -Note that Boundary requires this to be seconds since epoch -If overriding, it is your responsibility to type this correctly -By default this is set to `event.get("@timestamp").to_i` - -[id="{version}-plugins-{type}s-{plugin}-org_id"] -===== `org_id` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Your Boundary Org ID - -[id="{version}-plugins-{type}s-{plugin}-start_time"] -===== `start_time` - - * Value type is <> - * There is no default value for this setting. - -Start time -Override the start time -Note that Boundary requires this to be seconds since epoch -If overriding, it is your responsibility to type this correctly -By default this is set to `event.get("@timestamp").to_i` - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/boundary-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/boundary-v3.0.3.asciidoc deleted file mode 100644 index bcc2a3ab7..000000000 --- a/docs/versioned-plugins/outputs/boundary-v3.0.3.asciidoc +++ /dev/null @@ -1,139 +0,0 @@ -:plugin: boundary -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-boundary/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Boundary output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output lets you send annotations to -Boundary based on Logstash events - -Note that since Logstash maintains no state -these will be one-shot events - -By default the start and stop time will be -the event timestamp - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Boundary Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-api_key>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-auto>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bsubtype>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-btags>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-btype>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-end_time>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-org_id>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-start_time>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-api_key"] -===== `api_key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Your Boundary API key - -[id="{version}-plugins-{type}s-{plugin}-auto"] -===== `auto` - - * Value type is <> - * Default value is `false` - -Auto -If set to true, logstash will try to pull boundary fields out -of the event. Any field explicitly set by config options will -override these. -`['type', 'subtype', 'creation_time', 'end_time', 'links', 'tags', 'loc']` - -[id="{version}-plugins-{type}s-{plugin}-bsubtype"] -===== `bsubtype` - - * Value type is <> - * There is no default value for this setting. - -Sub-Type - -[id="{version}-plugins-{type}s-{plugin}-btags"] -===== `btags` - - * Value type is <> - * There is no default value for this setting. - -Tags -Set any custom tags for this event -Default are the Logstash tags if any - -[id="{version}-plugins-{type}s-{plugin}-btype"] -===== `btype` - - * Value type is <> - * There is no default value for this setting. - -Type - -[id="{version}-plugins-{type}s-{plugin}-end_time"] -===== `end_time` - - * Value type is <> - * There is no default value for this setting. - -End time -Override the stop time -Note that Boundary requires this to be seconds since epoch -If overriding, it is your responsibility to type this correctly -By default this is set to `event.get("@timestamp").to_i` - -[id="{version}-plugins-{type}s-{plugin}-org_id"] -===== `org_id` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Your Boundary Org ID - -[id="{version}-plugins-{type}s-{plugin}-start_time"] -===== `start_time` - - * Value type is <> - * There is no default value for this setting. 
- -Start time -Override the start time -Note that Boundary requires this to be seconds since epoch -If overriding, it is your responsibility to type this correctly -By default this is set to `event.get("@timestamp").to_i` - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/boundary-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/boundary-v3.0.4.asciidoc deleted file mode 100644 index 86783ab71..000000000 --- a/docs/versioned-plugins/outputs/boundary-v3.0.4.asciidoc +++ /dev/null @@ -1,139 +0,0 @@ -:plugin: boundary -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-output-boundary/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Boundary output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output lets you send annotations to -Boundary based on Logstash events - -Note that since Logstash maintains no state -these will be one-shot events - -By default the start and stop time will be -the event timestamp - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Boundary Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-api_key>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-auto>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bsubtype>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-btags>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-btype>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-end_time>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-org_id>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-start_time>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-api_key"] -===== `api_key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Your Boundary API key - -[id="{version}-plugins-{type}s-{plugin}-auto"] -===== `auto` - - * Value type is <> - * Default value is `false` - -Auto -If set to true, logstash will try to pull boundary fields out -of the event. Any field explicitly set by config options will -override these. -`['type', 'subtype', 'creation_time', 'end_time', 'links', 'tags', 'loc']` - -[id="{version}-plugins-{type}s-{plugin}-bsubtype"] -===== `bsubtype` - - * Value type is <> - * There is no default value for this setting. - -Sub-Type - -[id="{version}-plugins-{type}s-{plugin}-btags"] -===== `btags` - - * Value type is <> - * There is no default value for this setting. 
- -Tags -Set any custom tags for this event -Default are the Logstash tags if any - -[id="{version}-plugins-{type}s-{plugin}-btype"] -===== `btype` - - * Value type is <> - * There is no default value for this setting. - -Type - -[id="{version}-plugins-{type}s-{plugin}-end_time"] -===== `end_time` - - * Value type is <> - * There is no default value for this setting. - -End time -Override the stop time -Note that Boundary requires this to be seconds since epoch -If overriding, it is your responsibility to type this correctly -By default this is set to `event.get("@timestamp").to_i` - -[id="{version}-plugins-{type}s-{plugin}-org_id"] -===== `org_id` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Your Boundary Org ID - -[id="{version}-plugins-{type}s-{plugin}-start_time"] -===== `start_time` - - * Value type is <> - * There is no default value for this setting. - -Start time -Override the start time -Note that Boundary requires this to be seconds since epoch -If overriding, it is your responsibility to type this correctly -By default this is set to `event.get("@timestamp").to_i` - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/circonus-index.asciidoc b/docs/versioned-plugins/outputs/circonus-index.asciidoc deleted file mode 100644 index b5daf84a5..000000000 --- a/docs/versioned-plugins/outputs/circonus-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: circonus -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::circonus-v3.0.4.asciidoc[] -include::circonus-v3.0.3.asciidoc[] -include::circonus-v3.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/circonus-v3.0.1.asciidoc b/docs/versioned-plugins/outputs/circonus-v3.0.1.asciidoc deleted file mode 100644 index 1d963d5bf..000000000 --- a/docs/versioned-plugins/outputs/circonus-v3.0.1.asciidoc +++ /dev/null @@ -1,93 +0,0 @@ -:plugin: circonus -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-circonus/blob/v3.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Circonus output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Circonus Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-annotation>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-api_token>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-app_name>> |<>|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
-  
-
-[id="{version}-plugins-{type}s-{plugin}-annotation"]
-===== `annotation`
-
- * This is a required setting.
- * Value type is <>
- * Default value is `{}`
-
-Registers an annotation with Circonus.
-The only required fields are `title` and `description`.
-`start` and `stop` will be set to the event timestamp.
-You can add any other optional annotation values as well.
-All values will be passed through `event.sprintf`.
-
-Example:
-[source,ruby]
-    {"title" => "Logstash event", "description" => "Logstash event for %{host}"}
-or
-[source,ruby]
-    {"title" => "Logstash event", "description" => "Logstash event for %{host}", "parent_id" => "1"}
-
-[id="{version}-plugins-{type}s-{plugin}-api_token"]
-===== `api_token`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Your Circonus API Token
-
-[id="{version}-plugins-{type}s-{plugin}-app_name"]
-===== `app_name`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Your Circonus App name.
-This will be passed through `event.sprintf`,
-so variables are allowed here:
-
-Example:
-  `app_name => "%{myappname}"`
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/circonus-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/circonus-v3.0.3.asciidoc
deleted file mode 100644
index 349aa09d8..000000000
--- a/docs/versioned-plugins/outputs/circonus-v3.0.3.asciidoc
+++ /dev/null
@@ -1,91 +0,0 @@
-:plugin: circonus
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.3
-:release_date: 2017-08-16
-:changelog_url: https://github.com/logstash-plugins/logstash-output-circonus/blob/v3.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Circonus output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output sends annotations to Circonus based on Logstash events.
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Circonus Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-annotation>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-api_token>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-app_name>> |<>|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
-  
-
-[id="{version}-plugins-{type}s-{plugin}-annotation"]
-===== `annotation`
-
- * This is a required setting.
- * Value type is <>
- * Default value is `{}`
-
-Registers an annotation with Circonus.
-The only required fields are `title` and `description`.
-`start` and `stop` will be set to the event timestamp.
-You can add any other optional annotation values as well.
-All values will be passed through `event.sprintf`.
-
-Example:
-[source,ruby]
-    {"title" => "Logstash event", "description" => "Logstash event for %{host}"}
-or
-[source,ruby]
-    {"title" => "Logstash event", "description" => "Logstash event for %{host}", "parent_id" => "1"}
-
-[id="{version}-plugins-{type}s-{plugin}-api_token"]
-===== `api_token`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Your Circonus API Token
-
-[id="{version}-plugins-{type}s-{plugin}-app_name"]
-===== `app_name`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Your Circonus App name.
-This will be passed through `event.sprintf`,
-so variables are allowed here:
-
-Example:
-  `app_name => "%{myappname}"`
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/circonus-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/circonus-v3.0.4.asciidoc
deleted file mode 100644
index d99c5bae8..000000000
--- a/docs/versioned-plugins/outputs/circonus-v3.0.4.asciidoc
+++ /dev/null
@@ -1,91 +0,0 @@
-:plugin: circonus
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.4
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-output-circonus/blob/v3.0.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Circonus output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output sends annotations to Circonus based on Logstash events.
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Circonus Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-annotation>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-api_token>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-app_name>> |<>|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
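-
-Before the per-option details, here is a hedged sketch of a complete circonus output; the API token is a placeholder, and the annotation hash uses only the required `title` and `description` fields:
-
-[source,ruby]
-    output {
-      circonus {
-        api_token => "YOUR_API_TOKEN"   # placeholder; substitute your real Circonus token
-        app_name => "%{myappname}"      # passed through event.sprintf
-        annotation => {
-          "title" => "Logstash event"
-          "description" => "Logstash event for %{host}"
-        }
-      }
-    }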
-
-  
-
-[id="{version}-plugins-{type}s-{plugin}-annotation"]
-===== `annotation`
-
- * This is a required setting.
- * Value type is <>
- * Default value is `{}`
-
-Registers an annotation with Circonus.
-The only required fields are `title` and `description`.
-`start` and `stop` will be set to the event timestamp.
-You can add any other optional annotation values as well.
-All values will be passed through `event.sprintf`.
-
-Example:
-[source,ruby]
-    {"title" => "Logstash event", "description" => "Logstash event for %{host}"}
-or
-[source,ruby]
-    {"title" => "Logstash event", "description" => "Logstash event for %{host}", "parent_id" => "1"}
-
-[id="{version}-plugins-{type}s-{plugin}-api_token"]
-===== `api_token`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Your Circonus API Token
-
-[id="{version}-plugins-{type}s-{plugin}-app_name"]
-===== `app_name`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Your Circonus App name.
-This will be passed through `event.sprintf`,
-so variables are allowed here:
-
-Example:
-  `app_name => "%{myappname}"`
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/cloudwatch-index.asciidoc b/docs/versioned-plugins/outputs/cloudwatch-index.asciidoc
deleted file mode 100644
index df1cfed83..000000000
--- a/docs/versioned-plugins/outputs/cloudwatch-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: cloudwatch
-:type: output
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-16
-| <> | 2017-06-23
-|=======================================================================
-
-include::cloudwatch-v3.0.7.asciidoc[]
-include::cloudwatch-v3.0.6.asciidoc[]
-include::cloudwatch-v3.0.5.asciidoc[]
-
diff --git a/docs/versioned-plugins/outputs/cloudwatch-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/cloudwatch-v3.0.5.asciidoc
deleted file mode 100644
index b66c5f10a..000000000
--- a/docs/versioned-plugins/outputs/cloudwatch-v3.0.5.asciidoc
+++ /dev/null
@@ -1,317 +0,0 @@
-:plugin: cloudwatch
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.5
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-output-cloudwatch/blob/v3.0.5/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Cloudwatch output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output lets you aggregate and send metric data to AWS CloudWatch.
-
-==== Summary:
-This plugin is intended to be used on a Logstash indexer agent (but that
-is not the only way; see below). In the intended scenario, one cloudwatch
-output plugin is configured, on the Logstash indexer node, with just AWS API
-credentials, and possibly a region and/or a namespace. The output looks
-for fields present in events, and when it finds them, it uses them to
-calculate aggregate statistics.
If the `metricname` option is set in this
-output, then any events which pass through it will be aggregated and sent to
-CloudWatch, but that is not recommended. The intended use is to NOT set the
-metricname option here, and instead to add a `CW_metricname` field (and other
-fields) to only the events you want sent to CloudWatch.
-
-When events pass through this output they are queued for background
-aggregation and sending, which happens every minute by default. The
-queue has a maximum size, and when it is full, aggregated statistics will be
-sent to CloudWatch ahead of schedule. Whenever this happens, a warning
-message is written to Logstash's log. If you see this, you should increase
-the `queue_size` configuration option to avoid the extra API calls. The queue
-is emptied every time we send data to CloudWatch.
-
-Note: when Logstash is stopped, the queue is destroyed before it can be processed.
-This is a known limitation of Logstash and will hopefully be addressed in a
-future version.
-
-==== Details:
-There are two ways to configure this plugin, and they can be used in
-combination: event fields and per-output defaults.
-
-Event field configuration...
-You add fields to your events in inputs and filters, and this output reads
-those fields to aggregate events. The names of the fields read are
-configurable via the `field_*` options.
-
-Per-output defaults...
-You set universal defaults in this output plugin's configuration, and
-if an event does not have a field for that option then the default is
-used.
-
-Note that the event fields take precedence over the per-output defaults.
-
-At a minimum, events must have a "metric name" to be sent to CloudWatch.
-This can be achieved either by providing a default here OR by adding a
-`CW_metricname` field. By default, if no other configuration is provided
-besides a metric name, then events will be counted (Unit: Count, Value: 1)
-by their metric name (either a default or from their `CW_metricname` field).
-
-Other fields which can be added to events to modify the behavior of this
-plugin are `CW_namespace`, `CW_unit`, `CW_value`, and
-`CW_dimensions`. All of these field names are configurable in
-this output. You can also set per-output defaults for any of them.
-See below for details.
-
-Read more about http://aws.amazon.com/cloudwatch/[AWS CloudWatch],
-and the specifics of the API endpoint this output uses:
-http://docs.amazonwebservices.com/AmazonCloudWatch/latest/APIReference/API_PutMetricData.html[PutMetricData]
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Cloudwatch Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-dimensions>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-field_dimensions>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-field_metricname>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-field_namespace>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-field_unit>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-field_value>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metricname>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-namespace>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-queue_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No -| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeframe>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-unit>> |<>, one of `["Seconds", "Microseconds", "Milliseconds", "Bytes", "Kilobytes", "Megabytes", "Gigabytes", "Terabytes", "Bits", "Kilobits", "Megabits", "Gigabits", "Terabits", "Percent", "Count", "Bytes/Second", "Kilobytes/Second", "Megabytes/Second", "Gigabytes/Second", "Terabytes/Second", "Bits/Second", "Kilobits/Second", "Megabits/Second", "Gigabits/Second", "Terabits/Second", "Count/Second", "None"]`|No -| <<{version}-plugins-{type}s-{plugin}-value>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-access_key_id"] -===== `access_key_id` - - * Value type is <> - * There is no default value for this setting. - -This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: - -1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config -2. External credentials file specified by `aws_credentials_file` -3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` -4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` -5. IAM Instance Profile (available when running inside EC2) - -[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] -===== `aws_credentials_file` - - * Value type is <> - * There is no default value for this setting. - -Path to YAML file containing a hash of AWS credentials. -This file will only be loaded if `access_key_id` and -`secret_access_key` aren't set. 
The contents of the -file should look like this: - -[source,ruby] ----------------------------------- - :access_key_id: "12345" - :secret_access_key: "54321" ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-batch_size"] -===== `batch_size` - - * Value type is <> - * Default value is `20` - -How many data points can be given in one call to the CloudWatch API - -[id="{version}-plugins-{type}s-{plugin}-dimensions"] -===== `dimensions` - - * Value type is <> - * There is no default value for this setting. - -The default dimensions [ name, value, ... ] to use for events which do not have a `CW_dimensions` field - -[id="{version}-plugins-{type}s-{plugin}-field_dimensions"] -===== `field_dimensions` - - * Value type is <> - * Default value is `"CW_dimensions"` - -The name of the field used to set the dimensions on an event metric -The field named here, if present in an event, must have an array of -one or more key & value pairs, for example... - `add_field => [ "CW_dimensions", "Environment", "CW_dimensions", "prod" ]` -or, equivalently... - `add_field => [ "CW_dimensions", "Environment" ]` - `add_field => [ "CW_dimensions", "prod" ]` - -[id="{version}-plugins-{type}s-{plugin}-field_metricname"] -===== `field_metricname` - - * Value type is <> - * Default value is `"CW_metricname"` - -The name of the field used to set the metric name on an event -The author of this plugin recommends adding this field to events in inputs & -filters rather than using the per-output default setting so that one output -plugin on your logstash indexer can serve all events (which of course had -fields set on your logstash shippers.) - -[id="{version}-plugins-{type}s-{plugin}-field_namespace"] -===== `field_namespace` - - * Value type is <> - * Default value is `"CW_namespace"` - -The name of the field used to set a different namespace per event -Note: Only one namespace can be sent to CloudWatch per API call -so setting different namespaces will increase the number of API calls -and those cost money. - -[id="{version}-plugins-{type}s-{plugin}-field_unit"] -===== `field_unit` - - * Value type is <> - * Default value is `"CW_unit"` - -The name of the field used to set the unit on an event metric - -[id="{version}-plugins-{type}s-{plugin}-field_value"] -===== `field_value` - - * Value type is <> - * Default value is `"CW_value"` - -The name of the field used to set the value (float) on an event metric - -[id="{version}-plugins-{type}s-{plugin}-metricname"] -===== `metricname` - - * Value type is <> - * There is no default value for this setting. - -The default metric name to use for events which do not have a `CW_metricname` field. -Beware: If this is provided then all events which pass through this output will be aggregated and -sent to CloudWatch, so use this carefully. Furthermore, when providing this option, you -will probably want to also restrict events from passing through this output using event -type, tag, and field matching - -[id="{version}-plugins-{type}s-{plugin}-namespace"] -===== `namespace` - - * Value type is <> - * Default value is `"Logstash"` - -The default namespace to use for events which do not have a `CW_namespace` field - -[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] -===== `proxy_uri` - - * Value type is <> - * There is no default value for this setting. 
-
-URI of the proxy server, if required.
-
-[id="{version}-plugins-{type}s-{plugin}-queue_size"]
-===== `queue_size`
-
- * Value type is <>
- * Default value is `10000`
-
-How many events to queue before forcing a call to the CloudWatch API ahead of the `timeframe` schedule.
-Set this to the number of events per timeframe you will be sending to CloudWatch to avoid extra API calls.
-
-[id="{version}-plugins-{type}s-{plugin}-region"]
-===== `region`
-
- * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
- * Default value is `"us-east-1"`
-
-The AWS Region
-
-[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
-===== `secret_access_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The AWS Secret Access Key
-
-[id="{version}-plugins-{type}s-{plugin}-session_token"]
-===== `session_token`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The AWS session token for temporary credentials.
-
-[id="{version}-plugins-{type}s-{plugin}-timeframe"]
-===== `timeframe`
-
- * Value type is <>
- * Default value is `"1m"`
-
-How often to send data to CloudWatch.
-This does not affect the event timestamps; events will always have their
-actual timestamp (to-the-minute) sent to CloudWatch.
-
-We only call the API if there is data to send.
-
-See the Rufus Scheduler docs for an https://github.com/jmettraux/rufus-scheduler#the-time-strings-understood-by-rufus-scheduler[explanation of allowed values]
-
-[id="{version}-plugins-{type}s-{plugin}-unit"]
-===== `unit`
-
- * Value can be any of: `Seconds`, `Microseconds`, `Milliseconds`, `Bytes`, `Kilobytes`, `Megabytes`, `Gigabytes`, `Terabytes`, `Bits`, `Kilobits`, `Megabits`, `Gigabits`, `Terabits`, `Percent`, `Count`, `Bytes/Second`, `Kilobytes/Second`, `Megabytes/Second`, `Gigabytes/Second`, `Terabytes/Second`, `Bits/Second`, `Kilobits/Second`, `Megabits/Second`, `Gigabits/Second`, `Terabits/Second`, `Count/Second`, `None`
- * Default value is `"Count"`
-
-The default unit to use for events which do not have a `CW_unit` field.
-If you set this option, you should probably set the `value` option along with it.
-
-[id="{version}-plugins-{type}s-{plugin}-value"]
-===== `value`
-
- * Value type is <>
- * Default value is `"1"`
-
-The default value to use for events which do not have a `CW_value` field.
-If provided, this must be a string which can be converted to a float, for example
-`"1"`, `"2.34"`, `".5"`, and `"0.67"`.
-If you set this option, you should probably set the `unit` option along with it.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/cloudwatch-v3.0.6.asciidoc b/docs/versioned-plugins/outputs/cloudwatch-v3.0.6.asciidoc
deleted file mode 100644
index 1dbf14d83..000000000
--- a/docs/versioned-plugins/outputs/cloudwatch-v3.0.6.asciidoc
+++ /dev/null
@@ -1,317 +0,0 @@
-:plugin: cloudwatch
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-/////////////////////////////////////////// -:version: v3.0.6 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-cloudwatch/blob/v3.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Cloudwatch output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output lets you aggregate and send metric data to AWS CloudWatch - -==== Summary: -This plugin is intended to be used on a logstash indexer agent (but that -is not the only way, see below.) In the intended scenario, one cloudwatch -output plugin is configured, on the logstash indexer node, with just AWS API -credentials, and possibly a region and/or a namespace. The output looks -for fields present in events, and when it finds them, it uses them to -calculate aggregate statistics. If the `metricname` option is set in this -output, then any events which pass through it will be aggregated & sent to -CloudWatch, but that is not recommended. The intended use is to NOT set the -metricname option here, and instead to add a `CW_metricname` field (and other -fields) to only the events you want sent to CloudWatch. - -When events pass through this output they are queued for background -aggregation and sending, which happens every minute by default. The -queue has a maximum size, and when it is full aggregated statistics will be -sent to CloudWatch ahead of schedule. Whenever this happens a warning -message is written to logstash's log. If you see this you should increase -the `queue_size` configuration option to avoid the extra API calls. The queue -is emptied every time we send data to CloudWatch. - -Note: when logstash is stopped the queue is destroyed before it can be processed. -This is a known limitation of logstash and will hopefully be addressed in a -future version. - -==== Details: -There are two ways to configure this plugin, and they can be used in -combination: event fields & per-output defaults - -Event Field configuration... -You add fields to your events in inputs & filters and this output reads -those fields to aggregate events. The names of the fields read are -configurable via the `field_*` options. - -Per-output defaults... -You set universal defaults in this output plugin's configuration, and -if an event does not have a field for that option then the default is -used. - -Notice, the event fields take precedence over the per-output defaults. - -At a minimum events must have a "metric name" to be sent to CloudWatch. -This can be achieved either by providing a default here OR by adding a -`CW_metricname` field. By default, if no other configuration is provided -besides a metric name, then events will be counted (Unit: Count, Value: 1) -by their metric name (either a default or from their `CW_metricname` field) - -Other fields which can be added to events to modify the behavior of this -plugin are, `CW_namespace`, `CW_unit`, `CW_value`, and -`CW_dimensions`. All of these field names are configurable in -this output. You can also set per-output defaults for any of them. -See below for details. 
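-
-To make the recommended event-field approach concrete, here is a hedged sketch; the conditional, metric name, and namespace below are illustrative placeholders, not options of this plugin. Selected events are tagged in a filter, while the output itself is configured with nothing but a region:
-
-[source,ruby]
-    filter {
-      if [type] == "nginx-access" {           # illustrative event selector
-        mutate {
-          add_field => {
-            "CW_metricname" => "RequestCount" # illustrative metric name
-            "CW_namespace"  => "MyApp"        # illustrative namespace
-          }
-        }
-      }
-    }
-    output {
-      cloudwatch {
-        region => "us-east-1"
-      }
-    }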
-
-Read more about http://aws.amazon.com/cloudwatch/[AWS CloudWatch],
-and the specifics of the API endpoint this output uses,
-http://docs.amazonwebservices.com/AmazonCloudWatch/latest/APIReference/API_PutMetricData.html[PutMetricData]
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Cloudwatch Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-dimensions>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-field_dimensions>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-field_metricname>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-field_namespace>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-field_unit>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-field_value>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-metricname>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-namespace>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-queue_size>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No
-| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-timeframe>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-unit>> |<>, one of `["Seconds", "Microseconds", "Milliseconds", "Bytes", "Kilobytes", "Megabytes", "Gigabytes", "Terabytes", "Bits", "Kilobits", "Megabits", "Gigabits", "Terabits", "Percent", "Count", "Bytes/Second", "Kilobytes/Second", "Megabytes/Second", "Gigabytes/Second", "Terabytes/Second", "Bits/Second", "Kilobits/Second", "Megabits/Second", "Gigabits/Second", "Terabits/Second", "Count/Second", "None"]`|No
-| <<{version}-plugins-{type}s-{plugin}-value>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-access_key_id"]
-===== `access_key_id`
-
- * Value type is <>
- * There is no default value for this setting.
-
-This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
-
-1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config
-2. External credentials file specified by `aws_credentials_file`
-3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
-4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
-5. IAM Instance Profile (available when running inside EC2)
-
-[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"]
-===== `aws_credentials_file`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to a YAML file containing a hash of AWS credentials.
-This file will only be loaded if `access_key_id` and -`secret_access_key` aren't set. The contents of the -file should look like this: - -[source,ruby] ----------------------------------- - :access_key_id: "12345" - :secret_access_key: "54321" ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-batch_size"] -===== `batch_size` - - * Value type is <> - * Default value is `20` - -How many data points can be given in one call to the CloudWatch API - -[id="{version}-plugins-{type}s-{plugin}-dimensions"] -===== `dimensions` - - * Value type is <> - * There is no default value for this setting. - -The default dimensions [ name, value, ... ] to use for events which do not have a `CW_dimensions` field - -[id="{version}-plugins-{type}s-{plugin}-field_dimensions"] -===== `field_dimensions` - - * Value type is <> - * Default value is `"CW_dimensions"` - -The name of the field used to set the dimensions on an event metric -The field named here, if present in an event, must have an array of -one or more key & value pairs, for example... - `add_field => [ "CW_dimensions", "Environment", "CW_dimensions", "prod" ]` -or, equivalently... - `add_field => [ "CW_dimensions", "Environment" ]` - `add_field => [ "CW_dimensions", "prod" ]` - -[id="{version}-plugins-{type}s-{plugin}-field_metricname"] -===== `field_metricname` - - * Value type is <> - * Default value is `"CW_metricname"` - -The name of the field used to set the metric name on an event -The author of this plugin recommends adding this field to events in inputs & -filters rather than using the per-output default setting so that one output -plugin on your logstash indexer can serve all events (which of course had -fields set on your logstash shippers.) - -[id="{version}-plugins-{type}s-{plugin}-field_namespace"] -===== `field_namespace` - - * Value type is <> - * Default value is `"CW_namespace"` - -The name of the field used to set a different namespace per event -Note: Only one namespace can be sent to CloudWatch per API call -so setting different namespaces will increase the number of API calls -and those cost money. - -[id="{version}-plugins-{type}s-{plugin}-field_unit"] -===== `field_unit` - - * Value type is <> - * Default value is `"CW_unit"` - -The name of the field used to set the unit on an event metric - -[id="{version}-plugins-{type}s-{plugin}-field_value"] -===== `field_value` - - * Value type is <> - * Default value is `"CW_value"` - -The name of the field used to set the value (float) on an event metric - -[id="{version}-plugins-{type}s-{plugin}-metricname"] -===== `metricname` - - * Value type is <> - * There is no default value for this setting. - -The default metric name to use for events which do not have a `CW_metricname` field. -Beware: If this is provided then all events which pass through this output will be aggregated and -sent to CloudWatch, so use this carefully. Furthermore, when providing this option, you -will probably want to also restrict events from passing through this output using event -type, tag, and field matching - -[id="{version}-plugins-{type}s-{plugin}-namespace"] -===== `namespace` - - * Value type is <> - * Default value is `"Logstash"` - -The default namespace to use for events which do not have a `CW_namespace` field - -[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] -===== `proxy_uri` - - * Value type is <> - * There is no default value for this setting. 
-
-URI to proxy server if required
-
-[id="{version}-plugins-{type}s-{plugin}-queue_size"]
-===== `queue_size`
-
- * Value type is <>
- * Default value is `10000`
-
-How many events to queue before forcing a call to the CloudWatch API ahead of the `timeframe` schedule
-Set this to the number of events-per-timeframe you will be sending to CloudWatch to avoid extra API calls
-
-[id="{version}-plugins-{type}s-{plugin}-region"]
-===== `region`
-
- * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
- * Default value is `"us-east-1"`
-
-The AWS Region
-
-[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
-===== `secret_access_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The AWS Secret Access Key
-
-[id="{version}-plugins-{type}s-{plugin}-session_token"]
-===== `session_token`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The AWS Session token for temporary credentials
-
-[id="{version}-plugins-{type}s-{plugin}-timeframe"]
-===== `timeframe`
-
- * Value type is <>
- * Default value is `"1m"`
-
-How often to send data to CloudWatch.
-This does not affect the event timestamps; events will always have their
-actual timestamp (to-the-minute) sent to CloudWatch.
-
-We only call the API if there is data to send.
-
-See the Rufus Scheduler docs for an https://github.com/jmettraux/rufus-scheduler#the-time-strings-understood-by-rufus-scheduler[explanation of allowed values]
-
-[id="{version}-plugins-{type}s-{plugin}-unit"]
-===== `unit`
-
- * Value can be any of: `Seconds`, `Microseconds`, `Milliseconds`, `Bytes`, `Kilobytes`, `Megabytes`, `Gigabytes`, `Terabytes`, `Bits`, `Kilobits`, `Megabits`, `Gigabits`, `Terabits`, `Percent`, `Count`, `Bytes/Second`, `Kilobytes/Second`, `Megabytes/Second`, `Gigabytes/Second`, `Terabytes/Second`, `Bits/Second`, `Kilobits/Second`, `Megabits/Second`, `Gigabits/Second`, `Terabits/Second`, `Count/Second`, `None`
- * Default value is `"Count"`
-
-The default unit to use for events which do not have a `CW_unit` field
-If you set this option you should probably set the `value` option along with it
-
-[id="{version}-plugins-{type}s-{plugin}-value"]
-===== `value`
-
- * Value type is <>
- * Default value is `"1"`
-
-The default value to use for events which do not have a `CW_value` field
-If provided, this must be a string which can be converted to a float, for example...
- "1", "2.34", ".5", and "0.67"
-If you set this option you should probably set the `unit` option along with it
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/cloudwatch-v3.0.7.asciidoc b/docs/versioned-plugins/outputs/cloudwatch-v3.0.7.asciidoc
deleted file mode 100644
index 33ef59919..000000000
--- a/docs/versioned-plugins/outputs/cloudwatch-v3.0.7.asciidoc
+++ /dev/null
@@ -1,317 +0,0 @@
-:plugin: cloudwatch
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.7
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-output-cloudwatch/blob/v3.0.7/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Cloudwatch output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output lets you aggregate and send metric data to AWS CloudWatch
-
-==== Summary:
-This plugin is intended to be used on a logstash indexer agent (but that
-is not the only way; see below). In the intended scenario, one cloudwatch
-output plugin is configured, on the logstash indexer node, with just AWS API
-credentials, and possibly a region and/or a namespace. The output looks
-for fields present in events, and when it finds them, it uses them to
-calculate aggregate statistics. If the `metricname` option is set in this
-output, then any events which pass through it will be aggregated & sent to
-CloudWatch, but that is not recommended. The intended use is to NOT set the
-`metricname` option here, and instead to add a `CW_metricname` field (and other
-fields) to only the events you want sent to CloudWatch.
-
-When events pass through this output they are queued for background
-aggregation and sending, which happens every minute by default. The
-queue has a maximum size, and when it is full aggregated statistics will be
-sent to CloudWatch ahead of schedule. Whenever this happens a warning
-message is written to logstash's log. If you see this you should increase
-the `queue_size` configuration option to avoid the extra API calls. The queue
-is emptied every time we send data to CloudWatch.
-
-Note: when logstash is stopped the queue is destroyed before it can be processed.
-This is a known limitation of logstash and will hopefully be addressed in a
-future version.
-
-==== Details:
-There are two ways to configure this plugin, and they can be used in
-combination: event fields & per-output defaults
-
-Event Field configuration...
-You add fields to your events in inputs & filters and this output reads
-those fields to aggregate events. The names of the fields read are
-configurable via the `field_*` options.
-
-Per-output defaults...
-You set universal defaults in this output plugin's configuration, and
-if an event does not have a field for that option then the default is
-used.
-
-Note that the event fields take precedence over the per-output defaults.
-
-At a minimum events must have a "metric name" to be sent to CloudWatch.
-This can be achieved either by providing a default here OR by adding a
-`CW_metricname` field. By default, if no other configuration is provided
-besides a metric name, then events will be counted (Unit: Count, Value: 1)
-by their metric name (either a default or from their `CW_metricname` field).
-
-Other fields which can be added to events to modify the behavior of this
-plugin are `CW_namespace`, `CW_unit`, `CW_value`, and
-`CW_dimensions`. All of these field names are configurable in
-this output. You can also set per-output defaults for any of them.
-See below for details.
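-
-As a hedged sketch of the per-output defaults style (the tag check, metric
-name, and namespace here are hypothetical examples; the settings themselves
-are described below), note how the conditional restricts which events reach
-the output, as the `metricname` warning below recommends:
-
-[source,ruby]
-----------------------------------
-output {
-  if "cloudwatch" in [tags] {       # only send tagged events to CloudWatch
-    cloudwatch {
-      metricname => "TaggedEvents"  # default metric name for these events
-      namespace  => "MyApp"         # default namespace
-      unit       => "Count"
-      value      => "1"
-    }
-  }
-}
-----------------------------------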
-
-Read more about http://aws.amazon.com/cloudwatch/[AWS CloudWatch],
-and the specifics of the API endpoint this output uses,
-http://docs.amazonwebservices.com/AmazonCloudWatch/latest/APIReference/API_PutMetricData.html[PutMetricData]
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Cloudwatch Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-dimensions>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-field_dimensions>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-field_metricname>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-field_namespace>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-field_unit>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-field_value>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-metricname>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-namespace>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-queue_size>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No
-| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-timeframe>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-unit>> |<>, one of `["Seconds", "Microseconds", "Milliseconds", "Bytes", "Kilobytes", "Megabytes", "Gigabytes", "Terabytes", "Bits", "Kilobits", "Megabits", "Gigabits", "Terabits", "Percent", "Count", "Bytes/Second", "Kilobytes/Second", "Megabytes/Second", "Gigabytes/Second", "Terabytes/Second", "Bits/Second", "Kilobits/Second", "Megabits/Second", "Gigabits/Second", "Terabits/Second", "Count/Second", "None"]`|No
-| <<{version}-plugins-{type}s-{plugin}-value>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-access_key_id"]
-===== `access_key_id`
-
- * Value type is <>
- * There is no default value for this setting.
-
-This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
-
-1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config
-2. External credentials file specified by `aws_credentials_file`
-3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
-4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
-5. IAM Instance Profile (available when running inside EC2)
-
-[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"]
-===== `aws_credentials_file`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to a YAML file containing a hash of AWS credentials.
-This file will only be loaded if `access_key_id` and -`secret_access_key` aren't set. The contents of the -file should look like this: - -[source,ruby] ----------------------------------- - :access_key_id: "12345" - :secret_access_key: "54321" ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-batch_size"] -===== `batch_size` - - * Value type is <> - * Default value is `20` - -How many data points can be given in one call to the CloudWatch API - -[id="{version}-plugins-{type}s-{plugin}-dimensions"] -===== `dimensions` - - * Value type is <> - * There is no default value for this setting. - -The default dimensions [ name, value, ... ] to use for events which do not have a `CW_dimensions` field - -[id="{version}-plugins-{type}s-{plugin}-field_dimensions"] -===== `field_dimensions` - - * Value type is <> - * Default value is `"CW_dimensions"` - -The name of the field used to set the dimensions on an event metric -The field named here, if present in an event, must have an array of -one or more key & value pairs, for example... - `add_field => [ "CW_dimensions", "Environment", "CW_dimensions", "prod" ]` -or, equivalently... - `add_field => [ "CW_dimensions", "Environment" ]` - `add_field => [ "CW_dimensions", "prod" ]` - -[id="{version}-plugins-{type}s-{plugin}-field_metricname"] -===== `field_metricname` - - * Value type is <> - * Default value is `"CW_metricname"` - -The name of the field used to set the metric name on an event -The author of this plugin recommends adding this field to events in inputs & -filters rather than using the per-output default setting so that one output -plugin on your logstash indexer can serve all events (which of course had -fields set on your logstash shippers.) - -[id="{version}-plugins-{type}s-{plugin}-field_namespace"] -===== `field_namespace` - - * Value type is <> - * Default value is `"CW_namespace"` - -The name of the field used to set a different namespace per event -Note: Only one namespace can be sent to CloudWatch per API call -so setting different namespaces will increase the number of API calls -and those cost money. - -[id="{version}-plugins-{type}s-{plugin}-field_unit"] -===== `field_unit` - - * Value type is <> - * Default value is `"CW_unit"` - -The name of the field used to set the unit on an event metric - -[id="{version}-plugins-{type}s-{plugin}-field_value"] -===== `field_value` - - * Value type is <> - * Default value is `"CW_value"` - -The name of the field used to set the value (float) on an event metric - -[id="{version}-plugins-{type}s-{plugin}-metricname"] -===== `metricname` - - * Value type is <> - * There is no default value for this setting. - -The default metric name to use for events which do not have a `CW_metricname` field. -Beware: If this is provided then all events which pass through this output will be aggregated and -sent to CloudWatch, so use this carefully. Furthermore, when providing this option, you -will probably want to also restrict events from passing through this output using event -type, tag, and field matching - -[id="{version}-plugins-{type}s-{plugin}-namespace"] -===== `namespace` - - * Value type is <> - * Default value is `"Logstash"` - -The default namespace to use for events which do not have a `CW_namespace` field - -[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] -===== `proxy_uri` - - * Value type is <> - * There is no default value for this setting. 
-
-URI to proxy server if required
-
-[id="{version}-plugins-{type}s-{plugin}-queue_size"]
-===== `queue_size`
-
- * Value type is <>
- * Default value is `10000`
-
-How many events to queue before forcing a call to the CloudWatch API ahead of the `timeframe` schedule
-Set this to the number of events-per-timeframe you will be sending to CloudWatch to avoid extra API calls
-
-[id="{version}-plugins-{type}s-{plugin}-region"]
-===== `region`
-
- * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
- * Default value is `"us-east-1"`
-
-The AWS Region
-
-[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
-===== `secret_access_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The AWS Secret Access Key
-
-[id="{version}-plugins-{type}s-{plugin}-session_token"]
-===== `session_token`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The AWS Session token for temporary credentials
-
-[id="{version}-plugins-{type}s-{plugin}-timeframe"]
-===== `timeframe`
-
- * Value type is <>
- * Default value is `"1m"`
-
-How often to send data to CloudWatch.
-This does not affect the event timestamps; events will always have their
-actual timestamp (to-the-minute) sent to CloudWatch.
-
-We only call the API if there is data to send.
-
-See the Rufus Scheduler docs for an https://github.com/jmettraux/rufus-scheduler#the-time-strings-understood-by-rufus-scheduler[explanation of allowed values]
-
-[id="{version}-plugins-{type}s-{plugin}-unit"]
-===== `unit`
-
- * Value can be any of: `Seconds`, `Microseconds`, `Milliseconds`, `Bytes`, `Kilobytes`, `Megabytes`, `Gigabytes`, `Terabytes`, `Bits`, `Kilobits`, `Megabits`, `Gigabits`, `Terabits`, `Percent`, `Count`, `Bytes/Second`, `Kilobytes/Second`, `Megabytes/Second`, `Gigabytes/Second`, `Terabytes/Second`, `Bits/Second`, `Kilobits/Second`, `Megabits/Second`, `Gigabits/Second`, `Terabits/Second`, `Count/Second`, `None`
- * Default value is `"Count"`
-
-The default unit to use for events which do not have a `CW_unit` field
-If you set this option you should probably set the `value` option along with it
-
-[id="{version}-plugins-{type}s-{plugin}-value"]
-===== `value`
-
- * Value type is <>
- * Default value is `"1"`
-
-The default value to use for events which do not have a `CW_value` field
-If provided, this must be a string which can be converted to a float, for example...
- "1", "2.34", ".5", and "0.67" -If you set this option you should probably set the `unit` option along with it - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/csv-index.asciidoc b/docs/versioned-plugins/outputs/csv-index.asciidoc deleted file mode 100644 index 3747f9fd3..000000000 --- a/docs/versioned-plugins/outputs/csv-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: csv -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::csv-v3.0.6.asciidoc[] -include::csv-v3.0.5.asciidoc[] -include::csv-v3.0.4.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/csv-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/csv-v3.0.4.asciidoc deleted file mode 100644 index 2130816c2..000000000 --- a/docs/versioned-plugins/outputs/csv-v3.0.4.asciidoc +++ /dev/null @@ -1,175 +0,0 @@ -:plugin: csv -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-csv/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Csv output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -CSV output. - -Write events to disk in CSV or other delimited format -Based on the file output, many config values are shared -Uses the Ruby csv library internally - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Csv Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-csv_options>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-file_mode>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-gzip>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-spreadsheet_safe>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-create_if_deleted"] -===== `create_if_deleted` - - * Value type is <> - * Default value is `true` - -If the configured file is deleted, but an event is handled by the plugin, -the plugin will recreate the file. 
-
-[id="{version}-plugins-{type}s-{plugin}-csv_options"]
-===== `csv_options`
-
- * Value type is <>
- * Default value is `{}`
-
-Options for CSV output. This is passed directly to the Ruby stdlib to_csv function.
-Full documentation is available on the http://ruby-doc.org/stdlib-2.0.0/libdoc/csv/rdoc/index.html[Ruby CSV documentation page].
-A typical use case would be to use alternative column or row separators, e.g. `csv_options => {"col_sep" => "\t" "row_sep" => "\r\n"}` gives tab-separated data with Windows line endings.
-
-[id="{version}-plugins-{type}s-{plugin}-dir_mode"]
-===== `dir_mode`
-
- * Value type is <>
- * Default value is `-1`
-
-Dir access mode to use. Note that due to a bug in JRuby the system umask
-is ignored on Linux: https://github.com/jruby/jruby/issues/3426
-Setting it to -1 uses the default OS value.
-Example: `"dir_mode" => 0750`
-
-[id="{version}-plugins-{type}s-{plugin}-fields"]
-===== `fields`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The field names from the event that should be written to the CSV file.
-Fields are written to the CSV in the same order as the array.
-If a field does not exist on the event, an empty string will be written.
-Supports field reference syntax, e.g. `fields => ["field1", "[nested][field]"]`.
-
-[id="{version}-plugins-{type}s-{plugin}-file_mode"]
-===== `file_mode`
-
- * Value type is <>
- * Default value is `-1`
-
-File access mode to use. Note that due to a bug in JRuby the system umask
-is ignored on Linux: https://github.com/jruby/jruby/issues/3426
-Setting it to -1 uses the default OS value.
-Example: `"file_mode" => 0640`
-
-[id="{version}-plugins-{type}s-{plugin}-filename_failure"]
-===== `filename_failure`
-
- * Value type is <>
- * Default value is `"_filepath_failures"`
-
-If the generated path is invalid, the events will be saved
-into this file and inside the defined path.
-
-[id="{version}-plugins-{type}s-{plugin}-flush_interval"]
-===== `flush_interval`
-
- * Value type is <>
- * Default value is `2`
-
-Flush interval (in seconds) for flushing writes to log files.
-0 will flush on every message.
-
-[id="{version}-plugins-{type}s-{plugin}-gzip"]
-===== `gzip`
-
- * Value type is <>
- * Default value is `false`
-
-Gzip the output stream before writing to disk.
-
-[id="{version}-plugins-{type}s-{plugin}-path"]
-===== `path`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-This output writes events to files on disk. You can use fields
-from the event as parts of the filename and/or path.
-
-By default, this output writes one event per line in **json** format.
-You can customise the line format using the `line` codec like
-[source,ruby]
-output {
-  file {
-    path => ...
-    codec => line { format => "custom format: %{message}"}
-  }
-}
-The path to the file to write. Event fields can be used here,
-like `/var/log/logstash/%{host}/%{application}`
-One may also utilize the path option for date-based log
-rotation via the joda time format. This will use the event
-timestamp.
-E.g.: `path => "./test-%{+YYYY-MM-dd}.txt"` to create
-`./test-2013-05-29.txt`
-
-If you use an absolute path you cannot start with a dynamic string.
-E.g.: `/%{myfield}/`, `/test-%{myfield}/` are not valid paths
-
-[id="{version}-plugins-{type}s-{plugin}-spreadsheet_safe"]
-===== `spreadsheet_safe`
-
- * Value type is <>
- * Default value is `true`
-
-Option to not escape/munge string values.
Please note that turning off this option
-may not make the values safe in your spreadsheet application.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/csv-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/csv-v3.0.5.asciidoc
deleted file mode 100644
index 795a42e69..000000000
--- a/docs/versioned-plugins/outputs/csv-v3.0.5.asciidoc
+++ /dev/null
@@ -1,175 +0,0 @@
-:plugin: csv
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.5
-:release_date: 2017-08-16
-:changelog_url: https://github.com/logstash-plugins/logstash-output-csv/blob/v3.0.5/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Csv output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-CSV output.
-
-Write events to disk in CSV or other delimited format
-Based on the file output, many config values are shared
-Uses the Ruby csv library internally
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Csv Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-csv_options>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-file_mode>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-gzip>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-spreadsheet_safe>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-create_if_deleted"]
-===== `create_if_deleted`
-
- * Value type is <>
- * Default value is `true`
-
-If the configured file is deleted, but an event is handled by the plugin,
-the plugin will recreate the file.
-
-[id="{version}-plugins-{type}s-{plugin}-csv_options"]
-===== `csv_options`
-
- * Value type is <>
- * Default value is `{}`
-
-Options for CSV output. This is passed directly to the Ruby stdlib to_csv function.
-Full documentation is available on the http://ruby-doc.org/stdlib-2.0.0/libdoc/csv/rdoc/index.html[Ruby CSV documentation page].
-A typical use case would be to use alternative column or row separators, e.g. `csv_options => {"col_sep" => "\t" "row_sep" => "\r\n"}` gives tab-separated data with Windows line endings.
-
-[id="{version}-plugins-{type}s-{plugin}-dir_mode"]
-===== `dir_mode`
-
- * Value type is <>
- * Default value is `-1`
-
-Dir access mode to use.
Note that due to a bug in JRuby the system umask
-is ignored on Linux: https://github.com/jruby/jruby/issues/3426
-Setting it to -1 uses the default OS value.
-Example: `"dir_mode" => 0750`
-
-[id="{version}-plugins-{type}s-{plugin}-fields"]
-===== `fields`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The field names from the event that should be written to the CSV file.
-Fields are written to the CSV in the same order as the array.
-If a field does not exist on the event, an empty string will be written.
-Supports field reference syntax, e.g. `fields => ["field1", "[nested][field]"]`.
-
-[id="{version}-plugins-{type}s-{plugin}-file_mode"]
-===== `file_mode`
-
- * Value type is <>
- * Default value is `-1`
-
-File access mode to use. Note that due to a bug in JRuby the system umask
-is ignored on Linux: https://github.com/jruby/jruby/issues/3426
-Setting it to -1 uses the default OS value.
-Example: `"file_mode" => 0640`
-
-[id="{version}-plugins-{type}s-{plugin}-filename_failure"]
-===== `filename_failure`
-
- * Value type is <>
- * Default value is `"_filepath_failures"`
-
-If the generated path is invalid, the events will be saved
-into this file and inside the defined path.
-
-[id="{version}-plugins-{type}s-{plugin}-flush_interval"]
-===== `flush_interval`
-
- * Value type is <>
- * Default value is `2`
-
-Flush interval (in seconds) for flushing writes to log files.
-0 will flush on every message.
-
-[id="{version}-plugins-{type}s-{plugin}-gzip"]
-===== `gzip`
-
- * Value type is <>
- * Default value is `false`
-
-Gzip the output stream before writing to disk.
-
-[id="{version}-plugins-{type}s-{plugin}-path"]
-===== `path`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-This output writes events to files on disk. You can use fields
-from the event as parts of the filename and/or path.
-
-By default, this output writes one event per line in **json** format.
-You can customise the line format using the `line` codec like
-[source,ruby]
-output {
-  file {
-    path => ...
-    codec => line { format => "custom format: %{message}"}
-  }
-}
-The path to the file to write. Event fields can be used here,
-like `/var/log/logstash/%{host}/%{application}`
-One may also utilize the path option for date-based log
-rotation via the joda time format. This will use the event
-timestamp.
-E.g.: `path => "./test-%{+YYYY-MM-dd}.txt"` to create
-`./test-2013-05-29.txt`
-
-If you use an absolute path you cannot start with a dynamic string.
-E.g.: `/%{myfield}/`, `/test-%{myfield}/` are not valid paths
-
-[id="{version}-plugins-{type}s-{plugin}-spreadsheet_safe"]
-===== `spreadsheet_safe`
-
- * Value type is <>
- * Default value is `true`
-
-Option to not escape/munge string values. Please note that turning off this option
-may not make the values safe in your spreadsheet application.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/csv-v3.0.6.asciidoc b/docs/versioned-plugins/outputs/csv-v3.0.6.asciidoc
deleted file mode 100644
index f5526fded..000000000
--- a/docs/versioned-plugins/outputs/csv-v3.0.6.asciidoc
+++ /dev/null
@@ -1,175 +0,0 @@
-:plugin: csv
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.6
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-output-csv/blob/v3.0.6/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Csv output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-CSV output.
-
-Write events to disk in CSV or other delimited format
-Based on the file output, many config values are shared
-Uses the Ruby csv library internally
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Csv Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-csv_options>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-file_mode>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-gzip>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-spreadsheet_safe>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-create_if_deleted"]
-===== `create_if_deleted`
-
- * Value type is <>
- * Default value is `true`
-
-If the configured file is deleted, but an event is handled by the plugin,
-the plugin will recreate the file.
-
-[id="{version}-plugins-{type}s-{plugin}-csv_options"]
-===== `csv_options`
-
- * Value type is <>
- * Default value is `{}`
-
-Options for CSV output. This is passed directly to the Ruby stdlib to_csv function.
-Full documentation is available on the http://ruby-doc.org/stdlib-2.0.0/libdoc/csv/rdoc/index.html[Ruby CSV documentation page].
-A typical use case would be to use alternative column or row separators, e.g. `csv_options => {"col_sep" => "\t" "row_sep" => "\r\n"}` gives tab-separated data with Windows line endings.
-
-[id="{version}-plugins-{type}s-{plugin}-dir_mode"]
-===== `dir_mode`
-
- * Value type is <>
- * Default value is `-1`
-
-Dir access mode to use. Note that due to a bug in JRuby the system umask
-is ignored on Linux: https://github.com/jruby/jruby/issues/3426
-Setting it to -1 uses the default OS value.
-Example: `"dir_mode" => 0750`
-
-[id="{version}-plugins-{type}s-{plugin}-fields"]
-===== `fields`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The field names from the event that should be written to the CSV file.
-Fields are written to the CSV in the same order as the array.
-If a field does not exist on the event, an empty string will be written.
-Supports field reference syntax, e.g. `fields => ["field1", "[nested][field]"]`.
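-
-To make the interplay between `fields`, `path`, and `csv_options` concrete,
-here is a minimal sketch; the path and field names are hypothetical examples,
-and `path` and `csv_options` are described in their own sections on this page:
-
-[source,ruby]
-----------------------------------
-output {
-  csv {
-    path        => "/var/log/exports/events-%{+YYYY-MM-dd}.csv"
-    fields      => ["message", "host", "[nested][field]"]
-    csv_options => { "col_sep" => "\t" }  # tab-separated columns
-  }
-}
-----------------------------------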
-
-[id="{version}-plugins-{type}s-{plugin}-file_mode"]
-===== `file_mode`
-
- * Value type is <>
- * Default value is `-1`
-
-File access mode to use. Note that due to a bug in JRuby the system umask
-is ignored on Linux: https://github.com/jruby/jruby/issues/3426
-Setting it to -1 uses the default OS value.
-Example: `"file_mode" => 0640`
-
-[id="{version}-plugins-{type}s-{plugin}-filename_failure"]
-===== `filename_failure`
-
- * Value type is <>
- * Default value is `"_filepath_failures"`
-
-If the generated path is invalid, the events will be saved
-into this file and inside the defined path.
-
-[id="{version}-plugins-{type}s-{plugin}-flush_interval"]
-===== `flush_interval`
-
- * Value type is <>
- * Default value is `2`
-
-Flush interval (in seconds) for flushing writes to log files.
-0 will flush on every message.
-
-[id="{version}-plugins-{type}s-{plugin}-gzip"]
-===== `gzip`
-
- * Value type is <>
- * Default value is `false`
-
-Gzip the output stream before writing to disk.
-
-[id="{version}-plugins-{type}s-{plugin}-path"]
-===== `path`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-This output writes events to files on disk. You can use fields
-from the event as parts of the filename and/or path.
-
-By default, this output writes one event per line in **json** format.
-You can customise the line format using the `line` codec like
-[source,ruby]
-output {
-  file {
-    path => ...
-    codec => line { format => "custom format: %{message}"}
-  }
-}
-The path to the file to write. Event fields can be used here,
-like `/var/log/logstash/%{host}/%{application}`
-One may also utilize the path option for date-based log
-rotation via the joda time format. This will use the event
-timestamp.
-E.g.: `path => "./test-%{+YYYY-MM-dd}.txt"` to create
-`./test-2013-05-29.txt`
-
-If you use an absolute path you cannot start with a dynamic string.
-E.g.: `/%{myfield}/`, `/test-%{myfield}/` are not valid paths
-
-[id="{version}-plugins-{type}s-{plugin}-spreadsheet_safe"]
-===== `spreadsheet_safe`
-
- * Value type is <>
- * Default value is `true`
-
-Option to not escape/munge string values. Please note that turning off this option
-may not make the values safe in your spreadsheet application.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/datadog-index.asciidoc b/docs/versioned-plugins/outputs/datadog-index.asciidoc
deleted file mode 100644
index 0b3f5a51e..000000000
--- a/docs/versioned-plugins/outputs/datadog-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: datadog
-:type: output
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-16
-| <> | 2017-06-23
-|=======================================================================
-
-include::datadog-v3.0.4.asciidoc[]
-include::datadog-v3.0.3.asciidoc[]
-include::datadog-v3.0.1.asciidoc[]
-
diff --git a/docs/versioned-plugins/outputs/datadog-v3.0.1.asciidoc b/docs/versioned-plugins/outputs/datadog-v3.0.1.asciidoc
deleted file mode 100644
index 60b6aca0d..000000000
--- a/docs/versioned-plugins/outputs/datadog-v3.0.1.asciidoc
+++ /dev/null
@@ -1,124 +0,0 @@
-:plugin: datadog
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.1
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-output-datadog/blob/v3.0.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="plugins-{type}-{plugin}"]
-
-=== Datadog output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output lets you send events (for now; metrics soon) to
-DataDogHQ based on Logstash events.
-
-Note that since Logstash maintains no state,
-these will be one-shot events.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Datadog Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-alert_type>> |<>, one of `["info", "error", "warning", "success"]`|No
-| <<{version}-plugins-{type}s-{plugin}-api_key>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-date_happened>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-dd_tags>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-priority>> |<>, one of `["normal", "low"]`|No
-| <<{version}-plugins-{type}s-{plugin}-source_type_name>> |<>, one of `["nagios", "hudson", "jenkins", "user", "my apps", "feed", "chef", "puppet", "git", "bitbucket", "fabric", "capistrano"]`|No
-| <<{version}-plugins-{type}s-{plugin}-text>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-title>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-alert_type"]
-===== `alert_type`
-
- * Value can be any of: `info`, `error`, `warning`, `success`
- * There is no default value for this setting.
-
-Alert type
-
-[id="{version}-plugins-{type}s-{plugin}-api_key"]
-===== `api_key`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Your DatadogHQ API key
-
-[id="{version}-plugins-{type}s-{plugin}-date_happened"]
-===== `date_happened`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Date Happened
-
-[id="{version}-plugins-{type}s-{plugin}-dd_tags"]
-===== `dd_tags`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Set any custom tags for this event.
-Defaults to the Logstash tags, if any.
-
-[id="{version}-plugins-{type}s-{plugin}-priority"]
-===== `priority`
-
- * Value can be any of: `normal`, `low`
- * There is no default value for this setting.
-
-Priority
-
-[id="{version}-plugins-{type}s-{plugin}-source_type_name"]
-===== `source_type_name`
-
- * Value can be any of: `nagios`, `hudson`, `jenkins`, `user`, `my apps`, `feed`, `chef`, `puppet`, `git`, `bitbucket`, `fabric`, `capistrano`
- * Default value is `"my apps"`
-
-Source type name
-
-[id="{version}-plugins-{type}s-{plugin}-text"]
-===== `text`
-
- * Value type is <>
- * Default value is `"%{message}"`
-
-Text
-
-[id="{version}-plugins-{type}s-{plugin}-title"]
-===== `title`
-
- * Value type is <>
- * Default value is `"Logstash event for %{host}"`
-
-Title
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/datadog-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/datadog-v3.0.3.asciidoc
deleted file mode 100644
index 4f9991b5b..000000000
--- a/docs/versioned-plugins/outputs/datadog-v3.0.3.asciidoc
+++ /dev/null
@@ -1,122 +0,0 @@
-:plugin: datadog
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.3
-:release_date: 2017-08-16
-:changelog_url: https://github.com/logstash-plugins/logstash-output-datadog/blob/v3.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Datadog output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output sends events to DataDogHQ based on Logstash events.
-
-Note that since Logstash maintains no state,
-these will be one-shot events.
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Datadog Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-alert_type>> |<>, one of `["info", "error", "warning", "success"]`|No
-| <<{version}-plugins-{type}s-{plugin}-api_key>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-date_happened>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-dd_tags>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-priority>> |<>, one of `["normal", "low"]`|No
-| <<{version}-plugins-{type}s-{plugin}-source_type_name>> |<>, one of `["nagios", "hudson", "jenkins", "user", "my apps", "feed", "chef", "puppet", "git", "bitbucket", "fabric", "capistrano"]`|No
-| <<{version}-plugins-{type}s-{plugin}-text>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-title>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-alert_type"]
-===== `alert_type`
-
- * Value can be any of: `info`, `error`, `warning`, `success`
- * There is no default value for this setting.
-
-Alert type
-
-[id="{version}-plugins-{type}s-{plugin}-api_key"]
-===== `api_key`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Your DatadogHQ API key
-
-[id="{version}-plugins-{type}s-{plugin}-date_happened"]
-===== `date_happened`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Date Happened
-
-[id="{version}-plugins-{type}s-{plugin}-dd_tags"]
-===== `dd_tags`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Set any custom tags for this event.
-Defaults to the Logstash tags, if any.
-
-[id="{version}-plugins-{type}s-{plugin}-priority"]
-===== `priority`
-
- * Value can be any of: `normal`, `low`
- * There is no default value for this setting.
-
-Priority
-
-[id="{version}-plugins-{type}s-{plugin}-source_type_name"]
-===== `source_type_name`
-
- * Value can be any of: `nagios`, `hudson`, `jenkins`, `user`, `my apps`, `feed`, `chef`, `puppet`, `git`, `bitbucket`, `fabric`, `capistrano`
- * Default value is `"my apps"`
-
-Source type name
-
-[id="{version}-plugins-{type}s-{plugin}-text"]
-===== `text`
-
- * Value type is <>
- * Default value is `"%{message}"`
-
-Text
-
-[id="{version}-plugins-{type}s-{plugin}-title"]
-===== `title`
-
- * Value type is <>
- * Default value is `"Logstash event for %{host}"`
-
-Title
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/datadog-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/datadog-v3.0.4.asciidoc
deleted file mode 100644
index 914cf87d5..000000000
--- a/docs/versioned-plugins/outputs/datadog-v3.0.4.asciidoc
+++ /dev/null
@@ -1,122 +0,0 @@
-:plugin: datadog
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.4
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-output-datadog/blob/v3.0.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Datadog output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output sends events to DataDogHQ based on Logstash events.
-
-Note that since Logstash maintains no state,
-these will be one-shot events.
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Datadog Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-alert_type>> |<>, one of `["info", "error", "warning", "success"]`|No
-| <<{version}-plugins-{type}s-{plugin}-api_key>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-date_happened>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-dd_tags>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-priority>> |<>, one of `["normal", "low"]`|No
-| <<{version}-plugins-{type}s-{plugin}-source_type_name>> |<>, one of `["nagios", "hudson", "jenkins", "user", "my apps", "feed", "chef", "puppet", "git", "bitbucket", "fabric", "capistrano"]`|No
-| <<{version}-plugins-{type}s-{plugin}-text>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-title>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-alert_type"]
-===== `alert_type`
-
- * Value can be any of: `info`, `error`, `warning`, `success`
- * There is no default value for this setting.
-
-Alert type
-
-[id="{version}-plugins-{type}s-{plugin}-api_key"]
-===== `api_key`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Your DatadogHQ API key
-
-[id="{version}-plugins-{type}s-{plugin}-date_happened"]
-===== `date_happened`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Date Happened
-
-[id="{version}-plugins-{type}s-{plugin}-dd_tags"]
-===== `dd_tags`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Set any custom tags for this event.
-Defaults to the Logstash tags, if any.
-
-[id="{version}-plugins-{type}s-{plugin}-priority"]
-===== `priority`
-
- * Value can be any of: `normal`, `low`
- * There is no default value for this setting.
-
-Priority
-
-[id="{version}-plugins-{type}s-{plugin}-source_type_name"]
-===== `source_type_name`
-
- * Value can be any of: `nagios`, `hudson`, `jenkins`, `user`, `my apps`, `feed`, `chef`, `puppet`, `git`, `bitbucket`, `fabric`, `capistrano`
- * Default value is `"my apps"`
-
-Source type name
-
-[id="{version}-plugins-{type}s-{plugin}-text"]
-===== `text`
-
- * Value type is <>
- * Default value is `"%{message}"`
-
-Text
-
-[id="{version}-plugins-{type}s-{plugin}-title"]
-===== `title`
-
- * Value type is <>
- * Default value is `"Logstash event for %{host}"`
-
-Title
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/datadog_metrics-index.asciidoc b/docs/versioned-plugins/outputs/datadog_metrics-index.asciidoc
deleted file mode 100644
index 9d03fa92a..000000000
--- a/docs/versioned-plugins/outputs/datadog_metrics-index.asciidoc
+++ /dev/null
@@ -1,14 +0,0 @@
-:plugin: datadog_metrics
-:type: output
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-08-21
-| <> | 2017-06-23
-|=======================================================================
-
-include::datadog_metrics-v3.0.2.asciidoc[]
-include::datadog_metrics-v3.0.1.asciidoc[]
-
diff --git a/docs/versioned-plugins/outputs/datadog_metrics-v3.0.1.asciidoc b/docs/versioned-plugins/outputs/datadog_metrics-v3.0.1.asciidoc
deleted file mode 100644
index a91b31451..000000000
--- a/docs/versioned-plugins/outputs/datadog_metrics-v3.0.1.asciidoc
+++ /dev/null
@@ -1,130 +0,0 @@
-:plugin: datadog_metrics
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.1
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-output-datadog_metrics/blob/v3.0.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="plugins-{type}-{plugin}"]
-
-=== Datadog_metrics output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output lets you send metrics to
-DataDogHQ based on Logstash events.
-Default `queue_size` and `timeframe` are low in order to provide near real-time alerting.
-If you do not use Datadog for alerting, consider raising these thresholds.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Datadog_metrics Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-api_key>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-dd_tags>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-device>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metric_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metric_type>> |<>, one of `["gauge", "counter", "%{metric_type}"]`|No -| <<{version}-plugins-{type}s-{plugin}-metric_value>> |<<,>>|No -| <<{version}-plugins-{type}s-{plugin}-queue_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeframe>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-api_key"] -===== `api_key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Your DatadogHQ API key. https://app.datadoghq.com/account/settings#api - -[id="{version}-plugins-{type}s-{plugin}-dd_tags"] -===== `dd_tags` - - * Value type is <> - * There is no default value for this setting. - -Set any custom tags for this event, -default are the Logstash tags if any. - -[id="{version}-plugins-{type}s-{plugin}-device"] -===== `device` - - * Value type is <> - * Default value is `"%{metric_device}"` - -The name of the device that produced the metric. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"%{host}"` - -The name of the host that produced the metric. - -[id="{version}-plugins-{type}s-{plugin}-metric_name"] -===== `metric_name` - - * Value type is <> - * Default value is `"%{metric_name}"` - -The name of the time series. - -[id="{version}-plugins-{type}s-{plugin}-metric_type"] -===== `metric_type` - - * Value can be any of: `gauge`, `counter`, `%{metric_type}` - * Default value is `"%{metric_type}"` - -The type of the metric. - -[id="{version}-plugins-{type}s-{plugin}-metric_value"] -===== `metric_value` - - * Value type is <> - * Default value is `"%{metric_value}"` - -The value. - -[id="{version}-plugins-{type}s-{plugin}-queue_size"] -===== `queue_size` - - * Value type is <> - * Default value is `10` - -How many events to queue before flushing to Datadog -prior to schedule set in `@timeframe` - -[id="{version}-plugins-{type}s-{plugin}-timeframe"] -===== `timeframe` - - * Value type is <> - * Default value is `10` - -How often (in seconds) to flush queued events to Datadog - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/datadog_metrics-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/datadog_metrics-v3.0.2.asciidoc deleted file mode 100644 index 454b6feac..000000000 --- a/docs/versioned-plugins/outputs/datadog_metrics-v3.0.2.asciidoc +++ /dev/null @@ -1,130 +0,0 @@ -:plugin: datadog_metrics -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v3.0.2 -:release_date: 2017-08-21 -:changelog_url: https://github.com/logstash-plugins/logstash-output-datadog_metrics/blob/v3.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Datadog_metrics output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output lets you send metrics to -DataDogHQ based on Logstash events. -Default `queue_size` and `timeframe` are low in order to provide near realtime alerting. -If you do not use Datadog for alerting, consider raising these thresholds. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Datadog_metrics Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-api_key>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-dd_tags>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-device>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metric_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metric_type>> |<>, one of `["gauge", "counter", "%{metric_type}"]`|No -| <<{version}-plugins-{type}s-{plugin}-metric_value>> |<<,>>|No -| <<{version}-plugins-{type}s-{plugin}-queue_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeframe>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-api_key"] -===== `api_key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Your DatadogHQ API key. https://app.datadoghq.com/account/settings#api - -[id="{version}-plugins-{type}s-{plugin}-dd_tags"] -===== `dd_tags` - - * Value type is <> - * There is no default value for this setting. - -Set any custom tags for this event, -default are the Logstash tags if any. - -[id="{version}-plugins-{type}s-{plugin}-device"] -===== `device` - - * Value type is <> - * Default value is `"%{metric_device}"` - -The name of the device that produced the metric. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"%{host}"` - -The name of the host that produced the metric. - -[id="{version}-plugins-{type}s-{plugin}-metric_name"] -===== `metric_name` - - * Value type is <> - * Default value is `"%{metric_name}"` - -The name of the time series. - -[id="{version}-plugins-{type}s-{plugin}-metric_type"] -===== `metric_type` - - * Value can be any of: `gauge`, `counter`, `%{metric_type}` - * Default value is `"%{metric_type}"` - -The type of the metric. - -[id="{version}-plugins-{type}s-{plugin}-metric_value"] -===== `metric_value` - - * Value type is <> - * Default value is `"%{metric_value}"` - -The value. 
- -[id="{version}-plugins-{type}s-{plugin}-queue_size"] -===== `queue_size` - - * Value type is <> - * Default value is `10` - -How many events to queue before flushing to Datadog -prior to schedule set in `@timeframe` - -[id="{version}-plugins-{type}s-{plugin}-timeframe"] -===== `timeframe` - - * Value type is <> - * Default value is `10` - -How often (in seconds) to flush queued events to Datadog - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/elasticsearch_java-index.asciidoc b/docs/versioned-plugins/outputs/elasticsearch_java-index.asciidoc deleted file mode 100644 index 26b49a183..000000000 --- a/docs/versioned-plugins/outputs/elasticsearch_java-index.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -:plugin: elasticsearch_java -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-06-23 -|======================================================================= - -include::elasticsearch_java-v2.1.4.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/elasticsearch_java-v2.1.4.asciidoc b/docs/versioned-plugins/outputs/elasticsearch_java-v2.1.4.asciidoc deleted file mode 100644 index 321b9895c..000000000 --- a/docs/versioned-plugins/outputs/elasticsearch_java-v2.1.4.asciidoc +++ /dev/null @@ -1,491 +0,0 @@ -:plugin: elasticsearch_java -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.1.4 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch_java/blob/v2.1.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Elasticsearch_java output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output lets you store logs in Elasticsearch using the native 'node' and 'transport' -protocols. It is highly recommended to use the regular 'logstash-output-elasticsearch' output -which uses HTTP instead. This output is, in-fact, sometimes slower, and never faster than that one. -Additionally, upgrading your Elasticsearch cluster may require you to simultaneously update this -plugin for any protocol level changes. The HTTP client may be easier to work with due to wider -familiarity with HTTP. - -*VERSION NOTE*: Your Elasticsearch cluster must be running Elasticsearch 1.0.0 or later. - -If you want to set other Elasticsearch options that are not exposed directly -as configuration options, there are two methods: - -* Create an `elasticsearch.yml` file in the $PWD of the Logstash process -* Pass in es.* java properties (`java -Des.node.foo=` or `ruby -J-Des.node.foo=`) - -With the default `protocol` setting ("node"), this plugin will join your -Elasticsearch cluster as a client node, so it will show up in Elasticsearch's -cluster status. - -You can learn more about Elasticsearch at - -==== Operational Notes - -If using the default `protocol` setting ("node"), your firewalls might need -to permit port 9300 in *both* directions (from Logstash to Elasticsearch, and -Elasticsearch to Logstash) - -==== Retry Policy - -By default all bulk requests to ES are synchronous. 
Not all events in a bulk request
-always succeed. For example, an event may not be formatted
-correctly for the index it is targeting (a type mismatch in the mapping). To minimize the loss of
-events, we have a specific retry policy in place. We retry all events that fail to reach
-Elasticsearch because of network-related issues. We retry specific events that exhibit errors under a separate
-policy, described below. Events of this nature are ones that experience ES error codes described as
-retryable errors.
-
-*Retryable Errors:*
-
-- 429, Too Many Requests (RFC6585)
-- 503, The server is currently unable to handle the request due to temporary overloading or maintenance of the server.
-
-Here are the rules of what is retried when:
-
-- Block and retry all events in a bulk response that experiences transient network exceptions, until
- a successful submission is received by Elasticsearch.
-- Retry the subset of sent events that resulted in ES errors of a retryable nature, as listed
- in RETRYABLE_CODES.
-- Events that returned retryable error codes are pushed onto a separate queue for
- retrying. Events in this queue are retried a maximum of 5 times by default (configurable through :max_retries). The size of
- this queue is capped by the value set in :retry_max_items.
-- Events from the retry queue are submitted again either when the queue reaches its max size or when
- the max interval time is reached, which is set in :retry_max_interval.
-- Events that are not retryable, or that have reached their max retry count, are logged to stderr.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Elasticsearch_java Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
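-
-As a sketch only (the addresses below are hypothetical, not taken from this
-document), a minimal configuration using the `transport` protocol could look
-like this. Note that `network_host` is required for either protocol.
-[source,ruby]
-    output {
-      elasticsearch_java {
-        protocol     => "transport"
-        hosts        => ["10.0.0.10"]    # hypothetical data node (transport port, e.g. 9300)
-        network_host => "10.0.0.5"       # hypothetical local bind address
-        index        => "logstash-%{+YYYY.MM.dd}"
-      }
-    }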
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-action>> |<>, one of `["index", "delete", "create", "update", "create_unless_exists"]`|No -| <<{version}-plugins-{type}s-{plugin}-cluster>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-network_host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-node_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-protocol>> |<>, one of `["node", "transport"]`|No -| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No -| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-transport_tcp_port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-action"] -===== `action` - - * Value can be any of: `index`, `delete`, `create`, `update`, `create_unless_exists` - * Default value is `"index"` - -The Elasticsearch action to perform. Valid actions are: - -- index: indexes a document (an event from Logstash). -- delete: deletes a document by id (An id is required for this action) -- create: indexes a document, fails if a document by that id already exists in the index. -- update: updates a document by id. Update has a special case where you can upsert -- update a - document if not already present. See the `upsert` option -- create_unless_exists: create the document unless it already exists, in which case do nothing. - -For more details on actions, check out the http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html[Elasticsearch bulk API documentation] - -[id="{version}-plugins-{type}s-{plugin}-cluster"] -===== `cluster` - - * Value type is <> - * There is no default value for this setting. - -The name of your cluster if you set it on the Elasticsearch side. 
Useful -for discovery when using `node` or `transport` protocols. -By default, it looks for a cluster named 'elasticsearch'. -Equivalent to the Elasticsearch option 'cluster.name' - -[id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"] -===== `doc_as_upsert` - - * Value type is <> - * Default value is `false` - -Enable `doc_as_upsert` for update mode. -Create a new document with source if `document_id` doesn't exist in Elasticsearch - -[id="{version}-plugins-{type}s-{plugin}-document_id"] -===== `document_id` - - * Value type is <> - * There is no default value for this setting. - -The document ID for the index. Useful for overwriting existing entries in -Elasticsearch with the same ID. - -[id="{version}-plugins-{type}s-{plugin}-document_type"] -===== `document_type` - - * Value type is <> - * There is no default value for this setting. - -The document type to write events to. Generally you should try to write only -similar events to the same 'type'. String expansion `%{foo}` works here. -Unless you set 'document_type', the event 'type' will be used if it exists -otherwise the document type will be assigned the value of 'logs' - -[id="{version}-plugins-{type}s-{plugin}-flush_size"] -===== `flush_size` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-hosts"] -===== `hosts` - - * Value type is <> - * Default value is `[//127.0.0.1]` - -Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. -Remember the `http` protocol uses the http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-http.html#modules-http[http] address (eg. 9200, not 9300). - `"127.0.0.1"` - `["127.0.0.1:9200","127.0.0.2:9200"]` - `["http://127.0.0.1"]` - `["https://127.0.0.1:9200"]` - `["https://127.0.0.1:9200/mypath"]` (If using a proxy on a subpath) -It is important to exclude http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-node.html[dedicated master nodes] from the `hosts` list -to prevent LS from sending bulk requests to the master nodes. So this parameter should only reference either data or client nodes in Elasticsearch. - -Any special characters present in the URLs here MUST be URL escaped! This means `#` should be put in as `%23` for instance. - -[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"] -===== `idle_flush_time` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * Default value is `1` - - - -[id="{version}-plugins-{type}s-{plugin}-index"] -===== `index` - - * Value type is <> - * Default value is `"logstash-%{+YYYY.MM.dd}"` - -The index to write events to. This can be dynamic using the `%{foo}` syntax. -The default value will partition your indices by day so you can more easily -delete old data or only search specific date ranges. -Indexes may not contain uppercase characters. -For weekly indexes ISO 8601 format is recommended, eg. logstash-%{+xxxx.ww}. -LS uses Joda to format the index pattern from event timestamp. -Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/format/DateTimeFormat.html[here]. 
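-
-For example, the weekly ISO 8601 index pattern recommended above might be
-configured as follows (a sketch; the bind address is hypothetical):
-[source,ruby]
-    output {
-      elasticsearch_java {
-        network_host => "10.0.0.5"              # hypothetical bind address
-        index        => "logstash-%{+xxxx.ww}"  # ISO week-based weekly indices
-      }
-    }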
- -[id="{version}-plugins-{type}s-{plugin}-manage_template"] -===== `manage_template` - - * Value type is <> - * Default value is `true` - -From Logstash 1.3 onwards, a template is applied to Elasticsearch during -Logstash's startup if one with the name `template_name` does not already exist. -By default, the contents of this template is the default template for -`logstash-%{+YYYY.MM.dd}` which always matches indices based on the pattern -`logstash-*`. Should you require support for other index names, or would like -to change the mappings in the template in general, a custom template can be -specified by setting `template` to the path of a template file. - -Setting `manage_template` to false disables this feature. If you require more -control over template creation, (e.g. creating indices dynamically based on -field names) you should set `manage_template` to false and use the REST -API to apply your templates manually. - -[id="{version}-plugins-{type}s-{plugin}-max_inflight_requests"] -===== `max_inflight_requests` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * Default value is `50` - -This setting no longer does anything. It exists to keep config validation -from failing. It will be removed in future versions. - -[id="{version}-plugins-{type}s-{plugin}-network_host"] -===== `network_host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The name/address of the host to bind to for Elasticsearch clustering. Equivalent to the Elasticsearch option 'network.host' -option. -This MUST be set for either protocol to work (node or transport)! The internal Elasticsearch node -will bind to this ip. This ip MUST be reachable by all nodes in the Elasticsearch cluster - -[id="{version}-plugins-{type}s-{plugin}-node_name"] -===== `node_name` - - * Value type is <> - * There is no default value for this setting. - -The node name Elasticsearch will use when joining a cluster. - -By default, this is generated internally by the ES client. - -[id="{version}-plugins-{type}s-{plugin}-parent"] -===== `parent` - - * Value type is <> - * Default value is `nil` - -For child documents, ID of the associated parent. -This can be dynamic using the `%{foo}` syntax. - -[id="{version}-plugins-{type}s-{plugin}-pipeline"] -===== `pipeline` - - * Value type is <> - * Default value is `nil` - -Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration -here like `pipeline => "%{INGEST_PIPELINE}"` - -[id="{version}-plugins-{type}s-{plugin}-protocol"] -===== `protocol` - - * Value can be any of: `node`, `transport` - * Default value is `"transport"` - -Choose the protocol used to talk to Elasticsearch. - -The 'node' protocol (default) will connect to the cluster as a normal Elasticsearch -node (but will not store data). If you use the `node` protocol, you must permit -bidirectional communication on the port 9300 (or whichever port you have -configured). - -If you do not specify the `host` parameter, it will use multicast for http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-discovery-zen.html[Elasticsearch discovery]. While this may work in a test/dev environment where multicast is enabled in -Elasticsearch, we strongly recommend http://www.elastic.co/guide/en/elasticsearch/guide/current/important-configuration-changes.html#unicast[using unicast] -in Elasticsearch. 
To connect to an Elasticsearch cluster with unicast,
-you must include the `host` parameter (see the relevant section above).
-
-The 'transport' protocol will connect to the host you specify and will
-not show up as a 'node' in the Elasticsearch cluster. This is useful
-in situations where you cannot permit connections outbound from the
-Elasticsearch cluster to this Logstash server.
-
-All protocols will use bulk requests when talking to Elasticsearch.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"]
-===== `retry_initial_interval`
-
- * Value type is <>
- * Default value is `2`
-
-Set the initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval`.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_max_interval"]
-===== `retry_max_interval`
-
- * Value type is <>
- * Default value is `64`
-
-Set the max interval in seconds between bulk retries.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"]
-===== `retry_on_conflict`
-
- * Value type is <>
- * Default value is `1`
-
-The number of times Elasticsearch should internally retry an update or upserted document.
-See the https://www.elastic.co/guide/en/elasticsearch/guide/current/partial-updates.html[partial updates]
-documentation for more info.
-
-[id="{version}-plugins-{type}s-{plugin}-routing"]
-===== `routing`
-
- * Value type is <>
- * There is no default value for this setting.
-
-A routing override to be applied to all processed events.
-This can be dynamic using the `%{foo}` syntax.
-
-[id="{version}-plugins-{type}s-{plugin}-script"]
-===== `script`
-
- * Value type is <>
- * Default value is `""`
-
-Set the script name for scripted update mode.
-
-[id="{version}-plugins-{type}s-{plugin}-script_lang"]
-===== `script_lang`
-
- * Value type is <>
- * Default value is `"painless"`
-
-Set the language of the script to use. If not set, this defaults to `painless` in ES 5.0.
-
-[id="{version}-plugins-{type}s-{plugin}-script_type"]
-===== `script_type`
-
- * Value can be any of: `inline`, `indexed`, `file`
- * Default value is `["inline"]`
-
-Define the type of script referenced by the "script" variable:
- inline : "script" contains an inline script
- indexed : "script" contains the name of a script directly indexed in Elasticsearch
- file : "script" contains the name of a script stored in Elasticsearch's config directory
-
-[id="{version}-plugins-{type}s-{plugin}-script_var_name"]
-===== `script_var_name`
-
- * Value type is <>
- * Default value is `"event"`
-
-Set the variable name passed to the script (scripted update).
-
-[id="{version}-plugins-{type}s-{plugin}-scripted_upsert"]
-===== `scripted_upsert`
-
- * Value type is <>
- * Default value is `false`
-
-If enabled, the script is in charge of creating a non-existent document (scripted update).
-
-[id="{version}-plugins-{type}s-{plugin}-sniffing"]
-===== `sniffing`
-
- * Value type is <>
- * Default value is `false`
-
-Enable cluster sniffing (transport only).
-Asks the host for the list of all cluster nodes and adds them to the hosts list.
-Equivalent to the Elasticsearch option 'client.transport.sniff'.
-
-[id="{version}-plugins-{type}s-{plugin}-template"]
-===== `template`
-
- * Value type is <>
- * There is no default value for this setting.
-
-You can set the path to your own template here, if you so desire.
-If not set, the included template will be used.
-
-[id="{version}-plugins-{type}s-{plugin}-template_name"]
-===== `template_name`
-
- * Value type is <>
- * Default value is `"logstash"`
-
-This configuration option defines how the template is named inside Elasticsearch.
-Note that if you have used the template management features and subsequently
-change this, you will need to prune the old template manually, e.g.
-
-`curl -XDELETE `
-
-where `OldTemplateName` is whatever the former setting was.
-
-[id="{version}-plugins-{type}s-{plugin}-template_overwrite"]
-===== `template_overwrite`
-
- * Value type is <>
- * Default value is `false`
-
-The template_overwrite option will always overwrite the indicated template
-in Elasticsearch with either the one indicated by template or the included one.
-This option is set to false by default. If you always want to stay up to date
-with the template provided by Logstash, this option could be very useful to you.
-Likewise, if you have your own template file managed by puppet, for example, and
-you want to be able to update it regularly, this option could help there as well.
-
-Please note that if you are using your own customized version of the Logstash
-template (logstash), setting this to true will make Logstash overwrite
-the "logstash" template (i.e. remove all customized settings).
-
-[id="{version}-plugins-{type}s-{plugin}-transport_tcp_port"]
-===== `transport_tcp_port`
-
- * Value type is <>
- * There is no default value for this setting.
-
-This sets the local port to bind to. Equivalent to the Elasticsearch option 'transport.tcp.port'.
-
-[id="{version}-plugins-{type}s-{plugin}-upsert"]
-===== `upsert`
-
- * Value type is <>
- * Default value is `""`
-
-Set upsert content for update mode.
-Create a new document with this parameter as a JSON string if `document_id` doesn't exist.
-
-[id="{version}-plugins-{type}s-{plugin}-version"]
-===== `version`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
-See https://www.elastic.co/blog/elasticsearch-versioning-support.
-
-[id="{version}-plugins-{type}s-{plugin}-version_type"]
-===== `version_type`
-
- * Value can be any of: `internal`, `external`, `external_gt`, `external_gte`, `force`
- * There is no default value for this setting.
-
-The version_type to use for indexing.
-See https://www.elastic.co/blog/elasticsearch-versioning-support.
-See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/example-index.asciidoc b/docs/versioned-plugins/outputs/example-index.asciidoc deleted file mode 100644 index 17979309d..000000000 --- a/docs/versioned-plugins/outputs/example-index.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -:plugin: example -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -|======================================================================= - - diff --git a/docs/versioned-plugins/outputs/exec-index.asciidoc b/docs/versioned-plugins/outputs/exec-index.asciidoc deleted file mode 100644 index a2cf35e1e..000000000 --- a/docs/versioned-plugins/outputs/exec-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: exec -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-21 -| <> | 2017-06-23 -|======================================================================= - -include::exec-v3.1.3.asciidoc[] -include::exec-v3.1.2.asciidoc[] -include::exec-v3.1.1.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/exec-v3.1.1.asciidoc b/docs/versioned-plugins/outputs/exec-v3.1.1.asciidoc deleted file mode 100644 index d44f4b669..000000000 --- a/docs/versioned-plugins/outputs/exec-v3.1.1.asciidoc +++ /dev/null @@ -1,86 +0,0 @@ -:plugin: exec -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-exec/blob/v3.1.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Exec output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The exec output will run a command for each event received. Ruby's -`system()` function will be used, i.e. the command string will -be passed to a shell. You can use `%{name}` and other dynamic strings -in the command to pass select fields from the event to the child -process. Example: -[source,ruby] - output { - if [type] == "abuse" { - exec { - command => "iptables -A INPUT -s %{clientip} -j DROP" - } - } - } - -WARNING: If you want it non-blocking you should use `&` or `dtach` -or other such techniques. There is no timeout for the commands being -run so misbehaving commands could otherwise stall the Logstash -pipeline indefinitely. - -WARNING: Exercise great caution with `%{name}` field placeholders. -The contents of the field will be included verbatim without any -sanitization, i.e. any shell metacharacters from the field values -will be passed straight to the shell. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Exec Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-command>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-quiet>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-command"] -===== `command` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Command line to execute via subprocess. Use `dtach` or `screen` to -make it non blocking. This value can include `%{name}` and other -dynamic strings. - -[id="{version}-plugins-{type}s-{plugin}-quiet"] -===== `quiet` - - * Value type is <> - * Default value is `false` - -display the result of the command to the terminal - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/exec-v3.1.2.asciidoc b/docs/versioned-plugins/outputs/exec-v3.1.2.asciidoc deleted file mode 100644 index 2679ebee6..000000000 --- a/docs/versioned-plugins/outputs/exec-v3.1.2.asciidoc +++ /dev/null @@ -1,86 +0,0 @@ -:plugin: exec -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.2 -:release_date: 2017-08-21 -:changelog_url: https://github.com/logstash-plugins/logstash-output-exec/blob/v3.1.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Exec output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The exec output will run a command for each event received. Ruby's -`system()` function will be used, i.e. the command string will -be passed to a shell. You can use `%{name}` and other dynamic strings -in the command to pass select fields from the event to the child -process. Example: -[source,ruby] - output { - if [type] == "abuse" { - exec { - command => "iptables -A INPUT -s %{clientip} -j DROP" - } - } - } - -WARNING: If you want it non-blocking you should use `&` or `dtach` -or other such techniques. There is no timeout for the commands being -run so misbehaving commands could otherwise stall the Logstash -pipeline indefinitely. - -WARNING: Exercise great caution with `%{name}` field placeholders. -The contents of the field will be included verbatim without any -sanitization, i.e. any shell metacharacters from the field values -will be passed straight to the shell. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Exec Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-command>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-quiet>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-command"] -===== `command` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Command line to execute via subprocess. Use `dtach` or `screen` to -make it non blocking. This value can include `%{name}` and other -dynamic strings. - -[id="{version}-plugins-{type}s-{plugin}-quiet"] -===== `quiet` - - * Value type is <> - * Default value is `false` - -display the result of the command to the terminal - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/exec-v3.1.3.asciidoc b/docs/versioned-plugins/outputs/exec-v3.1.3.asciidoc deleted file mode 100644 index 18c1732a4..000000000 --- a/docs/versioned-plugins/outputs/exec-v3.1.3.asciidoc +++ /dev/null @@ -1,86 +0,0 @@ -:plugin: exec -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-output-exec/blob/v3.1.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Exec output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The exec output will run a command for each event received. Ruby's -`system()` function will be used, i.e. the command string will -be passed to a shell. You can use `%{name}` and other dynamic strings -in the command to pass select fields from the event to the child -process. Example: -[source,ruby] - output { - if [type] == "abuse" { - exec { - command => "iptables -A INPUT -s %{clientip} -j DROP" - } - } - } - -WARNING: If you want it non-blocking you should use `&` or `dtach` -or other such techniques. There is no timeout for the commands being -run so misbehaving commands could otherwise stall the Logstash -pipeline indefinitely. - -WARNING: Exercise great caution with `%{name}` field placeholders. -The contents of the field will be included verbatim without any -sanitization, i.e. any shell metacharacters from the field values -will be passed straight to the shell. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Exec Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
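-
-Building on the warnings above, here is a hedged sketch of a non-blocking
-variant that hands the command off to `dtach` (assumes `dtach` is installed;
-the socket path and script are hypothetical):
-[source,ruby]
-    output {
-      exec {
-        # field values are interpolated unsanitized; see the warning above
-        command => "dtach -n /tmp/ls-exec.sock /usr/local/bin/notify.sh '%{message}'"
-        quiet   => true
-      }
-    }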
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-command>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-quiet>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-command"] -===== `command` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Command line to execute via subprocess. Use `dtach` or `screen` to -make it non blocking. This value can include `%{name}` and other -dynamic strings. - -[id="{version}-plugins-{type}s-{plugin}-quiet"] -===== `quiet` - - * Value type is <> - * Default value is `false` - -display the result of the command to the terminal - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/firehose-index.asciidoc b/docs/versioned-plugins/outputs/firehose-index.asciidoc deleted file mode 100644 index 69f53d5ee..000000000 --- a/docs/versioned-plugins/outputs/firehose-index.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -:plugin: firehose -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -|======================================================================= - - diff --git a/docs/versioned-plugins/outputs/ganglia-index.asciidoc b/docs/versioned-plugins/outputs/ganglia-index.asciidoc deleted file mode 100644 index 01bf6bd0d..000000000 --- a/docs/versioned-plugins/outputs/ganglia-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: ganglia -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::ganglia-v3.0.5.asciidoc[] -include::ganglia-v3.0.4.asciidoc[] -include::ganglia-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/ganglia-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/ganglia-v3.0.3.asciidoc deleted file mode 100644 index 5a96f9d34..000000000 --- a/docs/versioned-plugins/outputs/ganglia-v3.0.3.asciidoc +++ /dev/null @@ -1,139 +0,0 @@ -:plugin: ganglia -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-ganglia/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Ganglia output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output allows you to pull metrics from your logs and ship them to -ganglia's gmond. This is heavily based on the graphite output. 
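-
-As an illustrative sketch (the gmond address and the `bytes` field are
-hypothetical, not from this document), shipping a numeric event field as a
-metric might look like:
-[source,ruby]
-    output {
-      ganglia {
-        host        => "gmond.example.com"   # hypothetical gmond address
-        port        => 8649
-        metric      => "apache_bytes"        # hypothetical metric name
-        value       => "%{bytes}"            # coerced to a float by the plugin
-        metric_type => "uint32"
-        units       => "bytes"
-      }
-    }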
- -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Ganglia Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-group>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-lifetime>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metric>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-metric_type>> |<>, one of `["string", "int8", "uint8", "int16", "uint16", "int32", "uint32", "float", "double"]`|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-slope>> |<>, one of `["zero", "positive", "negative", "both", "unspecified"]`|No -| <<{version}-plugins-{type}s-{plugin}-units>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-value>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-group"] -===== `group` - - * Value type is <> - * Default value is `""` - -Metric group - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"localhost"` - -The address of the ganglia server. - -[id="{version}-plugins-{type}s-{plugin}-lifetime"] -===== `lifetime` - - * Value type is <> - * Default value is `300` - -Lifetime in seconds of this metric - -[id="{version}-plugins-{type}s-{plugin}-max_interval"] -===== `max_interval` - - * Value type is <> - * Default value is `60` - -Maximum time in seconds between gmetric calls for this metric. - -[id="{version}-plugins-{type}s-{plugin}-metric"] -===== `metric` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The metric to use. This supports dynamic strings like `%{host}` - -[id="{version}-plugins-{type}s-{plugin}-metric_type"] -===== `metric_type` - - * Value can be any of: `string`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `uint32`, `float`, `double` - * Default value is `"uint8"` - -The type of value for this metric. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `8649` - -The port to connect on your ganglia server. - -[id="{version}-plugins-{type}s-{plugin}-slope"] -===== `slope` - - * Value can be any of: `zero`, `positive`, `negative`, `both`, `unspecified` - * Default value is `"both"` - -Metric slope, represents metric behavior - -[id="{version}-plugins-{type}s-{plugin}-units"] -===== `units` - - * Value type is <> - * Default value is `""` - -Gmetric units for metric, such as "kb/sec" or "ms" or whatever unit -this metric uses. - -[id="{version}-plugins-{type}s-{plugin}-value"] -===== `value` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The value to use. This supports dynamic strings like `%{bytes}` -It will be coerced to a floating point value. 
Values which cannot be -coerced will zero (0) - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/ganglia-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/ganglia-v3.0.4.asciidoc deleted file mode 100644 index 46fe80ea4..000000000 --- a/docs/versioned-plugins/outputs/ganglia-v3.0.4.asciidoc +++ /dev/null @@ -1,139 +0,0 @@ -:plugin: ganglia -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-ganglia/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Ganglia output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output allows you to pull metrics from your logs and ship them to -ganglia's gmond. This is heavily based on the graphite output. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Ganglia Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-group>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-lifetime>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metric>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-metric_type>> |<>, one of `["string", "int8", "uint8", "int16", "uint16", "int32", "uint32", "float", "double"]`|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-slope>> |<>, one of `["zero", "positive", "negative", "both", "unspecified"]`|No -| <<{version}-plugins-{type}s-{plugin}-units>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-value>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-group"] -===== `group` - - * Value type is <> - * Default value is `""` - -Metric group - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"localhost"` - -The address of the ganglia server. - -[id="{version}-plugins-{type}s-{plugin}-lifetime"] -===== `lifetime` - - * Value type is <> - * Default value is `300` - -Lifetime in seconds of this metric - -[id="{version}-plugins-{type}s-{plugin}-max_interval"] -===== `max_interval` - - * Value type is <> - * Default value is `60` - -Maximum time in seconds between gmetric calls for this metric. - -[id="{version}-plugins-{type}s-{plugin}-metric"] -===== `metric` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The metric to use. 
This supports dynamic strings like `%{host}` - -[id="{version}-plugins-{type}s-{plugin}-metric_type"] -===== `metric_type` - - * Value can be any of: `string`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `uint32`, `float`, `double` - * Default value is `"uint8"` - -The type of value for this metric. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `8649` - -The port to connect on your ganglia server. - -[id="{version}-plugins-{type}s-{plugin}-slope"] -===== `slope` - - * Value can be any of: `zero`, `positive`, `negative`, `both`, `unspecified` - * Default value is `"both"` - -Metric slope, represents metric behavior - -[id="{version}-plugins-{type}s-{plugin}-units"] -===== `units` - - * Value type is <> - * Default value is `""` - -Gmetric units for metric, such as "kb/sec" or "ms" or whatever unit -this metric uses. - -[id="{version}-plugins-{type}s-{plugin}-value"] -===== `value` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The value to use. This supports dynamic strings like `%{bytes}` -It will be coerced to a floating point value. Values which cannot be -coerced will zero (0) - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/ganglia-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/ganglia-v3.0.5.asciidoc deleted file mode 100644 index f363aef57..000000000 --- a/docs/versioned-plugins/outputs/ganglia-v3.0.5.asciidoc +++ /dev/null @@ -1,139 +0,0 @@ -:plugin: ganglia -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-output-ganglia/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Ganglia output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output allows you to pull metrics from your logs and ship them to -ganglia's gmond. This is heavily based on the graphite output. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Ganglia Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-group>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-lifetime>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metric>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-metric_type>> |<>, one of `["string", "int8", "uint8", "int16", "uint16", "int32", "uint32", "float", "double"]`|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-slope>> |<>, one of `["zero", "positive", "negative", "both", "unspecified"]`|No -| <<{version}-plugins-{type}s-{plugin}-units>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-value>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-group"] -===== `group` - - * Value type is <> - * Default value is `""` - -Metric group - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"localhost"` - -The address of the ganglia server. - -[id="{version}-plugins-{type}s-{plugin}-lifetime"] -===== `lifetime` - - * Value type is <> - * Default value is `300` - -Lifetime in seconds of this metric - -[id="{version}-plugins-{type}s-{plugin}-max_interval"] -===== `max_interval` - - * Value type is <> - * Default value is `60` - -Maximum time in seconds between gmetric calls for this metric. - -[id="{version}-plugins-{type}s-{plugin}-metric"] -===== `metric` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The metric to use. This supports dynamic strings like `%{host}` - -[id="{version}-plugins-{type}s-{plugin}-metric_type"] -===== `metric_type` - - * Value can be any of: `string`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `uint32`, `float`, `double` - * Default value is `"uint8"` - -The type of value for this metric. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `8649` - -The port to connect on your ganglia server. - -[id="{version}-plugins-{type}s-{plugin}-slope"] -===== `slope` - - * Value can be any of: `zero`, `positive`, `negative`, `both`, `unspecified` - * Default value is `"both"` - -Metric slope, represents metric behavior - -[id="{version}-plugins-{type}s-{plugin}-units"] -===== `units` - - * Value type is <> - * Default value is `""` - -Gmetric units for metric, such as "kb/sec" or "ms" or whatever unit -this metric uses. - -[id="{version}-plugins-{type}s-{plugin}-value"] -===== `value` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The value to use. This supports dynamic strings like `%{bytes}` -It will be coerced to a floating point value. 
Values which cannot be -coerced will zero (0) - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/gelf-index.asciidoc b/docs/versioned-plugins/outputs/gelf-index.asciidoc deleted file mode 100644 index 3c45e5693..000000000 --- a/docs/versioned-plugins/outputs/gelf-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: gelf -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::gelf-v3.1.4.asciidoc[] -include::gelf-v3.1.3.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/gelf-v3.1.3.asciidoc b/docs/versioned-plugins/outputs/gelf-v3.1.3.asciidoc deleted file mode 100644 index 3795bfa9d..000000000 --- a/docs/versioned-plugins/outputs/gelf-v3.1.3.asciidoc +++ /dev/null @@ -1,167 +0,0 @@ -:plugin: gelf -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-gelf/blob/v3.1.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Gelf output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output generates messages in GELF format. This is most useful if you -want to use Logstash to output events to Graylog2. - -More information at http://graylog2.org/gelf#specs[The Graylog2 GELF specs page] - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Gelf Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-chunksize>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-custom_fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-full_message>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-ignore_metadata>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-level>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sender>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ship_metadata>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ship_tags>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-short_message>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-chunksize"] -===== `chunksize` - - * Value type is <> - * Default value is `1420` - -The GELF chunksize. You usually don't need to change this. - -[id="{version}-plugins-{type}s-{plugin}-custom_fields"] -===== `custom_fields` - - * Value type is <> - * Default value is `{}` - -The GELF custom field mappings. GELF supports arbitrary attributes as custom -fields. 
This exposes that. Exclude the `_` portion of the field name -e.g. `custom_fields => ['foo_field', 'some_value']` -sets `_foo_field` = `some_value`. - -[id="{version}-plugins-{type}s-{plugin}-full_message"] -===== `full_message` - - * Value type is <> - * Default value is `"%{message}"` - -The GELF full message. Dynamic values like `%{foo}` are permitted here. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Graylog2 server IP address or hostname. - -[id="{version}-plugins-{type}s-{plugin}-ignore_metadata"] -===== `ignore_metadata` - - * Value type is <> - * Default value is `["@timestamp", "@version", "severity", "host", "source_host", "source_path", "short_message"]` - -Ignore these fields when `ship_metadata` is set. Typically this lists the -fields used in dynamic values for GELF fields. - -[id="{version}-plugins-{type}s-{plugin}-level"] -===== `level` - - * Value type is <> - * Default value is `["%{severity}", "INFO"]` - -The GELF message level. Dynamic values like `%{level}` are permitted here; -useful if you want to parse the 'log level' from an event and use that -as the GELF level/severity. - -Values here can be integers [0..7] inclusive or any of -"debug", "info", "warn", "error", "fatal" (case insensitive). -Single-character versions of these are also valid, "d", "i", "w", "e", "f", -"u" -The following additional severity\_labels from Logstash's syslog\_pri filter -are accepted: "emergency", "alert", "critical", "warning", "notice", and -"informational". - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `12201` - -Graylog2 server port number. - -[id="{version}-plugins-{type}s-{plugin}-sender"] -===== `sender` - - * Value type is <> - * Default value is `"%{host}"` - -Allow overriding of the GELF `sender` field. This is useful if you -want to use something other than the event's source host as the -"sender" of an event. A common case for this is using the application name -instead of the hostname. - -[id="{version}-plugins-{type}s-{plugin}-ship_metadata"] -===== `ship_metadata` - - * Value type is <> - * Default value is `true` - -Should Logstash ship metadata within event object? This will cause Logstash -to ship any fields in the event (such as those created by grok) in the GELF -messages. These will be sent as underscored "additional fields". - -[id="{version}-plugins-{type}s-{plugin}-ship_tags"] -===== `ship_tags` - - * Value type is <> - * Default value is `true` - -Ship tags within events. This will cause Logstash to ship the tags of an -event as the field `\_tags`. - -[id="{version}-plugins-{type}s-{plugin}-short_message"] -===== `short_message` - - * Value type is <> - * Default value is `"short_message"` - -The GELF short message field name. If the field does not exist or is empty, -the event message is taken instead. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/gelf-v3.1.4.asciidoc b/docs/versioned-plugins/outputs/gelf-v3.1.4.asciidoc deleted file mode 100644 index 68db01129..000000000 --- a/docs/versioned-plugins/outputs/gelf-v3.1.4.asciidoc +++ /dev/null @@ -1,167 +0,0 @@ -:plugin: gelf -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v3.1.4 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-gelf/blob/v3.1.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Gelf output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output generates messages in GELF format. This is most useful if you -want to use Logstash to output events to Graylog2. - -More information at http://graylog2.org/gelf#specs[The Graylog2 GELF specs page] - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Gelf Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-chunksize>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-custom_fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-full_message>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-ignore_metadata>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-level>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sender>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ship_metadata>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ship_tags>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-short_message>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-chunksize"] -===== `chunksize` - - * Value type is <> - * Default value is `1420` - -The GELF chunksize. You usually don't need to change this. - -[id="{version}-plugins-{type}s-{plugin}-custom_fields"] -===== `custom_fields` - - * Value type is <> - * Default value is `{}` - -The GELF custom field mappings. GELF supports arbitrary attributes as custom -fields. This exposes that. Exclude the `_` portion of the field name -e.g. `custom_fields => ['foo_field', 'some_value']` -sets `_foo_field` = `some_value`. - -[id="{version}-plugins-{type}s-{plugin}-full_message"] -===== `full_message` - - * Value type is <> - * Default value is `"%{message}"` - -The GELF full message. Dynamic values like `%{foo}` are permitted here. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Graylog2 server IP address or hostname. - -[id="{version}-plugins-{type}s-{plugin}-ignore_metadata"] -===== `ignore_metadata` - - * Value type is <> - * Default value is `["@timestamp", "@version", "severity", "host", "source_host", "source_path", "short_message"]` - -Ignore these fields when `ship_metadata` is set. Typically this lists the -fields used in dynamic values for GELF fields. - -[id="{version}-plugins-{type}s-{plugin}-level"] -===== `level` - - * Value type is <> - * Default value is `["%{severity}", "INFO"]` - -The GELF message level. 
Dynamic values like `%{level}` are permitted here; -useful if you want to parse the 'log level' from an event and use that -as the GELF level/severity. - -Values here can be integers [0..7] inclusive or any of -"debug", "info", "warn", "error", "fatal" (case insensitive). -Single-character versions of these are also valid, "d", "i", "w", "e", "f", -"u" -The following additional severity\_labels from Logstash's syslog\_pri filter -are accepted: "emergency", "alert", "critical", "warning", "notice", and -"informational". - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `12201` - -Graylog2 server port number. - -[id="{version}-plugins-{type}s-{plugin}-sender"] -===== `sender` - - * Value type is <> - * Default value is `"%{host}"` - -Allow overriding of the GELF `sender` field. This is useful if you -want to use something other than the event's source host as the -"sender" of an event. A common case for this is using the application name -instead of the hostname. - -[id="{version}-plugins-{type}s-{plugin}-ship_metadata"] -===== `ship_metadata` - - * Value type is <> - * Default value is `true` - -Should Logstash ship metadata within event object? This will cause Logstash -to ship any fields in the event (such as those created by grok) in the GELF -messages. These will be sent as underscored "additional fields". - -[id="{version}-plugins-{type}s-{plugin}-ship_tags"] -===== `ship_tags` - - * Value type is <> - * Default value is `true` - -Ship tags within events. This will cause Logstash to ship the tags of an -event as the field `\_tags`. - -[id="{version}-plugins-{type}s-{plugin}-short_message"] -===== `short_message` - - * Value type is <> - * Default value is `"short_message"` - -The GELF short message field name. If the field does not exist or is empty, -the event message is taken instead. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/gemfire-index.asciidoc b/docs/versioned-plugins/outputs/gemfire-index.asciidoc deleted file mode 100644 index 4feed9525..000000000 --- a/docs/versioned-plugins/outputs/gemfire-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: gemfire -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::gemfire-v2.0.6.asciidoc[] -include::gemfire-v2.0.5.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/gemfire-v2.0.5.asciidoc b/docs/versioned-plugins/outputs/gemfire-v2.0.5.asciidoc deleted file mode 100644 index 5a4dd14f8..000000000 --- a/docs/versioned-plugins/outputs/gemfire-v2.0.5.asciidoc +++ /dev/null @@ -1,100 +0,0 @@ -:plugin: gemfire -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.0.5 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-gemfire/blob/v2.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Gemfire output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Push events to a GemFire region. - -GemFire is an object database. - -To use this plugin you need to add gemfire.jar to your CLASSPATH; -using format=json requires jackson.jar too. - -Note: this plugin has only been tested with GemFire 7.0. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Gemfire Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cache_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cache_xml_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key_format>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-region_name>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-cache_name"] -===== `cache_name` - - * Value type is <> - * Default value is `"logstash"` - -Your client cache name - -[id="{version}-plugins-{type}s-{plugin}-cache_xml_file"] -===== `cache_xml_file` - - * Value type is <> - * Default value is `nil` - -The path to a GemFire client cache XML file. - -Example: -[source,xml] - - - - - - - - - - - -[id="{version}-plugins-{type}s-{plugin}-key_format"] -===== `key_format` - - * Value type is <> - * Default value is `"%{host}-%{@timestamp}"` - -A sprintf format to use when building keys - -[id="{version}-plugins-{type}s-{plugin}-region_name"] -===== `region_name` - - * Value type is <> - * Default value is `"Logstash"` - -The region name - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/gemfire-v2.0.6.asciidoc b/docs/versioned-plugins/outputs/gemfire-v2.0.6.asciidoc deleted file mode 100644 index 457873493..000000000 --- a/docs/versioned-plugins/outputs/gemfire-v2.0.6.asciidoc +++ /dev/null @@ -1,100 +0,0 @@ -:plugin: gemfire -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.0.6 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-gemfire/blob/v2.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Gemfire output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Push events to a GemFire region. - -GemFire is an object database. - -To use this plugin you need to add gemfire.jar to your CLASSPATH; -using format=json requires jackson.jar too. - -Note: this plugin has only been tested with GemFire 7.0. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Gemfire Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
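-
-As a quick orientation before the option list, here is a minimal sketch of a
-`gemfire` output. The cache XML path is an illustrative assumption; the other
-values shown are the documented defaults:
-
-[source,json]
---------------------------
-output {
-  gemfire {
-    cache_name => "logstash"                    # client cache name (default)
-    cache_xml_file => "/etc/gemfire/cache.xml"  # assumed path to a client cache XML file
-    region_name => "Logstash"                   # target region (default)
-    key_format => "%{host}-%{@timestamp}"       # sprintf format used to build keys (default)
-  }
-}
---------------------------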
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cache_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cache_xml_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key_format>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-region_name>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-cache_name"] -===== `cache_name` - - * Value type is <> - * Default value is `"logstash"` - -Your client cache name - -[id="{version}-plugins-{type}s-{plugin}-cache_xml_file"] -===== `cache_xml_file` - - * Value type is <> - * Default value is `nil` - -The path to a GemFire client cache XML file. - -Example: -[source,xml] - - - - - - - - - - - -[id="{version}-plugins-{type}s-{plugin}-key_format"] -===== `key_format` - - * Value type is <> - * Default value is `"%{host}-%{@timestamp}"` - -A sprintf format to use when building keys - -[id="{version}-plugins-{type}s-{plugin}-region_name"] -===== `region_name` - - * Value type is <> - * Default value is `"Logstash"` - -The region name - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/google_bigquery-index.asciidoc b/docs/versioned-plugins/outputs/google_bigquery-index.asciidoc deleted file mode 100644 index 5c4e759aa..000000000 --- a/docs/versioned-plugins/outputs/google_bigquery-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: google_bigquery -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::google_bigquery-v3.2.3.asciidoc[] -include::google_bigquery-v3.2.2.asciidoc[] -include::google_bigquery-v3.2.1.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/google_bigquery-v3.2.1.asciidoc b/docs/versioned-plugins/outputs/google_bigquery-v3.2.1.asciidoc deleted file mode 100644 index 6bcdbd41f..000000000 --- a/docs/versioned-plugins/outputs/google_bigquery-v3.2.1.asciidoc +++ /dev/null @@ -1,302 +0,0 @@ -:plugin: google_bigquery -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.2.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-google_bigquery/blob/v3.2.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Google_bigquery output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -[source,txt] ------ -Author: Rodrigo De Castro -Date: 2013-09-20 - -Copyright 2013 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------ -Summary: plugin to upload log events to Google BigQuery (BQ), rolling -files based on the date pattern provided as a configuration setting. Events -are written to files locally and, once file is closed, this plugin uploads -it to the configured BigQuery dataset. - -VERY IMPORTANT: -. To make good use of BigQuery, your log events should be parsed and -structured. Consider using grok to parse your events into fields that can -be uploaded to BQ. -. You must configure your plugin so it gets events with the same structure, -so the BigQuery schema suits them. In case you want to upload log events -with different structures, you can utilize multiple configuration blocks, -separating different log events with Logstash conditionals. More details on -Logstash conditionals can be found here: -http://logstash.net/docs/1.2.1/configuration#conditionals - -For more info on Google BigQuery, please go to: -https://developers.google.com/bigquery/ - -In order to use this plugin, a Google service account must be used. For -more information, please refer to: -https://developers.google.com/storage/docs/authentication#service_accounts - -Recommendations: - -. Experiment with the settings depending on how much log data you generate, -your needs to see "fresh" data, and how much data you could lose in the event -of crash. For instance, if you want to see recent data in BQ quickly, you -could configure the plugin to upload data every minute or so (provided you -have enough log events to justify that). Note also, that if uploads are too -frequent, there is no guarantee that they will be imported in the same order, -so later data may be available before earlier data. - -. BigQuery charges for storage and for queries, depending on how much data -it reads to perform a query. These are other aspects to consider when -considering the date pattern which will be used to create new tables and also -how to compose the queries when using BQ. For more info on BigQuery Pricing, -please access: -https://developers.google.com/bigquery/pricing - -USAGE: -This is an example of logstash config: - -[source,json] --------------------------- -output { - google_bigquery { - project_id => "folkloric-guru-278" (required) - dataset => "logs" (required) - csv_schema => "path:STRING,status:INTEGER,score:FLOAT" (required) <1> - key_path => "/path/to/privatekey.p12" (required) - key_password => "notasecret" (optional) - service_account => "1234@developer.gserviceaccount.com" (required) - temp_directory => "/tmp/logstash-bq" (optional) - temp_file_prefix => "logstash_bq" (optional) - date_pattern => "%Y-%m-%dT%H:00" (optional) - flush_interval_secs => 2 (optional) - uploader_interval_secs => 60 (optional) - deleter_interval_secs => 60 (optional) - } -} --------------------------- - -<1> Specify either a csv_schema or a json_schema. - -* Refactor common code between Google BQ and GCS plugins. -* Turn Google API code into a Plugin Mixin (like AwsConfig). -* There's no recover method, so if logstash/plugin crashes, files may not -be uploaded to BQ. 
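-
-The note above about uploading events with different structures is easiest to
-see in a sketch. Assuming a hypothetical `type` field distinguishes two event
-shapes, each conditional branch gets its own plugin block and schema (the field
-names and table prefixes here are illustrative; the credentials reuse the
-placeholders from the usage example):
-
-[source,json]
---------------------------
-output {
-  if [type] == "apache" {
-    google_bigquery {
-      project_id => "folkloric-guru-278"
-      dataset => "logs"
-      csv_schema => "path:STRING,status:INTEGER,score:FLOAT"
-      key_path => "/path/to/privatekey.p12"
-      service_account => "1234@developer.gserviceaccount.com"
-      table_prefix => "apache"    # keep differently shaped events in separate tables
-    }
-  } else {
-    google_bigquery {
-      project_id => "folkloric-guru-278"
-      dataset => "logs"
-      csv_schema => "message:STRING,host:STRING"
-      key_path => "/path/to/privatekey.p12"
-      service_account => "1234@developer.gserviceaccount.com"
-      table_prefix => "other"
-    }
-  }
-}
---------------------------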
- -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Google_bigquery Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-csv_schema>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-dataset>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-date_pattern>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-deleter_interval_secs>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-flush_interval_secs>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ignore_unknown_values>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-json_schema>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key_path>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-project_id>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-service_account>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-table_prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-table_separator>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-temp_directory>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-temp_file_prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-uploader_interval_secs>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-csv_schema"] -===== `csv_schema` - - * Value type is <> - * Default value is `nil` - -Schema for log data. It must follow this format: -:,:,... -Example: path:STRING,status:INTEGER,score:FLOAT - -[id="{version}-plugins-{type}s-{plugin}-dataset"] -===== `dataset` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -BigQuery dataset to which these events will be added to. - -[id="{version}-plugins-{type}s-{plugin}-date_pattern"] -===== `date_pattern` - - * Value type is <> - * Default value is `"%Y-%m-%dT%H:00"` - -Time pattern for BigQuery table, defaults to hourly tables. -Must Time.strftime patterns: www.ruby-doc.org/core-2.0/Time.html#method-i-strftime - -[id="{version}-plugins-{type}s-{plugin}-deleter_interval_secs"] -===== `deleter_interval_secs` - - * Value type is <> - * Default value is `60` - -Deleter interval when checking if upload jobs are done for file deletion. -This only affects how long files are on the hard disk after the job is done. - -[id="{version}-plugins-{type}s-{plugin}-flush_interval_secs"] -===== `flush_interval_secs` - - * Value type is <> - * Default value is `2` - -Flush interval in seconds for flushing writes to log files. 0 will flush -on every message. - -[id="{version}-plugins-{type}s-{plugin}-ignore_unknown_values"] -===== `ignore_unknown_values` - - * Value type is <> - * Default value is `false` - -Indicates if BigQuery should allow extra values that are not represented in the table schema. -If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. - -[id="{version}-plugins-{type}s-{plugin}-json_schema"] -===== `json_schema` - - * Value type is <> - * Default value is `nil` - -Schema for log data, as a hash. 
Example: -json_schema => { - fields => [{ - name => "timestamp" - type => "TIMESTAMP" - }, { - name => "host" - type => "STRING" - }, { - name => "message" - type => "STRING" - }] -} - -[id="{version}-plugins-{type}s-{plugin}-key_password"] -===== `key_password` - - * Value type is <> - * Default value is `"notasecret"` - -Private key password for service account private key. - -[id="{version}-plugins-{type}s-{plugin}-key_path"] -===== `key_path` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Path to private key file for Google Service Account. - -[id="{version}-plugins-{type}s-{plugin}-project_id"] -===== `project_id` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Google Cloud Project ID (number, not Project Name!). - -[id="{version}-plugins-{type}s-{plugin}-service_account"] -===== `service_account` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Service account to access Google APIs. - -[id="{version}-plugins-{type}s-{plugin}-table_prefix"] -===== `table_prefix` - - * Value type is <> - * Default value is `"logstash"` - -BigQuery table ID prefix to be used when creating new tables for log data. -Table name will be - -[id="{version}-plugins-{type}s-{plugin}-table_separator"] -===== `table_separator` - - * Value type is <> - * Default value is `"_"` - -BigQuery table separator to be added between the table_prefix and the -date suffix. - -[id="{version}-plugins-{type}s-{plugin}-temp_directory"] -===== `temp_directory` - - * Value type is <> - * Default value is `""` - -Directory where temporary files are stored. -Defaults to /tmp/logstash-bq- - -[id="{version}-plugins-{type}s-{plugin}-temp_file_prefix"] -===== `temp_file_prefix` - - * Value type is <> - * Default value is `"logstash_bq"` - -Temporary local file prefix. Log file will follow the format: -_hostname_date.part?.log - -[id="{version}-plugins-{type}s-{plugin}-uploader_interval_secs"] -===== `uploader_interval_secs` - - * Value type is <> - * Default value is `60` - -Uploader interval when uploading new files to BigQuery. Adjust time based -on your time pattern (for example, for hourly files, this interval can be -around one hour). - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/google_bigquery-v3.2.2.asciidoc b/docs/versioned-plugins/outputs/google_bigquery-v3.2.2.asciidoc deleted file mode 100644 index 39fa46261..000000000 --- a/docs/versioned-plugins/outputs/google_bigquery-v3.2.2.asciidoc +++ /dev/null @@ -1,302 +0,0 @@ -:plugin: google_bigquery -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.2.2 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-google_bigquery/blob/v3.2.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Google_bigquery output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -[source,txt] ------ -Author: Rodrigo De Castro -Date: 2013-09-20 - -Copyright 2013 Google Inc. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------ -Summary: plugin to upload log events to Google BigQuery (BQ), rolling -files based on the date pattern provided as a configuration setting. Events -are written to files locally and, once file is closed, this plugin uploads -it to the configured BigQuery dataset. - -VERY IMPORTANT: -. To make good use of BigQuery, your log events should be parsed and -structured. Consider using grok to parse your events into fields that can -be uploaded to BQ. -. You must configure your plugin so it gets events with the same structure, -so the BigQuery schema suits them. In case you want to upload log events -with different structures, you can utilize multiple configuration blocks, -separating different log events with Logstash conditionals. More details on -Logstash conditionals can be found here: -http://logstash.net/docs/1.2.1/configuration#conditionals - -For more info on Google BigQuery, please go to: -https://developers.google.com/bigquery/ - -In order to use this plugin, a Google service account must be used. For -more information, please refer to: -https://developers.google.com/storage/docs/authentication#service_accounts - -Recommendations: - -. Experiment with the settings depending on how much log data you generate, -your needs to see "fresh" data, and how much data you could lose in the event -of crash. For instance, if you want to see recent data in BQ quickly, you -could configure the plugin to upload data every minute or so (provided you -have enough log events to justify that). Note also, that if uploads are too -frequent, there is no guarantee that they will be imported in the same order, -so later data may be available before earlier data. - -. BigQuery charges for storage and for queries, depending on how much data -it reads to perform a query. These are other aspects to consider when -considering the date pattern which will be used to create new tables and also -how to compose the queries when using BQ. For more info on BigQuery Pricing, -please access: -https://developers.google.com/bigquery/pricing - -USAGE: -This is an example of logstash config: - -[source,json] --------------------------- -output { - google_bigquery { - project_id => "folkloric-guru-278" (required) - dataset => "logs" (required) - csv_schema => "path:STRING,status:INTEGER,score:FLOAT" (required) <1> - key_path => "/path/to/privatekey.p12" (required) - key_password => "notasecret" (optional) - service_account => "1234@developer.gserviceaccount.com" (required) - temp_directory => "/tmp/logstash-bq" (optional) - temp_file_prefix => "logstash_bq" (optional) - date_pattern => "%Y-%m-%dT%H:00" (optional) - flush_interval_secs => 2 (optional) - uploader_interval_secs => 60 (optional) - deleter_interval_secs => 60 (optional) - } -} --------------------------- - -<1> Specify either a csv_schema or a json_schema. - -* Refactor common code between Google BQ and GCS plugins. -* Turn Google API code into a Plugin Mixin (like AwsConfig). 
-* There's no recover method, so if logstash/plugin crashes, files may not -be uploaded to BQ. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Google_bigquery Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-csv_schema>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-dataset>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-date_pattern>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-deleter_interval_secs>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-flush_interval_secs>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ignore_unknown_values>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-json_schema>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key_path>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-project_id>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-service_account>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-table_prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-table_separator>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-temp_directory>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-temp_file_prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-uploader_interval_secs>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-csv_schema"] -===== `csv_schema` - - * Value type is <> - * Default value is `nil` - -Schema for log data. It must follow this format: -:,:,... -Example: path:STRING,status:INTEGER,score:FLOAT - -[id="{version}-plugins-{type}s-{plugin}-dataset"] -===== `dataset` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -BigQuery dataset to which these events will be added to. - -[id="{version}-plugins-{type}s-{plugin}-date_pattern"] -===== `date_pattern` - - * Value type is <> - * Default value is `"%Y-%m-%dT%H:00"` - -Time pattern for BigQuery table, defaults to hourly tables. -Must Time.strftime patterns: www.ruby-doc.org/core-2.0/Time.html#method-i-strftime - -[id="{version}-plugins-{type}s-{plugin}-deleter_interval_secs"] -===== `deleter_interval_secs` - - * Value type is <> - * Default value is `60` - -Deleter interval when checking if upload jobs are done for file deletion. -This only affects how long files are on the hard disk after the job is done. - -[id="{version}-plugins-{type}s-{plugin}-flush_interval_secs"] -===== `flush_interval_secs` - - * Value type is <> - * Default value is `2` - -Flush interval in seconds for flushing writes to log files. 0 will flush -on every message. - -[id="{version}-plugins-{type}s-{plugin}-ignore_unknown_values"] -===== `ignore_unknown_values` - - * Value type is <> - * Default value is `false` - -Indicates if BigQuery should allow extra values that are not represented in the table schema. -If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. 
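-
-For example, if upstream processing sometimes adds fields that are not part of
-the table schema (a stray `debug_info` field, say), the following sketch lets
-BigQuery drop them instead of failing the record; the credentials and schema
-reuse the placeholders from the usage example above:
-
-[source,json]
---------------------------
-output {
-  google_bigquery {
-    project_id => "folkloric-guru-278"
-    dataset => "logs"
-    csv_schema => "path:STRING,status:INTEGER,score:FLOAT"
-    key_path => "/path/to/privatekey.p12"
-    service_account => "1234@developer.gserviceaccount.com"
-    ignore_unknown_values => true  # extra event fields are ignored, not treated as bad records
-  }
-}
---------------------------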
- -[id="{version}-plugins-{type}s-{plugin}-json_schema"] -===== `json_schema` - - * Value type is <> - * Default value is `nil` - -Schema for log data, as a hash. Example: -json_schema => { - fields => [{ - name => "timestamp" - type => "TIMESTAMP" - }, { - name => "host" - type => "STRING" - }, { - name => "message" - type => "STRING" - }] -} - -[id="{version}-plugins-{type}s-{plugin}-key_password"] -===== `key_password` - - * Value type is <> - * Default value is `"notasecret"` - -Private key password for service account private key. - -[id="{version}-plugins-{type}s-{plugin}-key_path"] -===== `key_path` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Path to private key file for Google Service Account. - -[id="{version}-plugins-{type}s-{plugin}-project_id"] -===== `project_id` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Google Cloud Project ID (number, not Project Name!). - -[id="{version}-plugins-{type}s-{plugin}-service_account"] -===== `service_account` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Service account to access Google APIs. - -[id="{version}-plugins-{type}s-{plugin}-table_prefix"] -===== `table_prefix` - - * Value type is <> - * Default value is `"logstash"` - -BigQuery table ID prefix to be used when creating new tables for log data. -Table name will be - -[id="{version}-plugins-{type}s-{plugin}-table_separator"] -===== `table_separator` - - * Value type is <> - * Default value is `"_"` - -BigQuery table separator to be added between the table_prefix and the -date suffix. - -[id="{version}-plugins-{type}s-{plugin}-temp_directory"] -===== `temp_directory` - - * Value type is <> - * Default value is `""` - -Directory where temporary files are stored. -Defaults to /tmp/logstash-bq- - -[id="{version}-plugins-{type}s-{plugin}-temp_file_prefix"] -===== `temp_file_prefix` - - * Value type is <> - * Default value is `"logstash_bq"` - -Temporary local file prefix. Log file will follow the format: -_hostname_date.part?.log - -[id="{version}-plugins-{type}s-{plugin}-uploader_interval_secs"] -===== `uploader_interval_secs` - - * Value type is <> - * Default value is `60` - -Uploader interval when uploading new files to BigQuery. Adjust time based -on your time pattern (for example, for hourly files, this interval can be -around one hour). - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/google_bigquery-v3.2.3.asciidoc b/docs/versioned-plugins/outputs/google_bigquery-v3.2.3.asciidoc deleted file mode 100644 index 8ddb695ed..000000000 --- a/docs/versioned-plugins/outputs/google_bigquery-v3.2.3.asciidoc +++ /dev/null @@ -1,302 +0,0 @@ -:plugin: google_bigquery -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.2.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-output-google_bigquery/blob/v3.2.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Google_bigquery output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -[source,txt] ------ -Author: Rodrigo De Castro -Date: 2013-09-20 - -Copyright 2013 Google Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. ------ -Summary: plugin to upload log events to Google BigQuery (BQ), rolling -files based on the date pattern provided as a configuration setting. Events -are written to files locally and, once file is closed, this plugin uploads -it to the configured BigQuery dataset. - -VERY IMPORTANT: -. To make good use of BigQuery, your log events should be parsed and -structured. Consider using grok to parse your events into fields that can -be uploaded to BQ. -. You must configure your plugin so it gets events with the same structure, -so the BigQuery schema suits them. In case you want to upload log events -with different structures, you can utilize multiple configuration blocks, -separating different log events with Logstash conditionals. More details on -Logstash conditionals can be found here: -http://logstash.net/docs/1.2.1/configuration#conditionals - -For more info on Google BigQuery, please go to: -https://developers.google.com/bigquery/ - -In order to use this plugin, a Google service account must be used. For -more information, please refer to: -https://developers.google.com/storage/docs/authentication#service_accounts - -Recommendations: - -. Experiment with the settings depending on how much log data you generate, -your needs to see "fresh" data, and how much data you could lose in the event -of crash. For instance, if you want to see recent data in BQ quickly, you -could configure the plugin to upload data every minute or so (provided you -have enough log events to justify that). Note also, that if uploads are too -frequent, there is no guarantee that they will be imported in the same order, -so later data may be available before earlier data. - -. BigQuery charges for storage and for queries, depending on how much data -it reads to perform a query. These are other aspects to consider when -considering the date pattern which will be used to create new tables and also -how to compose the queries when using BQ. 
For more info on BigQuery Pricing, -please access: -https://developers.google.com/bigquery/pricing - -USAGE: -This is an example of logstash config: - -[source,json] --------------------------- -output { - google_bigquery { - project_id => "folkloric-guru-278" (required) - dataset => "logs" (required) - csv_schema => "path:STRING,status:INTEGER,score:FLOAT" (required) <1> - key_path => "/path/to/privatekey.p12" (required) - key_password => "notasecret" (optional) - service_account => "1234@developer.gserviceaccount.com" (required) - temp_directory => "/tmp/logstash-bq" (optional) - temp_file_prefix => "logstash_bq" (optional) - date_pattern => "%Y-%m-%dT%H:00" (optional) - flush_interval_secs => 2 (optional) - uploader_interval_secs => 60 (optional) - deleter_interval_secs => 60 (optional) - } -} --------------------------- - -<1> Specify either a csv_schema or a json_schema. - -* Refactor common code between Google BQ and GCS plugins. -* Turn Google API code into a Plugin Mixin (like AwsConfig). -* There's no recover method, so if logstash/plugin crashes, files may not -be uploaded to BQ. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Google_bigquery Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-csv_schema>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-dataset>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-date_pattern>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-deleter_interval_secs>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-flush_interval_secs>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ignore_unknown_values>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-json_schema>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key_path>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-project_id>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-service_account>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-table_prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-table_separator>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-temp_directory>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-temp_file_prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-uploader_interval_secs>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-csv_schema"] -===== `csv_schema` - - * Value type is <> - * Default value is `nil` - -Schema for log data. It must follow this format: -:,:,... -Example: path:STRING,status:INTEGER,score:FLOAT - -[id="{version}-plugins-{type}s-{plugin}-dataset"] -===== `dataset` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -BigQuery dataset to which these events will be added to. - -[id="{version}-plugins-{type}s-{plugin}-date_pattern"] -===== `date_pattern` - - * Value type is <> - * Default value is `"%Y-%m-%dT%H:00"` - -Time pattern for BigQuery table, defaults to hourly tables. 
-Must Time.strftime patterns: www.ruby-doc.org/core-2.0/Time.html#method-i-strftime - -[id="{version}-plugins-{type}s-{plugin}-deleter_interval_secs"] -===== `deleter_interval_secs` - - * Value type is <> - * Default value is `60` - -Deleter interval when checking if upload jobs are done for file deletion. -This only affects how long files are on the hard disk after the job is done. - -[id="{version}-plugins-{type}s-{plugin}-flush_interval_secs"] -===== `flush_interval_secs` - - * Value type is <> - * Default value is `2` - -Flush interval in seconds for flushing writes to log files. 0 will flush -on every message. - -[id="{version}-plugins-{type}s-{plugin}-ignore_unknown_values"] -===== `ignore_unknown_values` - - * Value type is <> - * Default value is `false` - -Indicates if BigQuery should allow extra values that are not represented in the table schema. -If true, the extra values are ignored. If false, records with extra columns are treated as bad records, and if there are too many bad records, an invalid error is returned in the job result. The default value is false. - -[id="{version}-plugins-{type}s-{plugin}-json_schema"] -===== `json_schema` - - * Value type is <> - * Default value is `nil` - -Schema for log data, as a hash. Example: -json_schema => { - fields => [{ - name => "timestamp" - type => "TIMESTAMP" - }, { - name => "host" - type => "STRING" - }, { - name => "message" - type => "STRING" - }] -} - -[id="{version}-plugins-{type}s-{plugin}-key_password"] -===== `key_password` - - * Value type is <> - * Default value is `"notasecret"` - -Private key password for service account private key. - -[id="{version}-plugins-{type}s-{plugin}-key_path"] -===== `key_path` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Path to private key file for Google Service Account. - -[id="{version}-plugins-{type}s-{plugin}-project_id"] -===== `project_id` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Google Cloud Project ID (number, not Project Name!). - -[id="{version}-plugins-{type}s-{plugin}-service_account"] -===== `service_account` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Service account to access Google APIs. - -[id="{version}-plugins-{type}s-{plugin}-table_prefix"] -===== `table_prefix` - - * Value type is <> - * Default value is `"logstash"` - -BigQuery table ID prefix to be used when creating new tables for log data. -Table name will be - -[id="{version}-plugins-{type}s-{plugin}-table_separator"] -===== `table_separator` - - * Value type is <> - * Default value is `"_"` - -BigQuery table separator to be added between the table_prefix and the -date suffix. - -[id="{version}-plugins-{type}s-{plugin}-temp_directory"] -===== `temp_directory` - - * Value type is <> - * Default value is `""` - -Directory where temporary files are stored. -Defaults to /tmp/logstash-bq- - -[id="{version}-plugins-{type}s-{plugin}-temp_file_prefix"] -===== `temp_file_prefix` - - * Value type is <> - * Default value is `"logstash_bq"` - -Temporary local file prefix. Log file will follow the format: -_hostname_date.part?.log - -[id="{version}-plugins-{type}s-{plugin}-uploader_interval_secs"] -===== `uploader_interval_secs` - - * Value type is <> - * Default value is `60` - -Uploader interval when uploading new files to BigQuery. 
Adjust time based -on your time pattern (for example, for hourly files, this interval can be -around one hour). - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/google_cloud_storage-index.asciidoc b/docs/versioned-plugins/outputs/google_cloud_storage-index.asciidoc deleted file mode 100644 index e1ae4540e..000000000 --- a/docs/versioned-plugins/outputs/google_cloud_storage-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: google_cloud_storage -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::google_cloud_storage-v3.0.4.asciidoc[] -include::google_cloud_storage-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/google_cloud_storage-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/google_cloud_storage-v3.0.3.asciidoc deleted file mode 100644 index 138f559ac..000000000 --- a/docs/versioned-plugins/outputs/google_cloud_storage-v3.0.3.asciidoc +++ /dev/null @@ -1,206 +0,0 @@ -:plugin: google_cloud_storage -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-google_cloud_storage/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Google_cloud_storage output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Summary: plugin to upload log events to Google Cloud Storage (GCS), rolling -files based on the date pattern provided as a configuration setting. Events -are written to files locally and, once file is closed, this plugin uploads -it to the configured bucket. - -For more info on Google Cloud Storage, please go to: -https://cloud.google.com/products/cloud-storage - -In order to use this plugin, a Google service account must be used. For -more information, please refer to: -https://developers.google.com/storage/docs/authentication#service_accounts - -Recommendation: experiment with the settings depending on how much log -data you generate, so the uploader can keep up with the generated logs. -Using gzip output can be a good option to reduce network traffic when -uploading the log files and in terms of storage costs as well. - -USAGE: -This is an example of logstash config: - -[source,json] --------------------------- -output { - google_cloud_storage { - bucket => "my_bucket" (required) - key_path => "/path/to/privatekey.p12" (required) - key_password => "notasecret" (optional) - service_account => "1234@developer.gserviceaccount.com" (required) - temp_directory => "/tmp/logstash-gcs" (optional) - log_file_prefix => "logstash_gcs" (optional) - max_file_size_kbytes => 1024 (optional) - output_format => "plain" (optional) - date_pattern => "%Y-%m-%dT%H:00" (optional) - flush_interval_secs => 2 (optional) - gzip => false (optional) - uploader_interval_secs => 60 (optional) - } -} --------------------------- - -* Support logstash event variables to determine filename. 
-* Turn Google API code into a Plugin Mixin (like AwsConfig).
-* There's no recover method, so if logstash/plugin crashes, files may not
-be uploaded to GCS.
-* Allow user to configure file name.
-* Allow parallel uploads for heavier loads (+ connection configuration if
-exposed by Ruby API client).
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Google_cloud_storage Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-bucket>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-date_pattern>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-flush_interval_secs>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-gzip>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-key_password>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-key_path>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-log_file_prefix>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-max_file_size_kbytes>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-output_format>> |<>, one of `["json", "plain"]`|No
-| <<{version}-plugins-{type}s-{plugin}-service_account>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-temp_directory>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-uploader_interval_secs>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-bucket"]
-===== `bucket`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-GCS bucket name, without "gs://" or any other prefix.
-
-[id="{version}-plugins-{type}s-{plugin}-date_pattern"]
-===== `date_pattern`
-
- * Value type is <>
- * Default value is `"%Y-%m-%dT%H:00"`
-
-Time pattern for log file, defaults to hourly files.
-Must use Time.strftime patterns: www.ruby-doc.org/core-2.0/Time.html#method-i-strftime
-
-[id="{version}-plugins-{type}s-{plugin}-flush_interval_secs"]
-===== `flush_interval_secs`
-
- * Value type is <>
- * Default value is `2`
-
-Flush interval in seconds for flushing writes to log files. 0 will flush
-on every message.
-
-[id="{version}-plugins-{type}s-{plugin}-gzip"]
-===== `gzip`
-
- * Value type is <>
- * Default value is `false`
-
-Gzip output stream when writing events to log files.
-
-[id="{version}-plugins-{type}s-{plugin}-key_password"]
-===== `key_password`
-
- * Value type is <>
- * Default value is `"notasecret"`
-
-GCS private key password.
-
-[id="{version}-plugins-{type}s-{plugin}-key_path"]
-===== `key_path`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-GCS path to private key file.
-
-[id="{version}-plugins-{type}s-{plugin}-log_file_prefix"]
-===== `log_file_prefix`
-
- * Value type is <>
- * Default value is `"logstash_gcs"`
-
-Log file prefix. Log file will follow the format:
-_hostname_date<.part?>.log
-
-[id="{version}-plugins-{type}s-{plugin}-max_file_size_kbytes"]
-===== `max_file_size_kbytes`
-
- * Value type is <>
- * Default value is `10000`
-
-Sets the max file size in kbytes. A value of 0 disables the max file size check.
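-
-Because files roll over on both the date pattern and this size cap, the two are
-often tuned together. A sketch, where the bucket, key path, and service account
-are the same placeholders used in the usage example above:
-
-[source,json]
---------------------------
-output {
-  google_cloud_storage {
-    bucket => "my_bucket"
-    key_path => "/path/to/privatekey.p12"
-    service_account => "1234@developer.gserviceaccount.com"
-    date_pattern => "%Y-%m-%dT%H:00"  # start a new file every hour...
-    max_file_size_kbytes => 1024      # ...or sooner, once a file reaches about 1 MB
-  }
-}
---------------------------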
- -[id="{version}-plugins-{type}s-{plugin}-output_format"] -===== `output_format` - - * Value can be any of: `json`, `plain` - * Default value is `"plain"` - -The event format you want to store in files. Defaults to plain text. - -[id="{version}-plugins-{type}s-{plugin}-service_account"] -===== `service_account` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -GCS service account. - -[id="{version}-plugins-{type}s-{plugin}-temp_directory"] -===== `temp_directory` - - * Value type is <> - * Default value is `""` - -Directory where temporary files are stored. -Defaults to /tmp/logstash-gcs- - -[id="{version}-plugins-{type}s-{plugin}-uploader_interval_secs"] -===== `uploader_interval_secs` - - * Value type is <> - * Default value is `60` - -Uploader interval when uploading new files to GCS. Adjust time based -on your time pattern (for example, for hourly files, this interval can be -around one hour). - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/google_cloud_storage-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/google_cloud_storage-v3.0.4.asciidoc deleted file mode 100644 index accf9fd08..000000000 --- a/docs/versioned-plugins/outputs/google_cloud_storage-v3.0.4.asciidoc +++ /dev/null @@ -1,206 +0,0 @@ -:plugin: google_cloud_storage -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-google_cloud_storage/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Google_cloud_storage output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Summary: plugin to upload log events to Google Cloud Storage (GCS), rolling -files based on the date pattern provided as a configuration setting. Events -are written to files locally and, once file is closed, this plugin uploads -it to the configured bucket. - -For more info on Google Cloud Storage, please go to: -https://cloud.google.com/products/cloud-storage - -In order to use this plugin, a Google service account must be used. For -more information, please refer to: -https://developers.google.com/storage/docs/authentication#service_accounts - -Recommendation: experiment with the settings depending on how much log -data you generate, so the uploader can keep up with the generated logs. -Using gzip output can be a good option to reduce network traffic when -uploading the log files and in terms of storage costs as well. 
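-
-Acting on that recommendation is a one-line change; a minimal sketch using the
-same placeholder credentials as the usage example that follows:
-
-[source,json]
---------------------------
-output {
-  google_cloud_storage {
-    bucket => "my_bucket"
-    key_path => "/path/to/privatekey.p12"
-    service_account => "1234@developer.gserviceaccount.com"
-    gzip => true  # compress log files before upload to cut network traffic and storage costs
-  }
-}
---------------------------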
-
-USAGE:
-This is an example of a Logstash config:
-
-[source,json]
---------------------------
-output {
-   google_cloud_storage {
-     bucket => "my_bucket"                                    (required)
-     key_path => "/path/to/privatekey.p12"                    (required)
-     key_password => "notasecret"                             (optional)
-     service_account => "1234@developer.gserviceaccount.com"  (required)
-     temp_directory => "/tmp/logstash-gcs"                    (optional)
-     log_file_prefix => "logstash_gcs"                        (optional)
-     max_file_size_kbytes => 1024                             (optional)
-     output_format => "plain"                                 (optional)
-     date_pattern => "%Y-%m-%dT%H:00"                         (optional)
-     flush_interval_secs => 2                                 (optional)
-     gzip => false                                            (optional)
-     uploader_interval_secs => 60                             (optional)
-   }
-}
---------------------------
-
-Improvements TODO list:
-
-* Support Logstash event variables to determine the file name.
-* Turn Google API code into a Plugin Mixin (like AwsConfig).
-* There's no recover method, so if Logstash or the plugin crashes, files may not
-be uploaded to GCS.
-* Allow the user to configure the file name.
-* Allow parallel uploads for heavier loads (plus connection configuration, if
-exposed by the Ruby API client).
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Google_cloud_storage Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-bucket>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-date_pattern>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-flush_interval_secs>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-gzip>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-key_password>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-key_path>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-log_file_prefix>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-max_file_size_kbytes>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-output_format>> |<<string,string>>, one of `["json", "plain"]`|No
-| <<{version}-plugins-{type}s-{plugin}-service_account>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-temp_directory>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-uploader_interval_secs>> |<<number,number>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-bucket"]
-===== `bucket`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-GCS bucket name, without "gs://" or any other prefix.
-
-[id="{version}-plugins-{type}s-{plugin}-date_pattern"]
-===== `date_pattern`
-
- * Value type is <<string,string>>
- * Default value is `"%Y-%m-%dT%H:00"`
-
-Time pattern for log files; defaults to hourly files.
-Must be a valid `Time.strftime` pattern: http://www.ruby-doc.org/core-2.0/Time.html#method-i-strftime
-
-[id="{version}-plugins-{type}s-{plugin}-flush_interval_secs"]
-===== `flush_interval_secs`
-
- * Value type is <<number,number>>
- * Default value is `2`
-
-Flush interval, in seconds, for flushing writes to log files. A value of 0 flushes
-on every message.
-
-[id="{version}-plugins-{type}s-{plugin}-gzip"]
-===== `gzip`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-Gzip the output stream when writing events to log files.
-
-[id="{version}-plugins-{type}s-{plugin}-key_password"]
-===== `key_password`
-
- * Value type is <<string,string>>
- * Default value is `"notasecret"`
-
-GCS private key password.
- -[id="{version}-plugins-{type}s-{plugin}-key_path"] -===== `key_path` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -GCS path to private key file. - -[id="{version}-plugins-{type}s-{plugin}-log_file_prefix"] -===== `log_file_prefix` - - * Value type is <> - * Default value is `"logstash_gcs"` - -Log file prefix. Log file will follow the format: -_hostname_date<.part?>.log - -[id="{version}-plugins-{type}s-{plugin}-max_file_size_kbytes"] -===== `max_file_size_kbytes` - - * Value type is <> - * Default value is `10000` - -Sets max file size in kbytes. 0 disable max file check. - -[id="{version}-plugins-{type}s-{plugin}-output_format"] -===== `output_format` - - * Value can be any of: `json`, `plain` - * Default value is `"plain"` - -The event format you want to store in files. Defaults to plain text. - -[id="{version}-plugins-{type}s-{plugin}-service_account"] -===== `service_account` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -GCS service account. - -[id="{version}-plugins-{type}s-{plugin}-temp_directory"] -===== `temp_directory` - - * Value type is <> - * Default value is `""` - -Directory where temporary files are stored. -Defaults to /tmp/logstash-gcs- - -[id="{version}-plugins-{type}s-{plugin}-uploader_interval_secs"] -===== `uploader_interval_secs` - - * Value type is <> - * Default value is `60` - -Uploader interval when uploading new files to GCS. Adjust time based -on your time pattern (for example, for hourly files, this interval can be -around one hour). - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/graphtastic-index.asciidoc b/docs/versioned-plugins/outputs/graphtastic-index.asciidoc deleted file mode 100644 index 19ecb8c93..000000000 --- a/docs/versioned-plugins/outputs/graphtastic-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: graphtastic -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::graphtastic-v3.0.3.asciidoc[] -include::graphtastic-v3.0.2.asciidoc[] -include::graphtastic-v3.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/graphtastic-v3.0.1.asciidoc b/docs/versioned-plugins/outputs/graphtastic-v3.0.1.asciidoc deleted file mode 100644 index ee3cab446..000000000 --- a/docs/versioned-plugins/outputs/graphtastic-v3.0.1.asciidoc +++ /dev/null @@ -1,148 +0,0 @@ -:plugin: graphtastic -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-graphtastic/blob/v3.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Graphtastic output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-A plugin for GraphTastic, a newly developed Java/Spring metrics application.
-I didn't really want to code this project, but I couldn't find
-a respectable alternative that would also run on any Windows
-machine, which is the problem and why I am not going with Graphite
-and statsd. This application provides multiple integration options
-so as to make its use possible under your network requirements.
-This includes a REST option that is always enabled for your use,
-in case you want to write a small script to send the occasional
-metric data.
-
-Find GraphTastic here: https://github.com/NickPadilla/GraphTastic
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Graphtastic Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-batch_number>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-context>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-error_file>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-integration>> |<<string,string>>, one of `["udp", "tcp", "rmi", "rest"]`|No
-| <<{version}-plugins-{type}s-{plugin}-metrics>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-retries>> |<<number,number>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-batch_number"]
-===== `batch_number`
-
- * Value type is <<number,number>>
- * Default value is `60`
-
-The number of metrics to send to GraphTastic at one time. 60 seems to be the perfect
-amount for UDP with the default packet size.
-
-[id="{version}-plugins-{type}s-{plugin}-context"]
-===== `context`
-
- * Value type is <<string,string>>
- * Default value is `"graphtastic"`
-
-If you are using REST as your endpoint, you also need to provide the application URL;
-it defaults to localhost/graphtastic. You can customize the application URL
-by changing the name of the .war file. There are other ways to change the
-application context, but they vary depending on the application server in use.
-Please consult your application server documentation for more on application
-contexts.
-
-[id="{version}-plugins-{type}s-{plugin}-error_file"]
-===== `error_file`
-
- * Value type is <<string,string>>
- * Default value is `""`
-
-This setting allows you to specify where errored transactions are saved;
-we still need to decide how to reintegrate these error metrics.
-NOT IMPLEMENTED!
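-
-Drawing on the settings above and the connection options described below, a minimal UDP
-sketch might look like this (the host, port, and metric field are illustrative):
-
-[source,ruby]
---------------------------
-output {
-  graphtastic {
-    integration => "udp"
-    host => "127.0.0.1"
-    port => 1399
-    batch_number => 60
-    metrics => { "Response" => "%{response}" }
-  }
-}
---------------------------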
- -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"127.0.0.1"` - -host for the graphtastic server - defaults to 127.0.0.1 - -[id="{version}-plugins-{type}s-{plugin}-integration"] -===== `integration` - - * Value can be any of: `udp`, `tcp`, `rmi`, `rest` - * Default value is `"udp"` - -options are udp(fastest - default) - rmi(faster) - rest(fast) - tcp(don't use TCP yet - some problems - errors out on linux) - -[id="{version}-plugins-{type}s-{plugin}-metrics"] -===== `metrics` - - * Value type is <> - * Default value is `{}` - -metrics hash - you will provide a name for your metric and the metric -data as key value pairs. so for example: - -[source,ruby] -metrics => { "Response" => "%{response}" } - -example for the logstash config - -[source,ruby] -metrics => [ "Response", "%{response}" ] - -NOTE: you can also use the dynamic fields for the key value as well as the actual value - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * There is no default value for this setting. - -port for the graphtastic instance - defaults to 1199 for RMI, 1299 for TCP, 1399 for UDP, and 8080 for REST - -[id="{version}-plugins-{type}s-{plugin}-retries"] -===== `retries` - - * Value type is <> - * Default value is `1` - -number of attempted retry after send error - currently only way to integrate -errored transactions - should try and save to a file or later consumption -either by graphtastic utility or by this program after connectivity is -ensured to be established. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/graphtastic-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/graphtastic-v3.0.2.asciidoc deleted file mode 100644 index 76bdcc9ac..000000000 --- a/docs/versioned-plugins/outputs/graphtastic-v3.0.2.asciidoc +++ /dev/null @@ -1,148 +0,0 @@ -:plugin: graphtastic -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.2 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-graphtastic/blob/v3.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Graphtastic output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -A plugin for a newly developed Java/Spring Metrics application -I didn't really want to code this project but I couldn't find -a respectable alternative that would also run on any Windows -machine - which is the problem and why I am not going with Graphite -and statsd. This application provides multiple integration options -so as to make its use under your network requirements possible. -This includes a REST option that is always enabled for your use -in case you want to write a small script to send the occasional -metric data. - -Find GraphTastic here : https://github.com/NickPadilla/GraphTastic - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Graphtastic Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-batch_number>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-context>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-error_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-integration>> |<>, one of `["udp", "tcp", "rmi", "rest"]`|No -| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retries>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-batch_number"] -===== `batch_number` - - * Value type is <> - * Default value is `60` - -the number of metrics to send to GraphTastic at one time. 60 seems to be the perfect -amount for UDP, with default packet size. - -[id="{version}-plugins-{type}s-{plugin}-context"] -===== `context` - - * Value type is <> - * Default value is `"graphtastic"` - -if using rest as your end point you need to also provide the application url -it defaults to localhost/graphtastic. You can customize the application url -by changing the name of the .war file. There are other ways to change the -application context, but they vary depending on the Application Server in use. -Please consult your application server documentation for more on application -contexts. - -[id="{version}-plugins-{type}s-{plugin}-error_file"] -===== `error_file` - - * Value type is <> - * Default value is `""` - -setting allows you to specify where we save errored transactions -this makes the most sense at this point - will need to decide -on how we reintegrate these error metrics -NOT IMPLEMENTED! - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"127.0.0.1"` - -host for the graphtastic server - defaults to 127.0.0.1 - -[id="{version}-plugins-{type}s-{plugin}-integration"] -===== `integration` - - * Value can be any of: `udp`, `tcp`, `rmi`, `rest` - * Default value is `"udp"` - -options are udp(fastest - default) - rmi(faster) - rest(fast) - tcp(don't use TCP yet - some problems - errors out on linux) - -[id="{version}-plugins-{type}s-{plugin}-metrics"] -===== `metrics` - - * Value type is <> - * Default value is `{}` - -metrics hash - you will provide a name for your metric and the metric -data as key value pairs. so for example: - -[source,ruby] -metrics => { "Response" => "%{response}" } - -example for the logstash config - -[source,ruby] -metrics => [ "Response", "%{response}" ] - -NOTE: you can also use the dynamic fields for the key value as well as the actual value - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * There is no default value for this setting. - -port for the graphtastic instance - defaults to 1199 for RMI, 1299 for TCP, 1399 for UDP, and 8080 for REST - -[id="{version}-plugins-{type}s-{plugin}-retries"] -===== `retries` - - * Value type is <> - * Default value is `1` - -number of attempted retry after send error - currently only way to integrate -errored transactions - should try and save to a file or later consumption -either by graphtastic utility or by this program after connectivity is -ensured to be established. 
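-
-As a sketch of how these settings combine for the REST integration (the host, port, and
-`%{heap_used}` field are hypothetical):
-
-[source,ruby]
---------------------------
-output {
-  graphtastic {
-    integration => "rest"
-    host => "metrics.example.com"
-    port => 8080
-    context => "graphtastic"
-    metrics => { "Heap" => "%{heap_used}" }
-  }
-}
---------------------------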
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/graphtastic-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/graphtastic-v3.0.3.asciidoc deleted file mode 100644 index 61f860fc0..000000000 --- a/docs/versioned-plugins/outputs/graphtastic-v3.0.3.asciidoc +++ /dev/null @@ -1,148 +0,0 @@ -:plugin: graphtastic -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-output-graphtastic/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Graphtastic output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -A plugin for a newly developed Java/Spring Metrics application -I didn't really want to code this project but I couldn't find -a respectable alternative that would also run on any Windows -machine - which is the problem and why I am not going with Graphite -and statsd. This application provides multiple integration options -so as to make its use under your network requirements possible. -This includes a REST option that is always enabled for your use -in case you want to write a small script to send the occasional -metric data. - -Find GraphTastic here : https://github.com/NickPadilla/GraphTastic - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Graphtastic Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-batch_number>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-context>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-error_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-integration>> |<>, one of `["udp", "tcp", "rmi", "rest"]`|No -| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retries>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-batch_number"] -===== `batch_number` - - * Value type is <> - * Default value is `60` - -the number of metrics to send to GraphTastic at one time. 60 seems to be the perfect -amount for UDP, with default packet size. - -[id="{version}-plugins-{type}s-{plugin}-context"] -===== `context` - - * Value type is <> - * Default value is `"graphtastic"` - -if using rest as your end point you need to also provide the application url -it defaults to localhost/graphtastic. You can customize the application url -by changing the name of the .war file. There are other ways to change the -application context, but they vary depending on the Application Server in use. 
-Please consult your application server documentation for more on application -contexts. - -[id="{version}-plugins-{type}s-{plugin}-error_file"] -===== `error_file` - - * Value type is <> - * Default value is `""` - -setting allows you to specify where we save errored transactions -this makes the most sense at this point - will need to decide -on how we reintegrate these error metrics -NOT IMPLEMENTED! - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"127.0.0.1"` - -host for the graphtastic server - defaults to 127.0.0.1 - -[id="{version}-plugins-{type}s-{plugin}-integration"] -===== `integration` - - * Value can be any of: `udp`, `tcp`, `rmi`, `rest` - * Default value is `"udp"` - -options are udp(fastest - default) - rmi(faster) - rest(fast) - tcp(don't use TCP yet - some problems - errors out on linux) - -[id="{version}-plugins-{type}s-{plugin}-metrics"] -===== `metrics` - - * Value type is <> - * Default value is `{}` - -metrics hash - you will provide a name for your metric and the metric -data as key value pairs. so for example: - -[source,ruby] -metrics => { "Response" => "%{response}" } - -example for the logstash config - -[source,ruby] -metrics => [ "Response", "%{response}" ] - -NOTE: you can also use the dynamic fields for the key value as well as the actual value - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * There is no default value for this setting. - -port for the graphtastic instance - defaults to 1199 for RMI, 1299 for TCP, 1399 for UDP, and 8080 for REST - -[id="{version}-plugins-{type}s-{plugin}-retries"] -===== `retries` - - * Value type is <> - * Default value is `1` - -number of attempted retry after send error - currently only way to integrate -errored transactions - should try and save to a file or later consumption -either by graphtastic utility or by this program after connectivity is -ensured to be established. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/hipchat-index.asciidoc b/docs/versioned-plugins/outputs/hipchat-index.asciidoc deleted file mode 100644 index 856f57c48..000000000 --- a/docs/versioned-plugins/outputs/hipchat-index.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -:plugin: hipchat -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-06-23 -|======================================================================= - -include::hipchat-v4.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/hipchat-v4.0.3.asciidoc b/docs/versioned-plugins/outputs/hipchat-v4.0.3.asciidoc deleted file mode 100644 index d3bc367ca..000000000 --- a/docs/versioned-plugins/outputs/hipchat-v4.0.3.asciidoc +++ /dev/null @@ -1,122 +0,0 @@ -:plugin: hipchat -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-hipchat/blob/v4.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Hipchat output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output allows you to write events to https://www.hipchat.com/[HipChat].
-
-Make sure your API token has the appropriate permissions and supports
-sending messages.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Hipchat Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-color>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-format>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-from>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-message_format>> |<<string,string>>, one of `["html", "text"]`|No
-| <<{version}-plugins-{type}s-{plugin}-room_id>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-token>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-trigger_notify>> |<<boolean,boolean>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-color"]
-===== `color`
-
- * Value type is <<string,string>>
- * Default value is `"yellow"`
-
-Background color for the message.
-HipChat currently supports one of "yellow", "red", "green", "purple",
-"gray", or "random". Field references are supported.
-
-[id="{version}-plugins-{type}s-{plugin}-format"]
-===== `format`
-
- * Value type is <<string,string>>
- * Default value is `"%{message}"`
-
-Message format to send; event tokens are usable here.
-
-[id="{version}-plugins-{type}s-{plugin}-from"]
-===== `from`
-
- * Value type is <<string,string>>
- * Default value is `"logstash"`
-
-The name the message will appear to be sent from. Field references are supported.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-HipChat host to use.
-
-[id="{version}-plugins-{type}s-{plugin}-message_format"]
-===== `message_format`
-
- * Value can be any of: `html`, `text`
- * Default value is `"html"`
-
-Specify the message format, either `html` or `text`.
-
-[id="{version}-plugins-{type}s-{plugin}-room_id"]
-===== `room_id`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The ID or name of the room. Field references are supported.
-
-[id="{version}-plugins-{type}s-{plugin}-token"]
-===== `token`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The HipChat authentication token.
-
-[id="{version}-plugins-{type}s-{plugin}-trigger_notify"]
-===== `trigger_notify`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-Whether or not this message should trigger a notification for people in the room.
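-
-Putting these options together, a minimal sketch might look like this (the token and room
-name are placeholders):
-
-[source,ruby]
---------------------------
-output {
-  hipchat {
-    token => "YOUR_API_TOKEN"
-    room_id => "ops-alerts"
-    from => "logstash"
-    color => "red"
-    trigger_notify => true
-    format => "Alert on %{host}: %{message}"
-  }
-}
---------------------------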
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/http-index.asciidoc b/docs/versioned-plugins/outputs/http-index.asciidoc
deleted file mode 100644
index 9c52387b4..000000000
--- a/docs/versioned-plugins/outputs/http-index.asciidoc
+++ /dev/null
@@ -1,30 +0,0 @@
-:plugin: http
-:type: output
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <<v5.1.2-plugins-outputs-http,v5.1.2>> | 2018-01-09
-| <<v5.1.1-plugins-outputs-http,v5.1.1>> | 2017-11-07
-| <<v5.1.0-plugins-outputs-http,v5.1.0>> | 2017-08-23
-| <<v5.0.1-plugins-outputs-http,v5.0.1>> | 2017-08-16
-| <<v5.0.0-plugins-outputs-http,v5.0.0>> | 2017-08-02
-| <<v4.4.0-plugins-outputs-http,v4.4.0>> | 2017-08-23
-| <<v4.3.4-plugins-outputs-http,v4.3.4>> | 2017-08-18
-| <<v4.3.2-plugins-outputs-http,v4.3.2>> | 2017-07-06
-| <<v4.3.1-plugins-outputs-http,v4.3.1>> | 2017-06-23
-| <<v4.3.0-plugins-outputs-http,v4.3.0>> | 2017-05-08
-|=======================================================================
-
-include::http-v5.1.2.asciidoc[]
-include::http-v5.1.1.asciidoc[]
-include::http-v5.1.0.asciidoc[]
-include::http-v5.0.1.asciidoc[]
-include::http-v5.0.0.asciidoc[]
-include::http-v4.4.0.asciidoc[]
-include::http-v4.3.4.asciidoc[]
-include::http-v4.3.2.asciidoc[]
-include::http-v4.3.1.asciidoc[]
-include::http-v4.3.0.asciidoc[]
-
diff --git a/docs/versioned-plugins/outputs/http-v4.3.0.asciidoc b/docs/versioned-plugins/outputs/http-v4.3.0.asciidoc
deleted file mode 100644
index 93f540faa..000000000
--- a/docs/versioned-plugins/outputs/http-v4.3.0.asciidoc
+++ /dev/null
@@ -1,380 +0,0 @@
-:plugin: http
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.3.0
-:release_date: 2017-05-08
-:changelog_url: https://github.com/logstash-plugins/logstash-output-http/blob/v4.3.0/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Http output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Http Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-content_type>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-cookies>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-format>> |<<string,string>>, one of `["json", "form", "message"]`|No
-| <<{version}-plugins-{type}s-{plugin}-headers>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-http_method>> |<<string,string>>, one of `["put", "post", "patch", "delete", "get", "head"]`|Yes
-| <<{version}-plugins-{type}s-{plugin}-ignorable_codes>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<<password,password>>|No
-| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-mapping>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-message>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-retry_failed>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-retryable_codes>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_validation>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<<password,password>>|No
-| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-url>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<<number,number>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-automatic_retries"]
-===== `automatic_retries`
-
- * Value type is <<number,number>>
- * Default value is `1`
-
-How many times should the client retry a failing URL? We highly recommend NOT setting this value
-to zero if keepalive is enabled. Some servers incorrectly end keepalives early, requiring a retry!
-Note: if `retry_non_idempotent` is not set, only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried.
-
-[id="{version}-plugins-{type}s-{plugin}-cacert"]
-===== `cacert`
-
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-If you need to use a custom X.509 CA (.pem certs), specify the path to it here.
-
-[id="{version}-plugins-{type}s-{plugin}-client_cert"]
-===== `client_cert`
-
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-If you'd like to use a client certificate (note: most people don't want this), set the path to the X.509 cert here.
-
-[id="{version}-plugins-{type}s-{plugin}-client_key"]
-===== `client_key`
-
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-If you're using a client certificate, specify the path to the encryption key here.
-
-[id="{version}-plugins-{type}s-{plugin}-connect_timeout"]
-===== `connect_timeout`
-
- * Value type is <<number,number>>
- * Default value is `10`
-
-Timeout (in seconds) to wait for a connection to be established. Default is `10s`.
-
-[id="{version}-plugins-{type}s-{plugin}-content_type"]
-===== `content_type`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Content type.
-
-If not specified, this defaults to the following:
-
-* if format is "json", "application/json"
-* if format is "form", "application/x-www-form-urlencoded"
-
-[id="{version}-plugins-{type}s-{plugin}-cookies"]
-===== `cookies`
-
- * Value type is <<boolean,boolean>>
- * Default value is `true`
-
-Enable cookie support. With this enabled, the client will persist cookies
-across requests as a normal web browser would. Enabled by default.
-
-[id="{version}-plugins-{type}s-{plugin}-follow_redirects"]
-===== `follow_redirects`
-
- * Value type is <<boolean,boolean>>
- * Default value is `true`
-
-Should redirects be followed? Defaults to `true`.
-
-[id="{version}-plugins-{type}s-{plugin}-format"]
-===== `format`
-
- * Value can be any of: `json`, `form`, `message`
- * Default value is `"json"`
-
-Set the format of the HTTP body.
-
-If form, then the body will be the mapping (or whole event) converted
-into a query parameter string, e.g. `foo=bar&baz=fizz...`
-
-If message, then the body will be the result of formatting the event according to message.
-
-Otherwise, the event is sent as JSON.
-
-[id="{version}-plugins-{type}s-{plugin}-headers"]
-===== `headers`
-
- * Value type is <<hash,hash>>
- * There is no default value for this setting.
-
-Custom headers to use. The format is `headers => ["X-My-Header", "%{host}"]`.
-
-[id="{version}-plugins-{type}s-{plugin}-http_method"]
-===== `http_method`
-
- * This is a required setting.
- * Value can be any of: `put`, `post`, `patch`, `delete`, `get`, `head`
- * There is no default value for this setting.
-
-The HTTP verb. One of "put", "post", "patch", "delete", "get", "head".
-
-[id="{version}-plugins-{type}s-{plugin}-ignorable_codes"]
-===== `ignorable_codes`
-
- * Value type is <<number,number>>
- * There is no default value for this setting.
-
-If you would like to consider some non-2xx codes to be successes,
-enumerate them here. Responses returning these codes will be considered successes.
-
-[id="{version}-plugins-{type}s-{plugin}-keepalive"]
-===== `keepalive`
-
- * Value type is <<boolean,boolean>>
- * Default value is `true`
-
-Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least
-one with this, to fix interactions with broken keepalive implementations.
-
-[id="{version}-plugins-{type}s-{plugin}-keystore"]
-===== `keystore`
-
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-If you need to use a custom keystore (`.jks`), specify it here. This does not work with .pem keys!
-
-[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
-===== `keystore_password`
-
- * Value type is <<password,password>>
- * There is no default value for this setting.
-
-Specify the keystore password here.
-Note: most .jks files created with keytool require a password!
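-
-As a sketch of a client keystore setup using the options above (the URL, path, and
-password are placeholders):
-
-[source,ruby]
---------------------------
-output {
-  http {
-    url => "https://ingest.example.com/events"
-    http_method => "post"
-    keystore => "/etc/logstash/client.jks"
-    keystore_password => "changeit"
-  }
-}
---------------------------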
-
-[id="{version}-plugins-{type}s-{plugin}-keystore_type"]
-===== `keystore_type`
-
- * Value type is <<string,string>>
- * Default value is `"JKS"`
-
-Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS`.
-
-[id="{version}-plugins-{type}s-{plugin}-mapping"]
-===== `mapping`
-
- * Value type is <<hash,hash>>
- * There is no default value for this setting.
-
-This lets you choose the structure and parts of the event that are sent.
-
-For example:
-[source,ruby]
-    mapping => {"foo" => "%{host}"
-                "bar" => "%{type}"}
-
-[id="{version}-plugins-{type}s-{plugin}-message"]
-===== `message`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-pool_max"]
-===== `pool_max`
-
- * Value type is <<number,number>>
- * Default value is `50`
-
-Max number of concurrent connections. Defaults to `50`.
-
-[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
-===== `pool_max_per_route`
-
- * Value type is <<number,number>>
- * Default value is `25`
-
-Max number of concurrent connections to a single host. Defaults to `25`.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy"]
-===== `proxy`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-If you'd like to use an HTTP proxy, set it here. This supports multiple configuration syntaxes:
-
-1. Proxy host in form: `http://proxy.org:1234`
-2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}`
-3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}`
-
-[id="{version}-plugins-{type}s-{plugin}-request_timeout"]
-===== `request_timeout`
-
- * Value type is <<number,number>>
- * Default value is `60`
-
-Timeout (in seconds) for the entire request.
-This plugin is based on https://github.com/cheald/manticore[Manticore], which makes it easy to add
-a fully configured HTTP client to Logstash. For an example of its usage, see
-https://github.com/logstash-plugins/logstash-input-http_poller
-
-[id="{version}-plugins-{type}s-{plugin}-retry_failed"]
-===== `retry_failed`
-
- * Value type is <<boolean,boolean>>
- * Default value is `true`
-
-Set this to false if you don't want this output to retry failed requests.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"]
-===== `retry_non_idempotent`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-If `automatic_retries` is enabled, this will cause non-idempotent HTTP verbs (such as POST) to be retried.
-
-[id="{version}-plugins-{type}s-{plugin}-retryable_codes"]
-===== `retryable_codes`
-
- * Value type is <<number,number>>
- * Default value is `[429, 500, 502, 503, 504]`
-
-If these response codes are encountered, this plugin will retry the requests.
-
-[id="{version}-plugins-{type}s-{plugin}-socket_timeout"]
-===== `socket_timeout`
-
- * Value type is <<number,number>>
- * Default value is `10`
-
-Timeout (in seconds) to wait for data on the socket. Default is `10s`.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_validation"]
-===== `ssl_certificate_validation`
-
- * Value type is <<boolean,boolean>>
- * Default value is `true`
-
-Set this to false to disable SSL/TLS certificate validation.
-Note: setting this to false is generally considered insecure!
-
-[id="{version}-plugins-{type}s-{plugin}-truststore"]
-===== `truststore`
-
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-If you need to use a custom truststore (`.jks`), specify it here. This does not work with .pem certs!
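-
-As a brief illustration of the first proxy syntax above (the proxy address and URL are
-placeholders):
-
-[source,ruby]
---------------------------
-output {
-  http {
-    url => "https://ingest.example.com/events"
-    http_method => "post"
-    proxy => "http://proxy.example.org:3128"
-  }
-}
---------------------------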
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
-===== `truststore_password`
-
- * Value type is <<password,password>>
- * There is no default value for this setting.
-
-Specify the truststore password here.
-Note: most .jks files created with keytool require a password!
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
-===== `truststore_type`
-
- * Value type is <<string,string>>
- * Default value is `"JKS"`
-
-Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`.
-
-[id="{version}-plugins-{type}s-{plugin}-url"]
-===== `url`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-URL to use. This output lets you send events to a
-generic HTTP(S) endpoint.
-
-This output will execute up to `pool_max` requests in parallel for performance.
-Consider this when tuning this plugin for performance.
-
-Additionally, note that when parallel execution is used, strict ordering of events is not
-guaranteed!
-
-Beware: this gem does not yet support codecs. Please use the `format` option for now.
-
-[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
-===== `validate_after_inactivity`
-
- * Value type is <<number,number>>
- * Default value is `200`
-
-How long to wait before checking whether a connection is stale, prior to executing a request on a connection using keepalive.
-You may want to set this lower, possibly to 0, if you get connection errors regularly.
-Quoting the Apache Commons docs (this client is based on Apache Commons):
-'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.'
-See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/http-v4.3.1.asciidoc b/docs/versioned-plugins/outputs/http-v4.3.1.asciidoc
deleted file mode 100644
index fa66f652c..000000000
--- a/docs/versioned-plugins/outputs/http-v4.3.1.asciidoc
+++ /dev/null
@@ -1,381 +0,0 @@
-:plugin: http
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.3.1
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-output-http/blob/v4.3.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Http output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Http Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-content_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-format>> |<>, one of `["json", "form", "message"]`|No -| <<{version}-plugins-{type}s-{plugin}-headers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-http_method>> |<>, one of `["put", "post", "patch", "delete", "get", "head"]`|Yes -| <<{version}-plugins-{type}s-{plugin}-ignorable_codes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mapping>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_failed>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retryable_codes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_validation>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] -===== `automatic_retries` - - * Value type is <> - * Default value is `1` - -How many times should the client retry a failing URL. We highly recommend NOT setting this value -to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! -Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. - -[id="{version}-plugins-{type}s-{plugin}-cacert"] -===== `cacert` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom X.509 CA (.pem certs) specify the path to that here - -[id="{version}-plugins-{type}s-{plugin}-client_cert"] -===== `client_cert` - - * Value type is <> - * There is no default value for this setting. 
- -If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here - -[id="{version}-plugins-{type}s-{plugin}-client_key"] -===== `client_key` - - * Value type is <> - * There is no default value for this setting. - -If you're using a client certificate specify the path to the encryption key here - -[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] -===== `connect_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for a connection to be established. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-content_type"] -===== `content_type` - - * Value type is <> - * There is no default value for this setting. - -Content type - -If not specified, this defaults to the following: - -* if format is "json", "application/json" -* if format is "form", "application/x-www-form-urlencoded" - -[id="{version}-plugins-{type}s-{plugin}-cookies"] -===== `cookies` - - * Value type is <> - * Default value is `true` - -Enable cookie support. With this enabled the client will persist cookies -across requests as a normal web browser would. Enabled by default - -[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] -===== `follow_redirects` - - * Value type is <> - * Default value is `true` - -Should redirects be followed? Defaults to `true` - -[id="{version}-plugins-{type}s-{plugin}-format"] -===== `format` - - * Value can be any of: `json`, `form`, `message` - * Default value is `"json"` - -Set the format of the http body. - -If form, then the body will be the mapping (or whole event) converted -into a query parameter string, e.g. `foo=bar&baz=fizz...` - -If message, then the body will be the result of formatting the event according to message - -Otherwise, the event is sent as json. - -[id="{version}-plugins-{type}s-{plugin}-headers"] -===== `headers` - - * Value type is <> - * There is no default value for this setting. - -Custom headers to use -format is `headers => ["X-My-Header", "%{host}"]` - -[id="{version}-plugins-{type}s-{plugin}-http_method"] -===== `http_method` - - * This is a required setting. - * Value can be any of: `put`, `post`, `patch`, `delete`, `get`, `head` - * There is no default value for this setting. - -The HTTP Verb. One of "put", "post", "patch", "delete", "get", "head" - -[id="{version}-plugins-{type}s-{plugin}-ignorable_codes"] -===== `ignorable_codes` - - * Value type is <> - * There is no default value for this setting. - -If you would like to consider some non-2xx codes to be successes -enumerate them here. Responses returning these codes will be considered successes - -[id="{version}-plugins-{type}s-{plugin}-keepalive"] -===== `keepalive` - - * Value type is <> - * Default value is `true` - -Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least -one with this to fix interactions with broken keepalive implementations. - -[id="{version}-plugins-{type}s-{plugin}-keystore"] -===== `keystore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! - -[id="{version}-plugins-{type}s-{plugin}-keystore_password"] -===== `keystore_password` - - * Value type is <> - * There is no default value for this setting. - -Specify the keystore password here. -Note, most .jks files created with keytool require a password! 
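-
-For example, a sketch of tuning the retry behavior described above (the URL is a
-placeholder):
-
-[source,ruby]
---------------------------
-output {
-  http {
-    url => "https://ingest.example.com/events"
-    http_method => "post"
-    automatic_retries => 2
-    retry_non_idempotent => true
-    ignorable_codes => [404]
-  }
-}
---------------------------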
- -[id="{version}-plugins-{type}s-{plugin}-keystore_type"] -===== `keystore_type` - - * Value type is <> - * Default value is `"JKS"` - -Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS` - -[id="{version}-plugins-{type}s-{plugin}-mapping"] -===== `mapping` - - * Value type is <> - * There is no default value for this setting. - -This lets you choose the structure and parts of the event that are sent. - - -For example: -[source,ruby] - mapping => {"foo" => "%{host}" - "bar" => "%{type}"} - -[id="{version}-plugins-{type}s-{plugin}-message"] -===== `message` - - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-pool_max"] -===== `pool_max` - - * Value type is <> - * Default value is `50` - -Max number of concurrent connections. Defaults to `50` - -[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] -===== `pool_max_per_route` - - * Value type is <> - * Default value is `25` - -Max number of concurrent connections to a single host. Defaults to `25` - -[id="{version}-plugins-{type}s-{plugin}-proxy"] -===== `proxy` - - * Value type is <> - * There is no default value for this setting. - -If you'd like to use an HTTP proxy . This supports multiple configuration syntaxes: - -1. Proxy host in form: `http://proxy.org:1234` -2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}` -3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}` - -[id="{version}-plugins-{type}s-{plugin}-request_timeout"] -===== `request_timeout` - - * Value type is <> - * Default value is `60` - -This module makes it easy to add a very fully configured HTTP client to logstash -based on [Manticore](https://github.com/cheald/manticore). -For an example of its usage see https://github.com/logstash-plugins/logstash-input-http_poller -Timeout (in seconds) for the entire request - -[id="{version}-plugins-{type}s-{plugin}-retry_failed"] -===== `retry_failed` - - * Value type is <> - * Default value is `true` - -Set this to false if you don't want this output to retry failed requests - -[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"] -===== `retry_non_idempotent` - - * Value type is <> - * Default value is `false` - -If `automatic_retries` is enabled this will cause non-idempotent HTTP verbs (such as POST) to be retried. - -[id="{version}-plugins-{type}s-{plugin}-retryable_codes"] -===== `retryable_codes` - - * Value type is <> - * Default value is `[429, 500, 502, 503, 504]` - -If encountered as response codes this plugin will retry these requests - -[id="{version}-plugins-{type}s-{plugin}-socket_timeout"] -===== `socket_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for data on the socket. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_validation"] -===== `ssl_certificate_validation` - - * Value type is <> - * Default value is `true` - -Set this to false to disable SSL/TLS certificate validation -Note: setting this to false is generally considered insecure! - -[id="{version}-plugins-{type}s-{plugin}-truststore"] -===== `truststore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs! 
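-
-As an illustration of the `format` and `mapping` options above (the URL and field names
-are arbitrary):
-
-[source,ruby]
---------------------------
-output {
-  http {
-    url => "https://ingest.example.com/events"
-    http_method => "post"
-    format => "form"
-    mapping => {
-      "hostname" => "%{host}"
-      "event_type" => "%{type}"
-    }
-  }
-}
---------------------------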
- -[id="{version}-plugins-{type}s-{plugin}-truststore_password"] -===== `truststore_password` - - * Value type is <> - * There is no default value for this setting. - -Specify the truststore password here. -Note, most .jks files created with keytool require a password! - -[id="{version}-plugins-{type}s-{plugin}-truststore_type"] -===== `truststore_type` - - * Value type is <> - * Default value is `"JKS"` - -Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS` - -[id="{version}-plugins-{type}s-{plugin}-url"] -===== `url` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -This output lets you send events to a -generic HTTP(S) endpoint - -This output will execute up to 'pool_max' requests in parallel for performance. -Consider this when tuning this plugin for performance. - -Additionally, note that when parallel execution is used strict ordering of events is not -guaranteed! - -Beware, this gem does not yet support codecs. Please use the 'format' option for now. -URL to use - -[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] -===== `validate_after_inactivity` - - * Value type is <> - * Default value is `200` - -How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. -# You may want to set this lower, possibly to 0 if you get connection errors regularly -Quoting the Apache commons docs (this client is based Apache Commmons): -'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.' -See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info] - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/http-v4.3.2.asciidoc b/docs/versioned-plugins/outputs/http-v4.3.2.asciidoc deleted file mode 100644 index 659b382b3..000000000 --- a/docs/versioned-plugins/outputs/http-v4.3.2.asciidoc +++ /dev/null @@ -1,377 +0,0 @@ -:plugin: http -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.3.2 -:release_date: 2017-07-06 -:changelog_url: https://github.com/logstash-plugins/logstash-output-http/blob/v4.3.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Http output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Http Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-content_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-format>> |<>, one of `["json", "form", "message"]`|No -| <<{version}-plugins-{type}s-{plugin}-headers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-http_method>> |<>, one of `["put", "post", "patch", "delete", "get", "head"]`|Yes -| <<{version}-plugins-{type}s-{plugin}-ignorable_codes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mapping>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_failed>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retryable_codes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_validation>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] -===== `automatic_retries` - - * Value type is <> - * Default value is `1` - -How many times should the client retry a failing URL. We highly recommend NOT setting this value -to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! -Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. - -[id="{version}-plugins-{type}s-{plugin}-cacert"] -===== `cacert` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom X.509 CA (.pem certs) specify the path to that here - -[id="{version}-plugins-{type}s-{plugin}-client_cert"] -===== `client_cert` - - * Value type is <> - * There is no default value for this setting. 
- -If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here - -[id="{version}-plugins-{type}s-{plugin}-client_key"] -===== `client_key` - - * Value type is <> - * There is no default value for this setting. - -If you're using a client certificate specify the path to the encryption key here - -[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] -===== `connect_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for a connection to be established. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-content_type"] -===== `content_type` - - * Value type is <> - * There is no default value for this setting. - -Content type - -If not specified, this defaults to the following: - -* if format is "json", "application/json" -* if format is "form", "application/x-www-form-urlencoded" - -[id="{version}-plugins-{type}s-{plugin}-cookies"] -===== `cookies` - - * Value type is <> - * Default value is `true` - -Enable cookie support. With this enabled the client will persist cookies -across requests as a normal web browser would. Enabled by default - -[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] -===== `follow_redirects` - - * Value type is <> - * Default value is `true` - -Should redirects be followed? Defaults to `true` - -[id="{version}-plugins-{type}s-{plugin}-format"] -===== `format` - - * Value can be any of: `json`, `form`, `message` - * Default value is `"json"` - -Set the format of the http body. - -If form, then the body will be the mapping (or whole event) converted -into a query parameter string, e.g. `foo=bar&baz=fizz...` - -If message, then the body will be the result of formatting the event according to message - -Otherwise, the event is sent as json. - -[id="{version}-plugins-{type}s-{plugin}-headers"] -===== `headers` - - * Value type is <> - * There is no default value for this setting. - -Custom headers to use -format is `headers => ["X-My-Header", "%{host}"]` - -[id="{version}-plugins-{type}s-{plugin}-http_method"] -===== `http_method` - - * This is a required setting. - * Value can be any of: `put`, `post`, `patch`, `delete`, `get`, `head` - * There is no default value for this setting. - -The HTTP Verb. One of "put", "post", "patch", "delete", "get", "head" - -[id="{version}-plugins-{type}s-{plugin}-ignorable_codes"] -===== `ignorable_codes` - - * Value type is <> - * There is no default value for this setting. - -If you would like to consider some non-2xx codes to be successes -enumerate them here. Responses returning these codes will be considered successes - -[id="{version}-plugins-{type}s-{plugin}-keepalive"] -===== `keepalive` - - * Value type is <> - * Default value is `true` - -Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least -one with this to fix interactions with broken keepalive implementations. - -[id="{version}-plugins-{type}s-{plugin}-keystore"] -===== `keystore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! - -[id="{version}-plugins-{type}s-{plugin}-keystore_password"] -===== `keystore_password` - - * Value type is <> - * There is no default value for this setting. - -Specify the keystore password here. -Note, most .jks files created with keytool require a password! 
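-
-For example, a sketch that presents a client certificate from a JKS keystore, using the
-keystore options in this section (`keystore_type` is described just below), might look like
-this; the endpoint URL, path, and password are placeholders, not working values:
-
-[source,ruby]
-    output {
-      http {
-        url => "https://ingest.example.org"      # placeholder endpoint
-        http_method => "post"
-        keystore => "/etc/logstash/client.jks"   # placeholder path
-        keystore_password => "changeit"          # placeholder password
-        keystore_type => "JKS"
-      }
-    }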
-
-[id="{version}-plugins-{type}s-{plugin}-keystore_type"]
-===== `keystore_type`
-
-  * Value type is <>
-  * Default value is `"JKS"`
-
-Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS`
-
-[id="{version}-plugins-{type}s-{plugin}-mapping"]
-===== `mapping`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-This lets you choose the structure and parts of the event that are sent.
-
-
-For example:
-[source,ruby]
-    mapping => {"foo" => "%{host}"
-               "bar" => "%{type}"}
-
-[id="{version}-plugins-{type}s-{plugin}-message"]
-===== `message`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-pool_max"]
-===== `pool_max`
-
-  * Value type is <>
-  * Default value is `50`
-
-Max number of concurrent connections. Defaults to `50`
-
-[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
-===== `pool_max_per_route`
-
-  * Value type is <>
-  * Default value is `25`
-
-Max number of concurrent connections to a single host. Defaults to `25`
-
-[id="{version}-plugins-{type}s-{plugin}-proxy"]
-===== `proxy`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-Set this if you'd like to use an HTTP proxy. It supports multiple configuration syntaxes
-(see the sketch after the `truststore` option below):
-
-1. Proxy host in form: `http://proxy.org:1234`
-2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}`
-3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}`
-
-[id="{version}-plugins-{type}s-{plugin}-request_timeout"]
-===== `request_timeout`
-
-  * Value type is <>
-  * Default value is `60`
-
-Timeout (in seconds) for the entire request.
-The underlying HTTP client is based on https://github.com/cheald/manticore[Manticore].
-
-[id="{version}-plugins-{type}s-{plugin}-retry_failed"]
-===== `retry_failed`
-
-  * Value type is <>
-  * Default value is `true`
-
-Set this to false if you don't want this output to retry failed requests.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"]
-===== `retry_non_idempotent`
-
-  * Value type is <>
-  * Default value is `false`
-
-If `automatic_retries` is enabled, this will cause non-idempotent HTTP verbs (such as POST) to be retried.
-
-[id="{version}-plugins-{type}s-{plugin}-retryable_codes"]
-===== `retryable_codes`
-
-  * Value type is <>
-  * Default value is `[429, 500, 502, 503, 504]`
-
-If these response codes are encountered, this plugin will retry the requests.
-
-[id="{version}-plugins-{type}s-{plugin}-socket_timeout"]
-===== `socket_timeout`
-
-  * Value type is <>
-  * Default value is `10`
-
-Timeout (in seconds) to wait for data on the socket. Default is `10s`
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_validation"]
-===== `ssl_certificate_validation`
-
-  * Value type is <>
-  * Default value is `true`
-
-Set this to false to disable SSL/TLS certificate validation.
-Note: setting this to false is generally considered insecure!
-
-[id="{version}-plugins-{type}s-{plugin}-truststore"]
-===== `truststore`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs!
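-
-As a sketch of the first `proxy` syntax listed above (a plain proxy URL string); the
-endpoint and proxy addresses are placeholders, not working values:
-
-[source,ruby]
-    output {
-      http {
-        url => "http://ingest.example.org"          # placeholder endpoint
-        http_method => "post"
-        proxy => "http://proxy.example.org:3128"    # placeholder proxy
-      }
-    }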
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
-===== `truststore_password`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-Specify the truststore password here.
-Note, most .jks files created with keytool require a password!
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
-===== `truststore_type`
-
-  * Value type is <>
-  * Default value is `"JKS"`
-
-Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`
-
-[id="{version}-plugins-{type}s-{plugin}-url"]
-===== `url`
-
-  * This is a required setting.
-  * Value type is <>
-  * There is no default value for this setting.
-
-The URL to send events to.
-
-[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
-===== `validate_after_inactivity`
-
-  * Value type is <>
-  * Default value is `200`
-
-How long to wait, in milliseconds, before checking whether a keepalive connection is stale prior to executing a request on it.
-You may want to set this lower, possibly to 0, if you get connection errors regularly.
-Quoting the Apache Commons docs (this client is based on Apache Commons):
-'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.'
-See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/http-v4.3.4.asciidoc b/docs/versioned-plugins/outputs/http-v4.3.4.asciidoc
deleted file mode 100644
index 79519d7db..000000000
--- a/docs/versioned-plugins/outputs/http-v4.3.4.asciidoc
+++ /dev/null
@@ -1,379 +0,0 @@
-:plugin: http
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.3.4
-:release_date: 2017-08-18
-:changelog_url: https://github.com/logstash-plugins/logstash-output-http/blob/v4.3.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Http output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output lets you send events to a generic HTTP(S) endpoint.
-
-This output will execute up to `pool_max` requests in parallel for performance.
-Consider this when tuning this plugin for performance.
-
-Additionally, note that when parallel execution is used, strict ordering of events is not
-guaranteed!
-
-Beware, this gem does not yet support codecs. Please use the `format` option for now.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Http Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Http Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-content_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-format>> |<>, one of `["json", "form", "message"]`|No -| <<{version}-plugins-{type}s-{plugin}-headers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-http_method>> |<>, one of `["put", "post", "patch", "delete", "get", "head"]`|Yes -| <<{version}-plugins-{type}s-{plugin}-ignorable_codes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mapping>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_failed>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retryable_codes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_validation>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] -===== `automatic_retries` - - * Value type is <> - * Default value is `1` - -How many times should the client retry a failing URL. We highly recommend NOT setting this value -to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! -Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. - -[id="{version}-plugins-{type}s-{plugin}-cacert"] -===== `cacert` - - * Value type is <> - * There is no default value for this setting. 
- -If you need to use a custom X.509 CA (.pem certs) specify the path to that here - -[id="{version}-plugins-{type}s-{plugin}-client_cert"] -===== `client_cert` - - * Value type is <> - * There is no default value for this setting. - -If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here - -[id="{version}-plugins-{type}s-{plugin}-client_key"] -===== `client_key` - - * Value type is <> - * There is no default value for this setting. - -If you're using a client certificate specify the path to the encryption key here - -[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] -===== `connect_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for a connection to be established. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-content_type"] -===== `content_type` - - * Value type is <> - * There is no default value for this setting. - -Content type - -If not specified, this defaults to the following: - -* if format is "json", "application/json" -* if format is "form", "application/x-www-form-urlencoded" - -[id="{version}-plugins-{type}s-{plugin}-cookies"] -===== `cookies` - - * Value type is <> - * Default value is `true` - -Enable cookie support. With this enabled the client will persist cookies -across requests as a normal web browser would. Enabled by default - -[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] -===== `follow_redirects` - - * Value type is <> - * Default value is `true` - -Should redirects be followed? Defaults to `true` - -[id="{version}-plugins-{type}s-{plugin}-format"] -===== `format` - - * Value can be any of: `json`, `form`, `message` - * Default value is `"json"` - -Set the format of the http body. - -If form, then the body will be the mapping (or whole event) converted -into a query parameter string, e.g. `foo=bar&baz=fizz...` - -If message, then the body will be the result of formatting the event according to message - -Otherwise, the event is sent as json. - -[id="{version}-plugins-{type}s-{plugin}-headers"] -===== `headers` - - * Value type is <> - * There is no default value for this setting. - -Custom headers to use -format is `headers => ["X-My-Header", "%{host}"]` - -[id="{version}-plugins-{type}s-{plugin}-http_method"] -===== `http_method` - - * This is a required setting. - * Value can be any of: `put`, `post`, `patch`, `delete`, `get`, `head` - * There is no default value for this setting. - -The HTTP Verb. One of "put", "post", "patch", "delete", "get", "head" - -[id="{version}-plugins-{type}s-{plugin}-ignorable_codes"] -===== `ignorable_codes` - - * Value type is <> - * There is no default value for this setting. - -If you would like to consider some non-2xx codes to be successes -enumerate them here. Responses returning these codes will be considered successes - -[id="{version}-plugins-{type}s-{plugin}-keepalive"] -===== `keepalive` - - * Value type is <> - * Default value is `true` - -Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least -one with this to fix interactions with broken keepalive implementations. - -[id="{version}-plugins-{type}s-{plugin}-keystore"] -===== `keystore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! 
-
-[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
-===== `keystore_password`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-Specify the keystore password here.
-Note, most .jks files created with keytool require a password!
-
-[id="{version}-plugins-{type}s-{plugin}-keystore_type"]
-===== `keystore_type`
-
-  * Value type is <>
-  * Default value is `"JKS"`
-
-Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS`
-
-[id="{version}-plugins-{type}s-{plugin}-mapping"]
-===== `mapping`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-This lets you choose the structure and parts of the event that are sent.
-
-
-For example:
-[source,ruby]
-    mapping => {"foo" => "%{host}"
-               "bar" => "%{type}"}
-
-[id="{version}-plugins-{type}s-{plugin}-message"]
-===== `message`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-pool_max"]
-===== `pool_max`
-
-  * Value type is <>
-  * Default value is `50`
-
-Max number of concurrent connections. Defaults to `50`
-
-[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
-===== `pool_max_per_route`
-
-  * Value type is <>
-  * Default value is `25`
-
-Max number of concurrent connections to a single host. Defaults to `25`
-
-[id="{version}-plugins-{type}s-{plugin}-proxy"]
-===== `proxy`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-Set this if you'd like to use an HTTP proxy. It supports multiple configuration syntaxes:
-
-1. Proxy host in form: `http://proxy.org:1234`
-2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}`
-3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}`
-
-[id="{version}-plugins-{type}s-{plugin}-request_timeout"]
-===== `request_timeout`
-
-  * Value type is <>
-  * Default value is `60`
-
-Timeout (in seconds) for the entire request.
-The underlying HTTP client is based on https://github.com/cheald/manticore[Manticore].
-
-[id="{version}-plugins-{type}s-{plugin}-retry_failed"]
-===== `retry_failed`
-
-  * Value type is <>
-  * Default value is `true`
-
-Set this to false if you don't want this output to retry failed requests.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"]
-===== `retry_non_idempotent`
-
-  * Value type is <>
-  * Default value is `false`
-
-If `automatic_retries` is enabled, this will cause non-idempotent HTTP verbs (such as POST) to be retried.
-
-[id="{version}-plugins-{type}s-{plugin}-retryable_codes"]
-===== `retryable_codes`
-
-  * Value type is <>
-  * Default value is `[429, 500, 502, 503, 504]`
-
-If these response codes are encountered, this plugin will retry the requests.
-
-[id="{version}-plugins-{type}s-{plugin}-socket_timeout"]
-===== `socket_timeout`
-
-  * Value type is <>
-  * Default value is `10`
-
-Timeout (in seconds) to wait for data on the socket. Default is `10s`
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_validation"]
-===== `ssl_certificate_validation`
-
-  * Value type is <>
-  * Default value is `true`
-
-Set this to false to disable SSL/TLS certificate validation.
-Note: setting this to false is generally considered insecure!
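-
-Drawing the retry-related options above together, a hypothetical sketch that also retries
-POST requests and treats `404` responses as successes might be; the endpoint and the
-specific values are illustrative only:
-
-[source,ruby]
-    output {
-      http {
-        url => "http://ingest.example.org"   # placeholder endpoint
-        http_method => "post"
-        automatic_retries => 3
-        retry_non_idempotent => true         # POST is not idempotent
-        retryable_codes => [429, 500, 502, 503, 504]
-        ignorable_codes => [404]             # treat 404 as success
-      }
-    }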
-
-[id="{version}-plugins-{type}s-{plugin}-truststore"]
-===== `truststore`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs!
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
-===== `truststore_password`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-Specify the truststore password here.
-Note, most .jks files created with keytool require a password!
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
-===== `truststore_type`
-
-  * Value type is <>
-  * Default value is `"JKS"`
-
-Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`
-
-[id="{version}-plugins-{type}s-{plugin}-url"]
-===== `url`
-
-  * This is a required setting.
-  * Value type is <>
-  * There is no default value for this setting.
-
-The URL to send events to.
-
-[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
-===== `validate_after_inactivity`
-
-  * Value type is <>
-  * Default value is `200`
-
-How long to wait, in milliseconds, before checking whether a keepalive connection is stale prior to executing a request on it.
-You may want to set this lower, possibly to 0, if you get connection errors regularly.
-Quoting the Apache Commons docs (this client is based on Apache Commons):
-'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.'
-See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/http-v4.4.0.asciidoc b/docs/versioned-plugins/outputs/http-v4.4.0.asciidoc
deleted file mode 100644
index 3f38c4f33..000000000
--- a/docs/versioned-plugins/outputs/http-v4.4.0.asciidoc
+++ /dev/null
@@ -1,389 +0,0 @@
-:plugin: http
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.4.0
-:release_date: 2017-08-23
-:changelog_url: https://github.com/logstash-plugins/logstash-output-http/blob/v4.4.0/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Http output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output lets you send events to a generic HTTP(S) endpoint.
-
-This output will execute up to `pool_max` requests in parallel for performance.
-Consider this when tuning this plugin for performance.
-
-Additionally, note that when parallel execution is used, strict ordering of events is not
-guaranteed!
-
-Beware, this gem does not yet support codecs. Please use the `format` option for now.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Http Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-content_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-format>> |<>, one of `["json", "form", "message"]`|No -| <<{version}-plugins-{type}s-{plugin}-headers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-http_method>> |<>, one of `["put", "post", "patch", "delete", "get", "head"]`|Yes -| <<{version}-plugins-{type}s-{plugin}-ignorable_codes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mapping>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_failed>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retryable_codes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_validation>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] -===== `automatic_retries` - - * Value type is <> - * Default value is `1` - -How many times should the client retry a failing URL. We highly recommend NOT setting this value -to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! -Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. - -[id="{version}-plugins-{type}s-{plugin}-cacert"] -===== `cacert` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom X.509 CA (.pem certs) specify the path to that here - -[id="{version}-plugins-{type}s-{plugin}-client_cert"] -===== `client_cert` - - * Value type is <> - * There is no default value for this setting. 
- -If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here - -[id="{version}-plugins-{type}s-{plugin}-client_key"] -===== `client_key` - - * Value type is <> - * There is no default value for this setting. - -If you're using a client certificate specify the path to the encryption key here - -[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] -===== `connect_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for a connection to be established. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-content_type"] -===== `content_type` - - * Value type is <> - * There is no default value for this setting. - -Content type - -If not specified, this defaults to the following: - -* if format is "json", "application/json" -* if format is "form", "application/x-www-form-urlencoded" - -[id="{version}-plugins-{type}s-{plugin}-cookies"] -===== `cookies` - - * Value type is <> - * Default value is `true` - -Enable cookie support. With this enabled the client will persist cookies -across requests as a normal web browser would. Enabled by default - -[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] -===== `follow_redirects` - - * Value type is <> - * Default value is `true` - -Should redirects be followed? Defaults to `true` - -[id="{version}-plugins-{type}s-{plugin}-format"] -===== `format` - - * Value can be any of: `json`, `form`, `message` - * Default value is `"json"` - -Set the format of the http body. - -If form, then the body will be the mapping (or whole event) converted -into a query parameter string, e.g. `foo=bar&baz=fizz...` - -If message, then the body will be the result of formatting the event according to message - -Otherwise, the event is sent as json. - -[id="{version}-plugins-{type}s-{plugin}-headers"] -===== `headers` - - * Value type is <> - * There is no default value for this setting. - -Custom headers to use -format is `headers => ["X-My-Header", "%{host}"]` - -[id="{version}-plugins-{type}s-{plugin}-http_compression"] -===== `http_compression` - - * Value type is <> - * Default value is `false` - -Enable request compression support. With this enabled the plugin will compress -http requests using gzip. - -[id="{version}-plugins-{type}s-{plugin}-http_method"] -===== `http_method` - - * This is a required setting. - * Value can be any of: `put`, `post`, `patch`, `delete`, `get`, `head` - * There is no default value for this setting. - -The HTTP Verb. One of "put", "post", "patch", "delete", "get", "head" - -[id="{version}-plugins-{type}s-{plugin}-ignorable_codes"] -===== `ignorable_codes` - - * Value type is <> - * There is no default value for this setting. - -If you would like to consider some non-2xx codes to be successes -enumerate them here. Responses returning these codes will be considered successes - -[id="{version}-plugins-{type}s-{plugin}-keepalive"] -===== `keepalive` - - * Value type is <> - * Default value is `true` - -Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least -one with this to fix interactions with broken keepalive implementations. - -[id="{version}-plugins-{type}s-{plugin}-keystore"] -===== `keystore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! 
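-
-The `http_compression` option described above appears in this version's option table; a
-minimal sketch enabling gzip-compressed request bodies might look like this (the endpoint
-is a placeholder):
-
-[source,ruby]
-    output {
-      http {
-        url => "http://ingest.example.org"   # placeholder endpoint
-        http_method => "post"
-        http_compression => true             # gzip the request body
-      }
-    }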
-
-[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
-===== `keystore_password`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-Specify the keystore password here.
-Note, most .jks files created with keytool require a password!
-
-[id="{version}-plugins-{type}s-{plugin}-keystore_type"]
-===== `keystore_type`
-
-  * Value type is <>
-  * Default value is `"JKS"`
-
-Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS`
-
-[id="{version}-plugins-{type}s-{plugin}-mapping"]
-===== `mapping`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-This lets you choose the structure and parts of the event that are sent.
-
-
-For example:
-[source,ruby]
-    mapping => {"foo" => "%{host}"
-               "bar" => "%{type}"}
-
-[id="{version}-plugins-{type}s-{plugin}-message"]
-===== `message`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-pool_max"]
-===== `pool_max`
-
-  * Value type is <>
-  * Default value is `50`
-
-Max number of concurrent connections. Defaults to `50`
-
-[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
-===== `pool_max_per_route`
-
-  * Value type is <>
-  * Default value is `25`
-
-Max number of concurrent connections to a single host. Defaults to `25`
-
-[id="{version}-plugins-{type}s-{plugin}-proxy"]
-===== `proxy`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-Set this if you'd like to use an HTTP proxy. It supports multiple configuration syntaxes:
-
-1. Proxy host in form: `http://proxy.org:1234`
-2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}`
-3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}`
-
-[id="{version}-plugins-{type}s-{plugin}-request_timeout"]
-===== `request_timeout`
-
-  * Value type is <>
-  * Default value is `60`
-
-Timeout (in seconds) for the entire request.
-The underlying HTTP client is based on https://github.com/cheald/manticore[Manticore].
-
-[id="{version}-plugins-{type}s-{plugin}-retry_failed"]
-===== `retry_failed`
-
-  * Value type is <>
-  * Default value is `true`
-
-Set this to false if you don't want this output to retry failed requests.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"]
-===== `retry_non_idempotent`
-
-  * Value type is <>
-  * Default value is `false`
-
-If `automatic_retries` is enabled, this will cause non-idempotent HTTP verbs (such as POST) to be retried.
-
-[id="{version}-plugins-{type}s-{plugin}-retryable_codes"]
-===== `retryable_codes`
-
-  * Value type is <>
-  * Default value is `[429, 500, 502, 503, 504]`
-
-If these response codes are encountered, this plugin will retry the requests.
-
-[id="{version}-plugins-{type}s-{plugin}-socket_timeout"]
-===== `socket_timeout`
-
-  * Value type is <>
-  * Default value is `10`
-
-Timeout (in seconds) to wait for data on the socket. Default is `10s`
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_validation"]
-===== `ssl_certificate_validation`
-
-  * Value type is <>
-  * Default value is `true`
-
-Set this to false to disable SSL/TLS certificate validation.
-Note: setting this to false is generally considered insecure!
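-
-For the `headers` option described earlier in this section, a sketch adding a
-field-interpolated header, mirroring the format shown in that option's description
-(the endpoint and header name are placeholders):
-
-[source,ruby]
-    output {
-      http {
-        url => "http://ingest.example.org"        # placeholder endpoint
-        http_method => "post"
-        headers => ["X-Source-Host", "%{host}"]   # interpolated from the event
-      }
-    }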
-
-[id="{version}-plugins-{type}s-{plugin}-truststore"]
-===== `truststore`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs!
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
-===== `truststore_password`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-Specify the truststore password here.
-Note, most .jks files created with keytool require a password!
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
-===== `truststore_type`
-
-  * Value type is <>
-  * Default value is `"JKS"`
-
-Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`
-
-[id="{version}-plugins-{type}s-{plugin}-url"]
-===== `url`
-
-  * This is a required setting.
-  * Value type is <>
-  * There is no default value for this setting.
-
-The URL to send events to.
-
-[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
-===== `validate_after_inactivity`
-
-  * Value type is <>
-  * Default value is `200`
-
-How long to wait, in milliseconds, before checking whether a keepalive connection is stale prior to executing a request on it.
-You may want to set this lower, possibly to 0, if you get connection errors regularly.
-Quoting the Apache Commons docs (this client is based on Apache Commons):
-'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.'
-See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/http-v5.0.0.asciidoc b/docs/versioned-plugins/outputs/http-v5.0.0.asciidoc
deleted file mode 100644
index 6aa7924ce..000000000
--- a/docs/versioned-plugins/outputs/http-v5.0.0.asciidoc
+++ /dev/null
@@ -1,369 +0,0 @@
-:plugin: http
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v5.0.0
-:release_date: 2017-08-02
-:changelog_url: https://github.com/logstash-plugins/logstash-output-http/blob/v5.0.0/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Http output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output lets you send events to a generic HTTP(S) endpoint.
-
-This output will execute up to `pool_max` requests in parallel for performance.
-Consider this when tuning this plugin for performance.
-
-Additionally, note that when parallel execution is used, strict ordering of events is not
-guaranteed!
-
-Beware, this gem does not yet support codecs. Please use the `format` option for now.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Http Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-content_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-format>> |<>, one of `["json", "form", "message"]`|No -| <<{version}-plugins-{type}s-{plugin}-headers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-http_method>> |<>, one of `["put", "post", "patch", "delete", "get", "head"]`|Yes -| <<{version}-plugins-{type}s-{plugin}-ignorable_codes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mapping>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_failed>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retryable_codes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] -===== `automatic_retries` - - * Value type is <> - * Default value is `1` - -How many times should the client retry a failing URL. We highly recommend NOT setting this value -to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! -Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. - -[id="{version}-plugins-{type}s-{plugin}-cacert"] -===== `cacert` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom X.509 CA (.pem certs) specify the path to that here - -[id="{version}-plugins-{type}s-{plugin}-client_cert"] -===== `client_cert` - - * Value type is <> - * There is no default value for this setting. 
- -If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here - -[id="{version}-plugins-{type}s-{plugin}-client_key"] -===== `client_key` - - * Value type is <> - * There is no default value for this setting. - -If you're using a client certificate specify the path to the encryption key here - -[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] -===== `connect_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for a connection to be established. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-content_type"] -===== `content_type` - - * Value type is <> - * There is no default value for this setting. - -Content type - -If not specified, this defaults to the following: - -* if format is "json", "application/json" -* if format is "form", "application/x-www-form-urlencoded" - -[id="{version}-plugins-{type}s-{plugin}-cookies"] -===== `cookies` - - * Value type is <> - * Default value is `true` - -Enable cookie support. With this enabled the client will persist cookies -across requests as a normal web browser would. Enabled by default - -[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] -===== `follow_redirects` - - * Value type is <> - * Default value is `true` - -Should redirects be followed? Defaults to `true` - -[id="{version}-plugins-{type}s-{plugin}-format"] -===== `format` - - * Value can be any of: `json`, `form`, `message` - * Default value is `"json"` - -Set the format of the http body. - -If form, then the body will be the mapping (or whole event) converted -into a query parameter string, e.g. `foo=bar&baz=fizz...` - -If message, then the body will be the result of formatting the event according to message - -Otherwise, the event is sent as json. - -[id="{version}-plugins-{type}s-{plugin}-headers"] -===== `headers` - - * Value type is <> - * There is no default value for this setting. - -Custom headers to use -format is `headers => ["X-My-Header", "%{host}"]` - -[id="{version}-plugins-{type}s-{plugin}-http_method"] -===== `http_method` - - * This is a required setting. - * Value can be any of: `put`, `post`, `patch`, `delete`, `get`, `head` - * There is no default value for this setting. - -The HTTP Verb. One of "put", "post", "patch", "delete", "get", "head" - -[id="{version}-plugins-{type}s-{plugin}-ignorable_codes"] -===== `ignorable_codes` - - * Value type is <> - * There is no default value for this setting. - -If you would like to consider some non-2xx codes to be successes -enumerate them here. Responses returning these codes will be considered successes - -[id="{version}-plugins-{type}s-{plugin}-keepalive"] -===== `keepalive` - - * Value type is <> - * Default value is `true` - -Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least -one with this to fix interactions with broken keepalive implementations. - -[id="{version}-plugins-{type}s-{plugin}-keystore"] -===== `keystore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! - -[id="{version}-plugins-{type}s-{plugin}-keystore_password"] -===== `keystore_password` - - * Value type is <> - * There is no default value for this setting. - -Specify the keystore password here. -Note, most .jks files created with keytool require a password! 
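-
-Combining the `content_type` and `format` options described earlier with the `message`
-option (described below), a sketch that sends the raw message field as plain text might
-look like this; the endpoint and content type are placeholders:
-
-[source,ruby]
-    output {
-      http {
-        url => "http://ingest.example.org"   # placeholder endpoint
-        http_method => "post"
-        format => "message"                  # body is the formatted message
-        message => "%{message}"              # send the event's message field
-        content_type => "text/plain"         # placeholder content type
-      }
-    }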
-
-[id="{version}-plugins-{type}s-{plugin}-keystore_type"]
-===== `keystore_type`
-
-  * Value type is <>
-  * Default value is `"JKS"`
-
-Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS`
-
-[id="{version}-plugins-{type}s-{plugin}-mapping"]
-===== `mapping`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-This lets you choose the structure and parts of the event that are sent.
-
-
-For example:
-[source,ruby]
-    mapping => {"foo" => "%{host}"
-               "bar" => "%{type}"}
-
-[id="{version}-plugins-{type}s-{plugin}-message"]
-===== `message`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-pool_max"]
-===== `pool_max`
-
-  * Value type is <>
-  * Default value is `50`
-
-Max number of concurrent connections. Defaults to `50`
-
-[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
-===== `pool_max_per_route`
-
-  * Value type is <>
-  * Default value is `25`
-
-Max number of concurrent connections to a single host. Defaults to `25`
-
-[id="{version}-plugins-{type}s-{plugin}-proxy"]
-===== `proxy`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-Set this if you'd like to use an HTTP proxy. It supports multiple configuration syntaxes:
-
-1. Proxy host in form: `http://proxy.org:1234`
-2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}`
-3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}`
-
-[id="{version}-plugins-{type}s-{plugin}-request_timeout"]
-===== `request_timeout`
-
-  * Value type is <>
-  * Default value is `60`
-
-Timeout (in seconds) for the entire request.
-The underlying HTTP client is based on https://github.com/cheald/manticore[Manticore].
-
-[id="{version}-plugins-{type}s-{plugin}-retry_failed"]
-===== `retry_failed`
-
-  * Value type is <>
-  * Default value is `true`
-
-Set this to false if you don't want this output to retry failed requests.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"]
-===== `retry_non_idempotent`
-
-  * Value type is <>
-  * Default value is `false`
-
-If `automatic_retries` is enabled, this will cause non-idempotent HTTP verbs (such as POST) to be retried.
-
-[id="{version}-plugins-{type}s-{plugin}-retryable_codes"]
-===== `retryable_codes`
-
-  * Value type is <>
-  * Default value is `[429, 500, 502, 503, 504]`
-
-If these response codes are encountered, this plugin will retry the requests.
-
-[id="{version}-plugins-{type}s-{plugin}-socket_timeout"]
-===== `socket_timeout`
-
-  * Value type is <>
-  * Default value is `10`
-
-Timeout (in seconds) to wait for data on the socket. Default is `10s`
-
-[id="{version}-plugins-{type}s-{plugin}-truststore"]
-===== `truststore`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs!
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
-===== `truststore_password`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-Specify the truststore password here.
-Note, most .jks files created with keytool require a password!
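-
-As a sketch of the truststore options in this section (`truststore_type` is described
-just below), trusting a private CA via a JKS truststore; the truststore governs which
-server certificates are accepted, whereas the keystore supplies the client's own
-certificate. Paths and password are placeholders:
-
-[source,ruby]
-    output {
-      http {
-        url => "https://ingest.example.org"            # placeholder endpoint
-        http_method => "post"
-        truststore => "/etc/logstash/truststore.jks"   # placeholder path
-        truststore_password => "changeit"              # placeholder password
-        truststore_type => "JKS"
-      }
-    }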
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
-===== `truststore_type`
-
-  * Value type is <>
-  * Default value is `"JKS"`
-
-Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`
-
-[id="{version}-plugins-{type}s-{plugin}-url"]
-===== `url`
-
-  * This is a required setting.
-  * Value type is <>
-  * There is no default value for this setting.
-
-The URL to send events to.
-
-[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
-===== `validate_after_inactivity`
-
-  * Value type is <>
-  * Default value is `200`
-
-How long to wait, in milliseconds, before checking whether a keepalive connection is stale prior to executing a request on it.
-You may want to set this lower, possibly to 0, if you get connection errors regularly.
-Quoting the Apache Commons docs (this client is based on Apache Commons):
-'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.'
-See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info]
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/http-v5.0.1.asciidoc b/docs/versioned-plugins/outputs/http-v5.0.1.asciidoc
deleted file mode 100644
index 52bc55d47..000000000
--- a/docs/versioned-plugins/outputs/http-v5.0.1.asciidoc
+++ /dev/null
@@ -1,369 +0,0 @@
-:plugin: http
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v5.0.1
-:release_date: 2017-08-16
-:changelog_url: https://github.com/logstash-plugins/logstash-output-http/blob/v5.0.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Http output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output lets you send events to a generic HTTP(S) endpoint.
-
-This output will execute up to `pool_max` requests in parallel for performance.
-Consider this when tuning this plugin for performance.
-
-Additionally, note that when parallel execution is used, strict ordering of events is not
-guaranteed!
-
-Beware, this gem does not yet support codecs. Please use the `format` option for now.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Http Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-content_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cookies>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-format>> |<>, one of `["json", "form", "message"]`|No -| <<{version}-plugins-{type}s-{plugin}-headers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-http_method>> |<>, one of `["put", "post", "patch", "delete", "get", "head"]`|Yes -| <<{version}-plugins-{type}s-{plugin}-ignorable_codes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mapping>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<,>>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_failed>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retryable_codes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-automatic_retries"] -===== `automatic_retries` - - * Value type is <> - * Default value is `1` - -How many times should the client retry a failing URL. We highly recommend NOT setting this value -to zero if keepalive is enabled. Some servers incorrectly end keepalives early requiring a retry! -Note: if `retry_non_idempotent` is set only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried. - -[id="{version}-plugins-{type}s-{plugin}-cacert"] -===== `cacert` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom X.509 CA (.pem certs) specify the path to that here - -[id="{version}-plugins-{type}s-{plugin}-client_cert"] -===== `client_cert` - - * Value type is <> - * There is no default value for this setting. 
- -If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here - -[id="{version}-plugins-{type}s-{plugin}-client_key"] -===== `client_key` - - * Value type is <> - * There is no default value for this setting. - -If you're using a client certificate specify the path to the encryption key here - -[id="{version}-plugins-{type}s-{plugin}-connect_timeout"] -===== `connect_timeout` - - * Value type is <> - * Default value is `10` - -Timeout (in seconds) to wait for a connection to be established. Default is `10s` - -[id="{version}-plugins-{type}s-{plugin}-content_type"] -===== `content_type` - - * Value type is <> - * There is no default value for this setting. - -Content type - -If not specified, this defaults to the following: - -* if format is "json", "application/json" -* if format is "form", "application/x-www-form-urlencoded" - -[id="{version}-plugins-{type}s-{plugin}-cookies"] -===== `cookies` - - * Value type is <> - * Default value is `true` - -Enable cookie support. With this enabled the client will persist cookies -across requests as a normal web browser would. Enabled by default - -[id="{version}-plugins-{type}s-{plugin}-follow_redirects"] -===== `follow_redirects` - - * Value type is <> - * Default value is `true` - -Should redirects be followed? Defaults to `true` - -[id="{version}-plugins-{type}s-{plugin}-format"] -===== `format` - - * Value can be any of: `json`, `form`, `message` - * Default value is `"json"` - -Set the format of the http body. - -If form, then the body will be the mapping (or whole event) converted -into a query parameter string, e.g. `foo=bar&baz=fizz...` - -If message, then the body will be the result of formatting the event according to message - -Otherwise, the event is sent as json. - -[id="{version}-plugins-{type}s-{plugin}-headers"] -===== `headers` - - * Value type is <> - * There is no default value for this setting. - -Custom headers to use -format is `headers => ["X-My-Header", "%{host}"]` - -[id="{version}-plugins-{type}s-{plugin}-http_method"] -===== `http_method` - - * This is a required setting. - * Value can be any of: `put`, `post`, `patch`, `delete`, `get`, `head` - * There is no default value for this setting. - -The HTTP Verb. One of "put", "post", "patch", "delete", "get", "head" - -[id="{version}-plugins-{type}s-{plugin}-ignorable_codes"] -===== `ignorable_codes` - - * Value type is <> - * There is no default value for this setting. - -If you would like to consider some non-2xx codes to be successes -enumerate them here. Responses returning these codes will be considered successes - -[id="{version}-plugins-{type}s-{plugin}-keepalive"] -===== `keepalive` - - * Value type is <> - * Default value is `true` - -Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least -one with this to fix interactions with broken keepalive implementations. - -[id="{version}-plugins-{type}s-{plugin}-keystore"] -===== `keystore` - - * Value type is <> - * There is no default value for this setting. - -If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys! - -[id="{version}-plugins-{type}s-{plugin}-keystore_password"] -===== `keystore_password` - - * Value type is <> - * There is no default value for this setting. - -Specify the keystore password here. -Note, most .jks files created with keytool require a password! 
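-
-Since only `url` and `http_method` are required, a minimal working sketch is simply the
-following (the endpoint is a placeholder):
-
-[source,ruby]
-    output {
-      http {
-        url => "http://ingest.example.org"   # placeholder endpoint
-        http_method => "post"                # body defaults to JSON (format => "json")
-      }
-    }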
-
-[id="{version}-plugins-{type}s-{plugin}-keystore_type"]
-===== `keystore_type`
-
- * Value type is <<string,string>>
- * Default value is `"JKS"`
-
-Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS`.
-
-[id="{version}-plugins-{type}s-{plugin}-mapping"]
-===== `mapping`
-
- * Value type is <<hash,hash>>
- * There is no default value for this setting.
-
-This lets you choose the structure and parts of the event that are sent.
-
-For example:
-[source,ruby]
-    mapping => {"foo" => "%{host}"
-                "bar" => "%{type}"}
-
-[id="{version}-plugins-{type}s-{plugin}-message"]
-===== `message`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The message to use as the body when `format` is set to `message`. The event is
-formatted according to this value (see the `format` option).
-
-[id="{version}-plugins-{type}s-{plugin}-pool_max"]
-===== `pool_max`
-
- * Value type is <<number,number>>
- * Default value is `50`
-
-Max number of concurrent connections. Defaults to `50`.
-
-[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
-===== `pool_max_per_route`
-
- * Value type is <<number,number>>
- * Default value is `25`
-
-Max number of concurrent connections to a single host. Defaults to `25`.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy"]
-===== `proxy`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Set this if you'd like to use an HTTP proxy. This setting supports multiple configuration syntaxes:
-
-1. Proxy host in form: `http://proxy.org:1234`
-2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}`
-3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}`
-
-[id="{version}-plugins-{type}s-{plugin}-request_timeout"]
-===== `request_timeout`
-
- * Value type is <<number,number>>
- * Default value is `60`
-
-Timeout (in seconds) for the entire request. The underlying HTTP client is
-based on https://github.com/cheald/manticore[Manticore].
-
-[id="{version}-plugins-{type}s-{plugin}-retry_failed"]
-===== `retry_failed`
-
- * Value type is <<boolean,boolean>>
- * Default value is `true`
-
-Set this to `false` if you don't want this output to retry failed requests.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"]
-===== `retry_non_idempotent`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-If `automatic_retries` is enabled, this will cause non-idempotent HTTP verbs (such as POST) to be retried as well.
-
-[id="{version}-plugins-{type}s-{plugin}-retryable_codes"]
-===== `retryable_codes`
-
- * Value type is <<array,array>>
- * Default value is `[429, 500, 502, 503, 504]`
-
-If one of these response codes is encountered, this plugin will retry the request.
-
-[id="{version}-plugins-{type}s-{plugin}-socket_timeout"]
-===== `socket_timeout`
-
- * Value type is <<number,number>>
- * Default value is `10`
-
-Timeout (in seconds) to wait for data on the socket. Default is `10s`.
-
-[id="{version}-plugins-{type}s-{plugin}-truststore"]
-===== `truststore`
-
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-If you need to use a custom truststore (`.jks`), specify that here. This does not work with .pem certs!
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
-===== `truststore_password`
-
- * Value type is <<password,password>>
- * There is no default value for this setting.
-
-Specify the truststore password here.
-Note, most .jks files created with keytool require a password!
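-
-As an illustration of the proxy and truststore settings above (the hostnames,
-paths, and password are hypothetical):
-
-[source,ruby]
-    output {
-      http {
-        url => "https://internal.example.org/events"
-        http_method => "post"
-        proxy => "http://proxy.example.org:3128"
-        truststore => "/etc/logstash/truststore.jks"
-        truststore_password => "changeit"
-      }
-    }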
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
-===== `truststore_type`
-
- * Value type is <<string,string>>
- * Default value is `"JKS"`
-
-Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`.
-
-[id="{version}-plugins-{type}s-{plugin}-url"]
-===== `url`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The URL to send requests to.
-
-[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
-===== `validate_after_inactivity`
-
- * Value type is <<number,number>>
- * Default value is `200`
-
-How long (in milliseconds) to wait before checking whether a keepalive connection is stale prior to executing a request on it.
-You may want to set this lower, possibly to 0, if you get connection errors regularly.
-Quoting the Apache Commons docs (this client is based on Apache Commons):
-'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.'
-See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info].
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/http-v5.1.0.asciidoc b/docs/versioned-plugins/outputs/http-v5.1.0.asciidoc
deleted file mode 100644
index f21efdb39..000000000
--- a/docs/versioned-plugins/outputs/http-v5.1.0.asciidoc
+++ /dev/null
@@ -1,379 +0,0 @@
-:plugin: http
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v5.1.0
-:release_date: 2017-08-23
-:changelog_url: https://github.com/logstash-plugins/logstash-output-http/blob/v5.1.0/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Http output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output lets you send events to a generic HTTP(S) endpoint.
-
-This output will execute up to `pool_max` requests in parallel.
-Keep this in mind when tuning the plugin for performance.
-
-Additionally, note that when parallel execution is used, strict ordering of events is not
-guaranteed!
-
-Beware, this gem does not yet support codecs. Please use the `format` option for now.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Http Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-content_type>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-cookies>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-format>> |<<string,string>>, one of `["json", "form", "message"]`|No
-| <<{version}-plugins-{type}s-{plugin}-headers>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-http_method>> |<<string,string>>, one of `["put", "post", "patch", "delete", "get", "head"]`|Yes
-| <<{version}-plugins-{type}s-{plugin}-ignorable_codes>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<<password,password>>|No
-| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-mapping>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-message>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-retry_failed>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-retryable_codes>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<<password,password>>|No
-| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-url>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<<number,number>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-automatic_retries"]
-===== `automatic_retries`
-
- * Value type is <<number,number>>
- * Default value is `1`
-
-How many times the client should retry a failing URL. We highly recommend NOT setting this value
-to zero if keepalive is enabled. Some servers incorrectly end keepalives early, requiring a retry!
-Note: unless `retry_non_idempotent` is set, only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried.
-
-[id="{version}-plugins-{type}s-{plugin}-cacert"]
-===== `cacert`
-
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-If you need to use a custom X.509 CA (.pem certs), specify the path to it here.
-
-[id="{version}-plugins-{type}s-{plugin}-client_cert"]
-===== `client_cert`
-
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-If you'd like to use a client certificate (note, most people don't want this), set the path to the X.509 cert here.
-
-[id="{version}-plugins-{type}s-{plugin}-client_key"]
-===== `client_key`
-
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-If you're using a client certificate, specify the path to the encryption key here.
-
-[id="{version}-plugins-{type}s-{plugin}-connect_timeout"]
-===== `connect_timeout`
-
- * Value type is <<number,number>>
- * Default value is `10`
-
-Timeout (in seconds) to wait for a connection to be established. Default is `10s`.
-
-[id="{version}-plugins-{type}s-{plugin}-content_type"]
-===== `content_type`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Content type for the request.
-
-If not specified, this defaults to the following:
-
-* if format is "json", "application/json"
-* if format is "form", "application/x-www-form-urlencoded"
-
-[id="{version}-plugins-{type}s-{plugin}-cookies"]
-===== `cookies`
-
- * Value type is <<boolean,boolean>>
- * Default value is `true`
-
-Enable cookie support. With this enabled, the client will persist cookies
-across requests as a normal web browser would. Enabled by default.
-
-[id="{version}-plugins-{type}s-{plugin}-follow_redirects"]
-===== `follow_redirects`
-
- * Value type is <<boolean,boolean>>
- * Default value is `true`
-
-Should redirects be followed? Defaults to `true`.
-
-[id="{version}-plugins-{type}s-{plugin}-format"]
-===== `format`
-
- * Value can be any of: `json`, `form`, `message`
- * Default value is `"json"`
-
-Set the format of the HTTP body.
-
-If `form`, then the body will be the mapping (or whole event) converted
-into a query parameter string, e.g. `foo=bar&baz=fizz...`
-
-If `message`, then the body will be the result of formatting the event according to `message`.
-
-Otherwise, the event is sent as JSON.
-
-[id="{version}-plugins-{type}s-{plugin}-headers"]
-===== `headers`
-
- * Value type is <<hash,hash>>
- * There is no default value for this setting.
-
-Custom headers to use.
-The format is `headers => ["X-My-Header", "%{host}"]`.
-
-[id="{version}-plugins-{type}s-{plugin}-http_compression"]
-===== `http_compression`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-Enable request compression support. With this enabled, the plugin will compress
-HTTP requests using gzip.
-
-[id="{version}-plugins-{type}s-{plugin}-http_method"]
-===== `http_method`
-
- * This is a required setting.
- * Value can be any of: `put`, `post`, `patch`, `delete`, `get`, `head`
- * There is no default value for this setting.
-
-The HTTP verb. One of "put", "post", "patch", "delete", "get", "head".
-
-[id="{version}-plugins-{type}s-{plugin}-ignorable_codes"]
-===== `ignorable_codes`
-
- * Value type is <<array,array>>
- * There is no default value for this setting.
-
-If you would like to consider some non-2xx codes to be successes,
-enumerate them here. Responses returning these codes will be considered successes.
-
-[id="{version}-plugins-{type}s-{plugin}-keepalive"]
-===== `keepalive`
-
- * Value type is <<boolean,boolean>>
- * Default value is `true`
-
-Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least
-one with this to fix interactions with broken keepalive implementations.
-
-[id="{version}-plugins-{type}s-{plugin}-keystore"]
-===== `keystore`
-
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-If you need to use a custom keystore (`.jks`), specify that here. This does not work with .pem keys!
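-
-To tie a few of the options above together, here is a sketch (the endpoint and
-header value are invented for illustration) that posts gzip-compressed JSON
-with a custom header:
-
-[source,ruby]
-    output {
-      http {
-        url => "https://collector.example.com/bulk"
-        http_method => "post"
-        format => "json"
-        http_compression => true
-        headers => ["X-My-Header", "%{host}"]
-      }
-    }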
-
-[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
-===== `keystore_password`
-
- * Value type is <<password,password>>
- * There is no default value for this setting.
-
-Specify the keystore password here.
-Note, most .jks files created with keytool require a password!
-
-[id="{version}-plugins-{type}s-{plugin}-keystore_type"]
-===== `keystore_type`
-
- * Value type is <<string,string>>
- * Default value is `"JKS"`
-
-Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS`.
-
-[id="{version}-plugins-{type}s-{plugin}-mapping"]
-===== `mapping`
-
- * Value type is <<hash,hash>>
- * There is no default value for this setting.
-
-This lets you choose the structure and parts of the event that are sent.
-
-For example:
-[source,ruby]
-    mapping => {"foo" => "%{host}"
-                "bar" => "%{type}"}
-
-[id="{version}-plugins-{type}s-{plugin}-message"]
-===== `message`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The message to use as the body when `format` is set to `message`. The event is
-formatted according to this value (see the `format` option).
-
-[id="{version}-plugins-{type}s-{plugin}-pool_max"]
-===== `pool_max`
-
- * Value type is <<number,number>>
- * Default value is `50`
-
-Max number of concurrent connections. Defaults to `50`.
-
-[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
-===== `pool_max_per_route`
-
- * Value type is <<number,number>>
- * Default value is `25`
-
-Max number of concurrent connections to a single host. Defaults to `25`.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy"]
-===== `proxy`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Set this if you'd like to use an HTTP proxy. This setting supports multiple configuration syntaxes:
-
-1. Proxy host in form: `http://proxy.org:1234`
-2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}`
-3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}`
-
-[id="{version}-plugins-{type}s-{plugin}-request_timeout"]
-===== `request_timeout`
-
- * Value type is <<number,number>>
- * Default value is `60`
-
-Timeout (in seconds) for the entire request. The underlying HTTP client is
-based on https://github.com/cheald/manticore[Manticore].
-
-[id="{version}-plugins-{type}s-{plugin}-retry_failed"]
-===== `retry_failed`
-
- * Value type is <<boolean,boolean>>
- * Default value is `true`
-
-Set this to `false` if you don't want this output to retry failed requests.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"]
-===== `retry_non_idempotent`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-If `automatic_retries` is enabled, this will cause non-idempotent HTTP verbs (such as POST) to be retried as well.
-
-[id="{version}-plugins-{type}s-{plugin}-retryable_codes"]
-===== `retryable_codes`
-
- * Value type is <<array,array>>
- * Default value is `[429, 500, 502, 503, 504]`
-
-If one of these response codes is encountered, this plugin will retry the request.
-
-[id="{version}-plugins-{type}s-{plugin}-socket_timeout"]
-===== `socket_timeout`
-
- * Value type is <<number,number>>
- * Default value is `10`
-
-Timeout (in seconds) to wait for data on the socket. Default is `10s`.
-
-[id="{version}-plugins-{type}s-{plugin}-truststore"]
-===== `truststore`
-
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-If you need to use a custom truststore (`.jks`), specify that here. This does not work with .pem certs!
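-
-As a sketch of the retry-related settings (the endpoint is invented; the
-`retryable_codes` shown are just the documented defaults, and `409` is an
-arbitrary example of an ignorable code):
-
-[source,ruby]
-    output {
-      http {
-        url => "http://example.com/receiver"
-        http_method => "post"
-        automatic_retries => 2
-        retry_non_idempotent => true
-        retryable_codes => [429, 500, 502, 503, 504]
-        ignorable_codes => [409]
-      }
-    }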
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
-===== `truststore_password`
-
- * Value type is <<password,password>>
- * There is no default value for this setting.
-
-Specify the truststore password here.
-Note, most .jks files created with keytool require a password!
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
-===== `truststore_type`
-
- * Value type is <<string,string>>
- * Default value is `"JKS"`
-
-Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`.
-
-[id="{version}-plugins-{type}s-{plugin}-url"]
-===== `url`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The URL to send requests to.
-
-[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
-===== `validate_after_inactivity`
-
- * Value type is <<number,number>>
- * Default value is `200`
-
-How long (in milliseconds) to wait before checking whether a keepalive connection is stale prior to executing a request on it.
-You may want to set this lower, possibly to 0, if you get connection errors regularly.
-Quoting the Apache Commons docs (this client is based on Apache Commons):
-'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.'
-See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info].
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/http-v5.1.1.asciidoc b/docs/versioned-plugins/outputs/http-v5.1.1.asciidoc
deleted file mode 100644
index 1ea2e1a5f..000000000
--- a/docs/versioned-plugins/outputs/http-v5.1.1.asciidoc
+++ /dev/null
@@ -1,379 +0,0 @@
-:plugin: http
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v5.1.1
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-output-http/blob/v5.1.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Http output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output lets you send events to a generic HTTP(S) endpoint.
-
-This output will execute up to `pool_max` requests in parallel.
-Keep this in mind when tuning the plugin for performance.
-
-Additionally, note that when parallel execution is used, strict ordering of events is not
-guaranteed!
-
-Beware, this gem does not yet support codecs. Please use the `format` option for now.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Http Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-content_type>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-cookies>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-format>> |<<string,string>>, one of `["json", "form", "message"]`|No
-| <<{version}-plugins-{type}s-{plugin}-headers>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-http_method>> |<<string,string>>, one of `["put", "post", "patch", "delete", "get", "head"]`|Yes
-| <<{version}-plugins-{type}s-{plugin}-ignorable_codes>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<<password,password>>|No
-| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-mapping>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-message>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-retry_failed>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-retryable_codes>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<<password,password>>|No
-| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-url>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<<number,number>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-automatic_retries"]
-===== `automatic_retries`
-
- * Value type is <<number,number>>
- * Default value is `1`
-
-How many times the client should retry a failing URL. We highly recommend NOT setting this value
-to zero if keepalive is enabled. Some servers incorrectly end keepalives early, requiring a retry!
-Note: unless `retry_non_idempotent` is set, only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried.
-
-[id="{version}-plugins-{type}s-{plugin}-cacert"]
-===== `cacert`
-
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-If you need to use a custom X.509 CA (.pem certs), specify the path to it here.
-
-[id="{version}-plugins-{type}s-{plugin}-client_cert"]
-===== `client_cert`
-
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-If you'd like to use a client certificate (note, most people don't want this), set the path to the X.509 cert here.
-
-[id="{version}-plugins-{type}s-{plugin}-client_key"]
-===== `client_key`
-
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-If you're using a client certificate, specify the path to the encryption key here.
-
-[id="{version}-plugins-{type}s-{plugin}-connect_timeout"]
-===== `connect_timeout`
-
- * Value type is <<number,number>>
- * Default value is `10`
-
-Timeout (in seconds) to wait for a connection to be established. Default is `10s`.
-
-[id="{version}-plugins-{type}s-{plugin}-content_type"]
-===== `content_type`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Content type for the request.
-
-If not specified, this defaults to the following:
-
-* if format is "json", "application/json"
-* if format is "form", "application/x-www-form-urlencoded"
-
-[id="{version}-plugins-{type}s-{plugin}-cookies"]
-===== `cookies`
-
- * Value type is <<boolean,boolean>>
- * Default value is `true`
-
-Enable cookie support. With this enabled, the client will persist cookies
-across requests as a normal web browser would. Enabled by default.
-
-[id="{version}-plugins-{type}s-{plugin}-follow_redirects"]
-===== `follow_redirects`
-
- * Value type is <<boolean,boolean>>
- * Default value is `true`
-
-Should redirects be followed? Defaults to `true`.
-
-[id="{version}-plugins-{type}s-{plugin}-format"]
-===== `format`
-
- * Value can be any of: `json`, `form`, `message`
- * Default value is `"json"`
-
-Set the format of the HTTP body.
-
-If `form`, then the body will be the mapping (or whole event) converted
-into a query parameter string, e.g. `foo=bar&baz=fizz...`
-
-If `message`, then the body will be the result of formatting the event according to `message`.
-
-Otherwise, the event is sent as JSON.
-
-[id="{version}-plugins-{type}s-{plugin}-headers"]
-===== `headers`
-
- * Value type is <<hash,hash>>
- * There is no default value for this setting.
-
-Custom headers to use.
-The format is `headers => ["X-My-Header", "%{host}"]`.
-
-[id="{version}-plugins-{type}s-{plugin}-http_compression"]
-===== `http_compression`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-Enable request compression support. With this enabled, the plugin will compress
-HTTP requests using gzip.
-
-[id="{version}-plugins-{type}s-{plugin}-http_method"]
-===== `http_method`
-
- * This is a required setting.
- * Value can be any of: `put`, `post`, `patch`, `delete`, `get`, `head`
- * There is no default value for this setting.
-
-The HTTP verb. One of "put", "post", "patch", "delete", "get", "head".
-
-[id="{version}-plugins-{type}s-{plugin}-ignorable_codes"]
-===== `ignorable_codes`
-
- * Value type is <<array,array>>
- * There is no default value for this setting.
-
-If you would like to consider some non-2xx codes to be successes,
-enumerate them here. Responses returning these codes will be considered successes.
-
-[id="{version}-plugins-{type}s-{plugin}-keepalive"]
-===== `keepalive`
-
- * Value type is <<boolean,boolean>>
- * Default value is `true`
-
-Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least
-one with this to fix interactions with broken keepalive implementations.
-
-[id="{version}-plugins-{type}s-{plugin}-keystore"]
-===== `keystore`
-
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-If you need to use a custom keystore (`.jks`), specify that here. This does not work with .pem keys!
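-
-For instance, a speculative plain-text sketch (the endpoint and message
-template are made up) that combines `format => "message"` with the `message`
-and `content_type` settings:
-
-[source,ruby]
-    output {
-      http {
-        url => "http://example.com/notify"
-        http_method => "post"
-        format => "message"
-        message => "%{host} says: %{message}"
-        content_type => "text/plain"
-      }
-    }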
-
-[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
-===== `keystore_password`
-
- * Value type is <<password,password>>
- * There is no default value for this setting.
-
-Specify the keystore password here.
-Note, most .jks files created with keytool require a password!
-
-[id="{version}-plugins-{type}s-{plugin}-keystore_type"]
-===== `keystore_type`
-
- * Value type is <<string,string>>
- * Default value is `"JKS"`
-
-Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS`.
-
-[id="{version}-plugins-{type}s-{plugin}-mapping"]
-===== `mapping`
-
- * Value type is <<hash,hash>>
- * There is no default value for this setting.
-
-This lets you choose the structure and parts of the event that are sent.
-
-For example:
-[source,ruby]
-    mapping => {"foo" => "%{host}"
-                "bar" => "%{type}"}
-
-[id="{version}-plugins-{type}s-{plugin}-message"]
-===== `message`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The message to use as the body when `format` is set to `message`. The event is
-formatted according to this value (see the `format` option).
-
-[id="{version}-plugins-{type}s-{plugin}-pool_max"]
-===== `pool_max`
-
- * Value type is <<number,number>>
- * Default value is `50`
-
-Max number of concurrent connections. Defaults to `50`.
-
-[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
-===== `pool_max_per_route`
-
- * Value type is <<number,number>>
- * Default value is `25`
-
-Max number of concurrent connections to a single host. Defaults to `25`.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy"]
-===== `proxy`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Set this if you'd like to use an HTTP proxy. This setting supports multiple configuration syntaxes:
-
-1. Proxy host in form: `http://proxy.org:1234`
-2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}`
-3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}`
-
-[id="{version}-plugins-{type}s-{plugin}-request_timeout"]
-===== `request_timeout`
-
- * Value type is <<number,number>>
- * Default value is `60`
-
-Timeout (in seconds) for the entire request. The underlying HTTP client is
-based on https://github.com/cheald/manticore[Manticore].
-
-[id="{version}-plugins-{type}s-{plugin}-retry_failed"]
-===== `retry_failed`
-
- * Value type is <<boolean,boolean>>
- * Default value is `true`
-
-Set this to `false` if you don't want this output to retry failed requests.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"]
-===== `retry_non_idempotent`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-If `automatic_retries` is enabled, this will cause non-idempotent HTTP verbs (such as POST) to be retried as well.
-
-[id="{version}-plugins-{type}s-{plugin}-retryable_codes"]
-===== `retryable_codes`
-
- * Value type is <<array,array>>
- * Default value is `[429, 500, 502, 503, 504]`
-
-If one of these response codes is encountered, this plugin will retry the request.
-
-[id="{version}-plugins-{type}s-{plugin}-socket_timeout"]
-===== `socket_timeout`
-
- * Value type is <<number,number>>
- * Default value is `10`
-
-Timeout (in seconds) to wait for data on the socket. Default is `10s`.
-
-[id="{version}-plugins-{type}s-{plugin}-truststore"]
-===== `truststore`
-
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-If you need to use a custom truststore (`.jks`), specify that here. This does not work with .pem certs!
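-
-A tuning-oriented sketch (the endpoint and numbers are arbitrary examples, not
-recommendations) that adjusts the connection pool and the various timeouts:
-
-[source,ruby]
-    output {
-      http {
-        url => "http://example.com/firehose"
-        http_method => "post"
-        pool_max => 100
-        pool_max_per_route => 50
-        connect_timeout => 5
-        socket_timeout => 30
-        request_timeout => 90
-      }
-    }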
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
-===== `truststore_password`
-
- * Value type is <<password,password>>
- * There is no default value for this setting.
-
-Specify the truststore password here.
-Note, most .jks files created with keytool require a password!
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
-===== `truststore_type`
-
- * Value type is <<string,string>>
- * Default value is `"JKS"`
-
-Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`.
-
-[id="{version}-plugins-{type}s-{plugin}-url"]
-===== `url`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The URL to send requests to.
-
-[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
-===== `validate_after_inactivity`
-
- * Value type is <<number,number>>
- * Default value is `200`
-
-How long (in milliseconds) to wait before checking whether a keepalive connection is stale prior to executing a request on it.
-You may want to set this lower, possibly to 0, if you get connection errors regularly.
-Quoting the Apache Commons docs (this client is based on Apache Commons):
-'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.'
-See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info].
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/http-v5.1.2.asciidoc b/docs/versioned-plugins/outputs/http-v5.1.2.asciidoc
deleted file mode 100644
index 2d64b3bd2..000000000
--- a/docs/versioned-plugins/outputs/http-v5.1.2.asciidoc
+++ /dev/null
@@ -1,379 +0,0 @@
-:plugin: http
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v5.1.2
-:release_date: 2018-01-09
-:changelog_url: https://github.com/logstash-plugins/logstash-output-http/blob/v5.1.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Http output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output lets you send events to a generic HTTP(S) endpoint.
-
-This output will execute up to `pool_max` requests in parallel.
-Keep this in mind when tuning the plugin for performance.
-
-Additionally, note that when parallel execution is used, strict ordering of events is not
-guaranteed!
-
-Beware, this gem does not yet support codecs. Please use the `format` option for now.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Http Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-automatic_retries>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-content_type>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-cookies>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-follow_redirects>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-format>> |<<string,string>>, one of `["json", "form", "message"]`|No
-| <<{version}-plugins-{type}s-{plugin}-headers>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-http_method>> |<<string,string>>, one of `["put", "post", "patch", "delete", "get", "head"]`|Yes
-| <<{version}-plugins-{type}s-{plugin}-ignorable_codes>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-keepalive>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<<password,password>>|No
-| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-mapping>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-message>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-retry_failed>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-retry_non_idempotent>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-retryable_codes>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<<password,password>>|No
-| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-url>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<<number,number>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-automatic_retries"]
-===== `automatic_retries`
-
- * Value type is <<number,number>>
- * Default value is `1`
-
-How many times the client should retry a failing URL. We highly recommend NOT setting this value
-to zero if keepalive is enabled. Some servers incorrectly end keepalives early, requiring a retry!
-Note: unless `retry_non_idempotent` is set, only GET, HEAD, PUT, DELETE, OPTIONS, and TRACE requests will be retried.
-
-[id="{version}-plugins-{type}s-{plugin}-cacert"]
-===== `cacert`
-
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-If you need to use a custom X.509 CA (.pem certs), specify the path to it here.
-
-[id="{version}-plugins-{type}s-{plugin}-client_cert"]
-===== `client_cert`
-
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-If you'd like to use a client certificate (note, most people don't want this), set the path to the X.509 cert here.
-
-[id="{version}-plugins-{type}s-{plugin}-client_key"]
-===== `client_key`
-
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-If you're using a client certificate, specify the path to the encryption key here.
-
-[id="{version}-plugins-{type}s-{plugin}-connect_timeout"]
-===== `connect_timeout`
-
- * Value type is <<number,number>>
- * Default value is `10`
-
-Timeout (in seconds) to wait for a connection to be established. Default is `10s`.
-
-[id="{version}-plugins-{type}s-{plugin}-content_type"]
-===== `content_type`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Content type for the request.
-
-If not specified, this defaults to the following:
-
-* if format is "json", "application/json"
-* if format is "form", "application/x-www-form-urlencoded"
-
-[id="{version}-plugins-{type}s-{plugin}-cookies"]
-===== `cookies`
-
- * Value type is <<boolean,boolean>>
- * Default value is `true`
-
-Enable cookie support. With this enabled, the client will persist cookies
-across requests as a normal web browser would. Enabled by default.
-
-[id="{version}-plugins-{type}s-{plugin}-follow_redirects"]
-===== `follow_redirects`
-
- * Value type is <<boolean,boolean>>
- * Default value is `true`
-
-Should redirects be followed? Defaults to `true`.
-
-[id="{version}-plugins-{type}s-{plugin}-format"]
-===== `format`
-
- * Value can be any of: `json`, `form`, `message`
- * Default value is `"json"`
-
-Set the format of the HTTP body.
-
-If `form`, then the body will be the mapping (or whole event) converted
-into a query parameter string, e.g. `foo=bar&baz=fizz...`
-
-If `message`, then the body will be the result of formatting the event according to `message`.
-
-Otherwise, the event is sent as JSON.
-
-[id="{version}-plugins-{type}s-{plugin}-headers"]
-===== `headers`
-
- * Value type is <<hash,hash>>
- * There is no default value for this setting.
-
-Custom headers to use.
-The format is `headers => ["X-My-Header", "%{host}"]`.
-
-[id="{version}-plugins-{type}s-{plugin}-http_compression"]
-===== `http_compression`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-Enable request compression support. With this enabled, the plugin will compress
-HTTP requests using gzip.
-
-[id="{version}-plugins-{type}s-{plugin}-http_method"]
-===== `http_method`
-
- * This is a required setting.
- * Value can be any of: `put`, `post`, `patch`, `delete`, `get`, `head`
- * There is no default value for this setting.
-
-The HTTP verb. One of "put", "post", "patch", "delete", "get", "head".
-
-[id="{version}-plugins-{type}s-{plugin}-ignorable_codes"]
-===== `ignorable_codes`
-
- * Value type is <<array,array>>
- * There is no default value for this setting.
-
-If you would like to consider some non-2xx codes to be successes,
-enumerate them here. Responses returning these codes will be considered successes.
-
-[id="{version}-plugins-{type}s-{plugin}-keepalive"]
-===== `keepalive`
-
- * Value type is <<boolean,boolean>>
- * Default value is `true`
-
-Turn this on to enable HTTP keepalive support. We highly recommend setting `automatic_retries` to at least
-one with this to fix interactions with broken keepalive implementations.
-
-[id="{version}-plugins-{type}s-{plugin}-keystore"]
-===== `keystore`
-
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-If you need to use a custom keystore (`.jks`), specify that here. This does not work with .pem keys!
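-
-Putting the certificate options together, a hypothetical mutual-TLS sketch
-(all paths are placeholders) that uses PEM files rather than a keystore:
-
-[source,ruby]
-    output {
-      http {
-        url => "https://secure.example.net/ingest"
-        http_method => "post"
-        cacert => "/etc/logstash/ca.pem"
-        client_cert => "/etc/logstash/client.pem"
-        client_key => "/etc/logstash/client.key"
-      }
-    }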
-
-[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
-===== `keystore_password`
-
- * Value type is <<password,password>>
- * There is no default value for this setting.
-
-Specify the keystore password here.
-Note, most .jks files created with keytool require a password!
-
-[id="{version}-plugins-{type}s-{plugin}-keystore_type"]
-===== `keystore_type`
-
- * Value type is <<string,string>>
- * Default value is `"JKS"`
-
-Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS`.
-
-[id="{version}-plugins-{type}s-{plugin}-mapping"]
-===== `mapping`
-
- * Value type is <<hash,hash>>
- * There is no default value for this setting.
-
-This lets you choose the structure and parts of the event that are sent.
-
-For example:
-[source,ruby]
-    mapping => {"foo" => "%{host}"
-                "bar" => "%{type}"}
-
-[id="{version}-plugins-{type}s-{plugin}-message"]
-===== `message`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The message to use as the body when `format` is set to `message`. The event is
-formatted according to this value (see the `format` option).
-
-[id="{version}-plugins-{type}s-{plugin}-pool_max"]
-===== `pool_max`
-
- * Value type is <<number,number>>
- * Default value is `50`
-
-Max number of concurrent connections. Defaults to `50`.
-
-[id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"]
-===== `pool_max_per_route`
-
- * Value type is <<number,number>>
- * Default value is `25`
-
-Max number of concurrent connections to a single host. Defaults to `25`.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy"]
-===== `proxy`
-
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-Set this if you'd like to use an HTTP proxy. This setting supports multiple configuration syntaxes:
-
-1. Proxy host in form: `http://proxy.org:1234`
-2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}`
-3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}`
-
-[id="{version}-plugins-{type}s-{plugin}-request_timeout"]
-===== `request_timeout`
-
- * Value type is <<number,number>>
- * Default value is `60`
-
-Timeout (in seconds) for the entire request. The underlying HTTP client is
-based on https://github.com/cheald/manticore[Manticore].
-
-[id="{version}-plugins-{type}s-{plugin}-retry_failed"]
-===== `retry_failed`
-
- * Value type is <<boolean,boolean>>
- * Default value is `true`
-
-Set this to `false` if you don't want this output to retry failed requests.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_non_idempotent"]
-===== `retry_non_idempotent`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-If `automatic_retries` is enabled, this will cause non-idempotent HTTP verbs (such as POST) to be retried as well.
-
-[id="{version}-plugins-{type}s-{plugin}-retryable_codes"]
-===== `retryable_codes`
-
- * Value type is <<array,array>>
- * Default value is `[429, 500, 502, 503, 504]`
-
-If one of these response codes is encountered, this plugin will retry the request.
-
-[id="{version}-plugins-{type}s-{plugin}-socket_timeout"]
-===== `socket_timeout`
-
- * Value type is <<number,number>>
- * Default value is `10`
-
-Timeout (in seconds) to wait for data on the socket. Default is `10s`.
-
-[id="{version}-plugins-{type}s-{plugin}-truststore"]
-===== `truststore`
-
- * Value type is <<path,path>>
- * There is no default value for this setting.
-
-If you need to use a custom truststore (`.jks`), specify that here. This does not work with .pem certs!
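-
-As one more illustrative sketch (the endpoint and field names are invented),
-sending only selected event fields as a form-encoded body via `mapping` and
-`format => "form"`:
-
-[source,ruby]
-    output {
-      http {
-        url => "http://example.com/submit"
-        http_method => "post"
-        format => "form"
-        mapping => {
-          "hostname" => "%{host}"
-          "event_type" => "%{type}"
-        }
-      }
-    }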
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
-===== `truststore_password`
-
- * Value type is <<password,password>>
- * There is no default value for this setting.
-
-Specify the truststore password here.
-Note, most .jks files created with keytool require a password!
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
-===== `truststore_type`
-
- * Value type is <<string,string>>
- * Default value is `"JKS"`
-
-Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`.
-
-[id="{version}-plugins-{type}s-{plugin}-url"]
-===== `url`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The URL to send requests to.
-
-[id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"]
-===== `validate_after_inactivity`
-
- * Value type is <<number,number>>
- * Default value is `200`
-
-How long (in milliseconds) to wait before checking whether a keepalive connection is stale prior to executing a request on it.
-You may want to set this lower, possibly to 0, if you get connection errors regularly.
-Quoting the Apache Commons docs (this client is based on Apache Commons):
-'Defines period of inactivity in milliseconds after which persistent connections must be re-validated prior to being leased to the consumer. Non-positive value passed to this method disables connection validation. This check helps detect connections that have become stale (half-closed) while kept inactive in the pool.'
-See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache/http/impl/conn/PoolingHttpClientConnectionManager.html#setValidateAfterInactivity(int)[these docs for more info].
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/icinga-index.asciidoc b/docs/versioned-plugins/outputs/icinga-index.asciidoc
deleted file mode 100644
index 55ee44d43..000000000
--- a/docs/versioned-plugins/outputs/icinga-index.asciidoc
+++ /dev/null
@@ -1,10 +0,0 @@
-:plugin: icinga
-:type: output
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-|=======================================================================
-
-
diff --git a/docs/versioned-plugins/outputs/influxdb-index.asciidoc b/docs/versioned-plugins/outputs/influxdb-index.asciidoc
deleted file mode 100644
index 152a1a7a9..000000000
--- a/docs/versioned-plugins/outputs/influxdb-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: influxdb
-:type: output
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <<v5.0.3-plugins-outputs-influxdb,v5.0.3>> | 2017-11-07
-| <<v5.0.2-plugins-outputs-influxdb,v5.0.2>> | 2017-08-16
-| <<v5.0.1-plugins-outputs-influxdb,v5.0.1>> | 2017-06-23
-|=======================================================================
-
-include::influxdb-v5.0.3.asciidoc[]
-include::influxdb-v5.0.2.asciidoc[]
-include::influxdb-v5.0.1.asciidoc[]
-
diff --git a/docs/versioned-plugins/outputs/influxdb-v5.0.1.asciidoc b/docs/versioned-plugins/outputs/influxdb-v5.0.1.asciidoc
deleted file mode 100644
index c758109af..000000000
--- a/docs/versioned-plugins/outputs/influxdb-v5.0.1.asciidoc
+++ /dev/null
@@ -1,270 +0,0 @@
-:plugin: influxdb
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v5.0.1
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-output-influxdb/blob/v5.0.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Influxdb output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output lets you send metrics to InfluxDB (>= 0.9.0-rc31).
-
-The configuration here attempts to be as friendly as possible
-and to minimize the need for multiple definitions to write to
-multiple measurements, while still being efficient.
-
-The InfluxDB API lets you do some semblance of bulk operations
-per HTTP call, but each call is database-specific.
-
-You can learn more at the http://influxdb.com[InfluxDB homepage].
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Influxdb Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-allow_time_override>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-coerce_values>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-data_points>> |<<hash,hash>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-db>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-exclude_fields>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-flush_size>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-idle_flush_time>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-initial_delay>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-max_retries>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-measurement>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-password>> |<<password,password>>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-retention_policy>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-send_as_tags>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-time_precision>> |<<string,string>>, one of `["n", "u", "ms", "s", "m", "h"]`|No
-| <<{version}-plugins-{type}s-{plugin}-use_event_fields_for_data_points>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-user>> |<<string,string>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-allow_time_override"]
-===== `allow_time_override`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-Allow the override of the `time` column in the event?
-
-By default any column with a name of `time` will be ignored and the time will
-be determined by the value of `@timestamp`.
-
-Setting this to `true` allows you to explicitly set the `time` column yourself.
-
-Note: **`time` must be an epoch value in either seconds, milliseconds or microseconds**
-
-[id="{version}-plugins-{type}s-{plugin}-coerce_values"]
-===== `coerce_values`
-
- * Value type is <<hash,hash>>
- * Default value is `{}`
-
-Allow value coercion. This will attempt to convert data point values to the
-appropriate type before posting; otherwise sprintf-filtered numeric values
-could get sent as strings. The format is `{'column_name' => 'datatype'}`.
-
-Currently supported datatypes are `integer` and `float`.
-
-
-[id="{version}-plugins-{type}s-{plugin}-data_points"]
-===== `data_points`
-
- * This is a required setting.
- * Value type is <<hash,hash>>
- * Default value is `{}`
-
-Hash of key/value pairs representing data points to send to the named database.
-Example: `{'column1' => 'value1', 'column2' => 'value2'}`
-
-Events for the same measurement will be batched together where possible.
-Both keys and values support sprintf formatting.
-
-[id="{version}-plugins-{type}s-{plugin}-db"]
-===== `db`
-
- * Value type is <<string,string>>
- * Default value is `"statistics"`
-
-The database to write to - supports sprintf formatting.
-
-[id="{version}-plugins-{type}s-{plugin}-exclude_fields"]
-===== `exclude_fields`
-
- * Value type is <<array,array>>
- * Default value is `["@timestamp", "@version", "sequence", "message", "type"]`
-
-An array containing the names of fields from the event to exclude from the
-data points.
-
-Events, in general, contain keys "@version" and "@timestamp". Other plugins
-may add others that you'll want to exclude (such as "command" from the
-exec plugin).
-
-This only applies when `use_event_fields_for_data_points` is true.
-
-[id="{version}-plugins-{type}s-{plugin}-flush_size"]
-===== `flush_size`
-
- * Value type is <<number,number>>
- * Default value is `100`
-
-This setting controls how many events will be buffered before sending a batch
-of events. Note that these are only batched for the same measurement.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * This is a required setting.
- * Value type is <<string,string>>
- * There is no default value for this setting.
-
-The hostname or IP address to reach your InfluxDB instance.
-
-[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"]
-===== `idle_flush_time`
-
- * Value type is <<number,number>>
- * Default value is `1`
-
-The amount of time since last flush before a flush is forced.
-
-This setting helps ensure slow event rates don't get stuck in Logstash.
-For example, if your `flush_size` is 100, and you have received 10 events,
-and it has been more than `idle_flush_time` seconds since the last flush,
-Logstash will flush those 10 events automatically.
-
-This helps keep both fast and slow log streams moving along in
-near-real-time.
-
-[id="{version}-plugins-{type}s-{plugin}-initial_delay"]
-===== `initial_delay`
-
- * Value type is <<number,number>>
- * Default value is `1`
-
-The amount of time in seconds to delay the initial retry on connection failure.
-
-The delay will increase exponentially for each retry attempt (up to `max_retries`).
-
-[id="{version}-plugins-{type}s-{plugin}-max_retries"]
-===== `max_retries`
-
- * Value type is <<number,number>>
- * Default value is `3`
-
-The number of times to retry recoverable errors before dropping the events.
-
-A value of -1 will cause the plugin to retry indefinitely.
-A value of 0 will cause the plugin to never retry.
-Otherwise it will retry up to the specified number of times.
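-
-To make the options above concrete, a hedged example (the host, database,
-measurement, and field names are illustrative) that writes two explicitly
-mapped data points and coerces their types:
-
-[source,ruby]
-    output {
-      influxdb {
-        host => "influx.example.com"
-        db => "metrics"
-        measurement => "requests"
-        data_points => {
-          "duration" => "%{duration}"
-          "status" => "%{status}"
-        }
-        coerce_values => {
-          "duration" => "float"
-          "status" => "integer"
-        }
-      }
-    }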
-
-
-[id="{version}-plugins-{type}s-{plugin}-measurement"]
-===== `measurement`
-
- * Value type is <<string,string>>
- * Default value is `"logstash"`
-
-Measurement name - supports sprintf formatting.
-
-[id="{version}-plugins-{type}s-{plugin}-password"]
-===== `password`
-
- * Value type is <<password,password>>
- * Default value is `nil`
-
-The password for the user who has access to the named database.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <<number,number>>
- * Default value is `8086`
-
-The port for InfluxDB.
-
-[id="{version}-plugins-{type}s-{plugin}-retention_policy"]
-===== `retention_policy`
-
- * Value type is <<string,string>>
- * Default value is `"autogen"`
-
-The retention policy to use.
-
-[id="{version}-plugins-{type}s-{plugin}-send_as_tags"]
-===== `send_as_tags`
-
- * Value type is <<array,array>>
- * Default value is `["host"]`
-
-An array containing the names of fields to send to InfluxDB as tags instead
-of fields. The InfluxDB 0.9 convention is that values that do not change every
-request should be considered metadata and given as tags.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl"]
-===== `ssl`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-Enable SSL/TLS secured communication to InfluxDB.
-
-[id="{version}-plugins-{type}s-{plugin}-time_precision"]
-===== `time_precision`
-
- * Value can be any of: `n`, `u`, `ms`, `s`, `m`, `h`
- * Default value is `"ms"`
-
-Set the level of precision of `time`. Only useful when overriding the time value.
-
-[id="{version}-plugins-{type}s-{plugin}-use_event_fields_for_data_points"]
-===== `use_event_fields_for_data_points`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-Automatically use fields from the event as the data points sent to InfluxDB.
-
-[id="{version}-plugins-{type}s-{plugin}-user"]
-===== `user`
-
- * Value type is <<string,string>>
- * Default value is `nil`
-
-The user who has access to the named database.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/influxdb-v5.0.2.asciidoc b/docs/versioned-plugins/outputs/influxdb-v5.0.2.asciidoc
deleted file mode 100644
index 734b8f0b0..000000000
--- a/docs/versioned-plugins/outputs/influxdb-v5.0.2.asciidoc
+++ /dev/null
@@ -1,270 +0,0 @@
-:plugin: influxdb
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v5.0.2
-:release_date: 2017-08-16
-:changelog_url: https://github.com/logstash-plugins/logstash-output-influxdb/blob/v5.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Influxdb output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output lets you send metrics to InfluxDB (>= 0.9.0-rc31).
-
-The configuration here attempts to be as friendly as possible
-and to minimize the need for multiple definitions to write to
-multiple measurements, while still being efficient.
-
-The InfluxDB API lets you do some semblance of bulk operations
-per HTTP call, but each call is database-specific.
-
-You can learn more at the http://influxdb.com[InfluxDB homepage].
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Influxdb Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-allow_time_override>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-coerce_values>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-data_points>> |<<hash,hash>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-db>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-exclude_fields>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-flush_size>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-idle_flush_time>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-initial_delay>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-max_retries>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-measurement>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-password>> |<<password,password>>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-retention_policy>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-send_as_tags>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-time_precision>> |<<string,string>>, one of `["n", "u", "ms", "s", "m", "h"]`|No
-| <<{version}-plugins-{type}s-{plugin}-use_event_fields_for_data_points>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-user>> |<<string,string>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-allow_time_override"]
-===== `allow_time_override`
-
- * Value type is <<boolean,boolean>>
- * Default value is `false`
-
-Allow the override of the `time` column in the event?
-
-By default any column with a name of `time` will be ignored and the time will
-be determined by the value of `@timestamp`.
-
-Setting this to `true` allows you to explicitly set the `time` column yourself.
-
-Note: **`time` must be an epoch value in either seconds, milliseconds or microseconds**
-
-[id="{version}-plugins-{type}s-{plugin}-coerce_values"]
-===== `coerce_values`
-
- * Value type is <<hash,hash>>
- * Default value is `{}`
-
-Allow value coercion. This will attempt to convert data point values to the
-appropriate type before posting; otherwise sprintf-filtered numeric values
-could get sent as strings. The format is `{'column_name' => 'datatype'}`.
-
-Currently supported datatypes are `integer` and `float`.
-
-
-[id="{version}-plugins-{type}s-{plugin}-data_points"]
-===== `data_points`
-
- * This is a required setting.
* Value type is <> - * Default value is `{}` - -Hash of key/value pairs representing data points to send to the named database -Example: `{'column1' => 'value1', 'column2' => 'value2'}` - -Events for the same measurement will be batched together where possible -Both keys and values support sprintf formatting - -[id="{version}-plugins-{type}s-{plugin}-db"] -===== `db` - - * Value type is <> - * Default value is `"statistics"` - -The database to write - supports sprintf formatting - -[id="{version}-plugins-{type}s-{plugin}-exclude_fields"] -===== `exclude_fields` - - * Value type is <> - * Default value is `["@timestamp", "@version", "sequence", "message", "type"]` - -An array containing the names of fields from the event to exclude from the -data points - -Events, in general, contain keys "@version" and "@timestamp". Other plugins -may add others that you'll want to exclude (such as "command" from the -exec plugin). - -This only applies when use_event_fields_for_data_points is true. - -[id="{version}-plugins-{type}s-{plugin}-flush_size"] -===== `flush_size` - - * Value type is <> - * Default value is `100` - -This setting controls how many events will be buffered before sending a batch -of events. Note that these are only batched for the same measurement - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The hostname or IP address to reach your InfluxDB instance - -[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"] -===== `idle_flush_time` - - * Value type is <> - * Default value is `1` - -The amount of time since last flush before a flush is forced. - -This setting helps ensure slow event rates don't get stuck in Logstash. -For example, if your `flush_size` is 100, and you have received 10 events, -and it has been more than `idle_flush_time` seconds since the last flush, -logstash will flush those 10 events automatically. - -This helps keep both fast and slow log streams moving along in -near-real-time. - -[id="{version}-plugins-{type}s-{plugin}-initial_delay"] -===== `initial_delay` - - * Value type is <> - * Default value is `1` - -The amount of time in seconds to delay the initial retry on connection failure. - -The delay will increase exponentially for each retry attempt (up to max_retries). - -[id="{version}-plugins-{type}s-{plugin}-max_retries"] -===== `max_retries` - - * Value type is <> - * Default value is `3` - -The number of times to retry recoverable errors before dropping the events. - -A value of -1 will cause the plugin to retry indefinitely. -A value of 0 will cause the plugin to never retry. -Otherwise it will retry up to the specified number of times.
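-As a quick sketch of how the buffering and retry settings above fit together (the host, field name, and values below are placeholders for illustration, not recommendations from this document): - -[source,ruby] - output { - influxdb { - host => "influxdb.example.com" - db => "statistics" - data_points => { "value" => "%{value}" } - flush_size => 100 - idle_flush_time => 1 - initial_delay => 2 - max_retries => 5 - } - }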
- - -[id="{version}-plugins-{type}s-{plugin}-measurement"] -===== `measurement` - - * Value type is <> - * Default value is `"logstash"` - -Measurement name - supports sprintf formatting - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * Default value is `nil` - -The password for the user who access to the named database - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `8086` - -The port for InfluxDB - -[id="{version}-plugins-{type}s-{plugin}-retention_policy"] -===== `retention_policy` - - * Value type is <> - * Default value is `"autogen"` - -The retention policy to use - -[id="{version}-plugins-{type}s-{plugin}-send_as_tags"] -===== `send_as_tags` - - * Value type is <> - * Default value is `["host"]` - -An array containing the names of fields to send to Influxdb as tags instead -of fields. Influxdb 0.9 convention is that values that do not change every -request should be considered metadata and given as tags. - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` - - * Value type is <> - * Default value is `false` - -Enable SSL/TLS secured communication to InfluxDB - -[id="{version}-plugins-{type}s-{plugin}-time_precision"] -===== `time_precision` - - * Value can be any of: `n`, `u`, `ms`, `s`, `m`, `h` - * Default value is `"ms"` - -Set the level of precision of `time` - -only useful when overriding the time value - -[id="{version}-plugins-{type}s-{plugin}-use_event_fields_for_data_points"] -===== `use_event_fields_for_data_points` - - * Value type is <> - * Default value is `false` - -Automatically use fields from the event as the data points sent to Influxdb - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * Default value is `nil` - -The user who has access to the named database - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/influxdb-v5.0.3.asciidoc b/docs/versioned-plugins/outputs/influxdb-v5.0.3.asciidoc deleted file mode 100644 index 1a06fe59f..000000000 --- a/docs/versioned-plugins/outputs/influxdb-v5.0.3.asciidoc +++ /dev/null @@ -1,270 +0,0 @@ -:plugin: influxdb -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v5.0.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-output-influxdb/blob/v5.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Influxdb output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output lets you send metrics to InfluxDB (>= 0.9.0-rc31) - -The configuration here attempts to be as friendly as possible -and minimize the need for multiple definitions to write to -multiple measurements and still be efficient - -The InfluxDB API lets you do some semblance of bulk operation -per HTTP call, but each call is database-specific - -You can learn more at http://influxdb.com[InfluxDB homepage] - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Influxdb Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-allow_time_override>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-coerce_values>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-data_points>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclude_fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-flush_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-idle_flush_time>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-initial_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_retries>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-measurement>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retention_policy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-send_as_tags>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-time_precision>> |<>, one of `["n", "u", "ms", "s", "m", "h"]`|No -| <<{version}-plugins-{type}s-{plugin}-use_event_fields_for_data_points>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-allow_time_override"] -===== `allow_time_override` - - * Value type is <> - * Default value is `false` - -Allow the override of the `time` column in the event? - -By default any column with a name of `time` will be ignored and the time will -be determined by the value of `@timestamp`. - -Setting this to `true` allows you to explicitly set the `time` column yourself - -Note: **`time` must be an epoch value in either seconds, milliseconds or microseconds** - -[id="{version}-plugins-{type}s-{plugin}-coerce_values"] -===== `coerce_values` - - * Value type is <> - * Default value is `{}` - -Allow value coercion - -this will attempt to convert data point values to the appropriate type before posting -otherwise sprintf-filtered numeric values could get sent as strings -format is `{'column_name' => 'datatype'}` - -currently supported datatypes are `integer` and `float` - - -[id="{version}-plugins-{type}s-{plugin}-data_points"] -===== `data_points` - - * This is a required setting.
* Value type is <> - * Default value is `{}` - -Hash of key/value pairs representing data points to send to the named database -Example: `{'column1' => 'value1', 'column2' => 'value2'}` - -Events for the same measurement will be batched together where possible -Both keys and values support sprintf formatting - -[id="{version}-plugins-{type}s-{plugin}-db"] -===== `db` - - * Value type is <> - * Default value is `"statistics"` - -The database to write - supports sprintf formatting - -[id="{version}-plugins-{type}s-{plugin}-exclude_fields"] -===== `exclude_fields` - - * Value type is <> - * Default value is `["@timestamp", "@version", "sequence", "message", "type"]` - -An array containing the names of fields from the event to exclude from the -data points - -Events, in general, contain keys "@version" and "@timestamp". Other plugins -may add others that you'll want to exclude (such as "command" from the -exec plugin). - -This only applies when use_event_fields_for_data_points is true. - -[id="{version}-plugins-{type}s-{plugin}-flush_size"] -===== `flush_size` - - * Value type is <> - * Default value is `100` - -This setting controls how many events will be buffered before sending a batch -of events. Note that these are only batched for the same measurement - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The hostname or IP address to reach your InfluxDB instance - -[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"] -===== `idle_flush_time` - - * Value type is <> - * Default value is `1` - -The amount of time since last flush before a flush is forced. - -This setting helps ensure slow event rates don't get stuck in Logstash. -For example, if your `flush_size` is 100, and you have received 10 events, -and it has been more than `idle_flush_time` seconds since the last flush, -logstash will flush those 10 events automatically. - -This helps keep both fast and slow log streams moving along in -near-real-time. - -[id="{version}-plugins-{type}s-{plugin}-initial_delay"] -===== `initial_delay` - - * Value type is <> - * Default value is `1` - -The amount of time in seconds to delay the initial retry on connection failure. - -The delay will increase exponentially for each retry attempt (up to max_retries). - -[id="{version}-plugins-{type}s-{plugin}-max_retries"] -===== `max_retries` - - * Value type is <> - * Default value is `3` - -The number of times to retry recoverable errors before dropping the events. - -A value of -1 will cause the plugin to retry indefinitely. -A value of 0 will cause the plugin to never retry. -Otherwise it will retry up to the specified number of times.
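-As an illustrative sketch of sending event fields as points and tags (the field names here are hypothetical examples, not defaults from this document), the relevant settings combine like this: - -[source,ruby] - output { - influxdb { - host => "localhost" - db => "statistics" - use_event_fields_for_data_points => true - exclude_fields => ["@timestamp", "@version", "message"] - send_as_tags => ["host", "environment"] - } - }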
- - -[id="{version}-plugins-{type}s-{plugin}-measurement"] -===== `measurement` - - * Value type is <> - * Default value is `"logstash"` - -Measurement name - supports sprintf formatting - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * Default value is `nil` - -The password for the user who access to the named database - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `8086` - -The port for InfluxDB - -[id="{version}-plugins-{type}s-{plugin}-retention_policy"] -===== `retention_policy` - - * Value type is <> - * Default value is `"autogen"` - -The retention policy to use - -[id="{version}-plugins-{type}s-{plugin}-send_as_tags"] -===== `send_as_tags` - - * Value type is <> - * Default value is `["host"]` - -An array containing the names of fields to send to Influxdb as tags instead -of fields. Influxdb 0.9 convention is that values that do not change every -request should be considered metadata and given as tags. - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` - - * Value type is <> - * Default value is `false` - -Enable SSL/TLS secured communication to InfluxDB - -[id="{version}-plugins-{type}s-{plugin}-time_precision"] -===== `time_precision` - - * Value can be any of: `n`, `u`, `ms`, `s`, `m`, `h` - * Default value is `"ms"` - -Set the level of precision of `time` - -only useful when overriding the time value - -[id="{version}-plugins-{type}s-{plugin}-use_event_fields_for_data_points"] -===== `use_event_fields_for_data_points` - - * Value type is <> - * Default value is `false` - -Automatically use fields from the event as the data points sent to Influxdb - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * Default value is `nil` - -The user who has access to the named database - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/irc-index.asciidoc b/docs/versioned-plugins/outputs/irc-index.asciidoc deleted file mode 100644 index 813d0eed3..000000000 --- a/docs/versioned-plugins/outputs/irc-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: irc -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::irc-v3.0.5.asciidoc[] -include::irc-v3.0.4.asciidoc[] -include::irc-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/irc-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/irc-v3.0.3.asciidoc deleted file mode 100644 index 18890070c..000000000 --- a/docs/versioned-plugins/outputs/irc-v3.0.3.asciidoc +++ /dev/null @@ -1,157 +0,0 @@ -:plugin: irc -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-irc/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Irc output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Write events to IRC - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Irc Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-channels>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-messages_per_second>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-nick>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-post_string>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pre_string>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-real>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-secure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-channels"] -===== `channels` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Channels to broadcast to. - -These should be full channel names including the '#' symbol, such as -"#logstash". - -[id="{version}-plugins-{type}s-{plugin}-format"] -===== `format` - - * Value type is <> - * Default value is `"%{message}"` - -Message format to send, event tokens are usable here - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Address of the host to connect to - -[id="{version}-plugins-{type}s-{plugin}-messages_per_second"] -===== `messages_per_second` - - * Value type is <> - * Default value is `0.5` - -Limit the rate of messages sent to IRC in messages per second. - -[id="{version}-plugins-{type}s-{plugin}-nick"] -===== `nick` - - * Value type is <> - * Default value is `"logstash"` - -IRC Nickname - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -IRC server password - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `6667` - -Port on host to connect to. - -[id="{version}-plugins-{type}s-{plugin}-post_string"] -===== `post_string` - - * Value type is <> - * There is no default value for this setting. - -Static string after event - -[id="{version}-plugins-{type}s-{plugin}-pre_string"] -===== `pre_string` - - * Value type is <> - * There is no default value for this setting. - -Static string before event - -[id="{version}-plugins-{type}s-{plugin}-real"] -===== `real` - - * Value type is <> - * Default value is `"logstash"` - -IRC Real name - -[id="{version}-plugins-{type}s-{plugin}-secure"] -===== `secure` - - * Value type is <> - * Default value is `false` - -Set this to true to enable SSL. 
- -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * Default value is `"logstash"` - -IRC Username - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/irc-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/irc-v3.0.4.asciidoc deleted file mode 100644 index bc527f22a..000000000 --- a/docs/versioned-plugins/outputs/irc-v3.0.4.asciidoc +++ /dev/null @@ -1,157 +0,0 @@ -:plugin: irc -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-irc/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Irc output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Write events to IRC - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Irc Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-channels>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-messages_per_second>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-nick>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-post_string>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pre_string>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-real>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-secure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-channels"] -===== `channels` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Channels to broadcast to. - -These should be full channel names including the '#' symbol, such as -"#logstash". - -[id="{version}-plugins-{type}s-{plugin}-format"] -===== `format` - - * Value type is <> - * Default value is `"%{message}"` - -Message format to send, event tokens are usable here - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Address of the host to connect to - -[id="{version}-plugins-{type}s-{plugin}-messages_per_second"] -===== `messages_per_second` - - * Value type is <> - * Default value is `0.5` - -Limit the rate of messages sent to IRC in messages per second. 
- -[id="{version}-plugins-{type}s-{plugin}-nick"] -===== `nick` - - * Value type is <> - * Default value is `"logstash"` - -IRC Nickname - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -IRC server password - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `6667` - -Port on host to connect to. - -[id="{version}-plugins-{type}s-{plugin}-post_string"] -===== `post_string` - - * Value type is <> - * There is no default value for this setting. - -Static string after event - -[id="{version}-plugins-{type}s-{plugin}-pre_string"] -===== `pre_string` - - * Value type is <> - * There is no default value for this setting. - -Static string before event - -[id="{version}-plugins-{type}s-{plugin}-real"] -===== `real` - - * Value type is <> - * Default value is `"logstash"` - -IRC Real name - -[id="{version}-plugins-{type}s-{plugin}-secure"] -===== `secure` - - * Value type is <> - * Default value is `false` - -Set this to true to enable SSL. - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * Default value is `"logstash"` - -IRC Username - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/irc-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/irc-v3.0.5.asciidoc deleted file mode 100644 index 1b27da973..000000000 --- a/docs/versioned-plugins/outputs/irc-v3.0.5.asciidoc +++ /dev/null @@ -1,157 +0,0 @@ -:plugin: irc -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-output-irc/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Irc output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Write events to IRC - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Irc Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-channels>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-format>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-messages_per_second>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-nick>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-post_string>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pre_string>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-real>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-secure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. 
- -  - -[id="{version}-plugins-{type}s-{plugin}-channels"] -===== `channels` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Channels to broadcast to. - -These should be full channel names including the '#' symbol, such as -"#logstash". - -[id="{version}-plugins-{type}s-{plugin}-format"] -===== `format` - - * Value type is <> - * Default value is `"%{message}"` - -Message format to send, event tokens are usable here - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Address of the host to connect to - -[id="{version}-plugins-{type}s-{plugin}-messages_per_second"] -===== `messages_per_second` - - * Value type is <> - * Default value is `0.5` - -Limit the rate of messages sent to IRC in messages per second. - -[id="{version}-plugins-{type}s-{plugin}-nick"] -===== `nick` - - * Value type is <> - * Default value is `"logstash"` - -IRC Nickname - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -IRC server password - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `6667` - -Port on host to connect to. - -[id="{version}-plugins-{type}s-{plugin}-post_string"] -===== `post_string` - - * Value type is <> - * There is no default value for this setting. - -Static string after event - -[id="{version}-plugins-{type}s-{plugin}-pre_string"] -===== `pre_string` - - * Value type is <> - * There is no default value for this setting. - -Static string before event - -[id="{version}-plugins-{type}s-{plugin}-real"] -===== `real` - - * Value type is <> - * Default value is `"logstash"` - -IRC Real name - -[id="{version}-plugins-{type}s-{plugin}-secure"] -===== `secure` - - * Value type is <> - * Default value is `false` - -Set this to true to enable SSL. - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * Default value is `"logstash"` - -IRC Username - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/jira-index.asciidoc b/docs/versioned-plugins/outputs/jira-index.asciidoc deleted file mode 100644 index f14aedef8..000000000 --- a/docs/versioned-plugins/outputs/jira-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: jira -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::jira-v3.0.3.asciidoc[] -include::jira-v3.0.2.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/jira-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/jira-v3.0.2.asciidoc deleted file mode 100644 index 67c0e63c6..000000000 --- a/docs/versioned-plugins/outputs/jira-v3.0.2.asciidoc +++ /dev/null @@ -1,195 +0,0 @@ -:plugin: jira -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v3.0.2 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-jira/blob/v3.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Jira output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output allows you to use Logstash to parse and structure -your logs and ship structured event data to JIRA. - -Structured event data will be added to the JIRA issue as the 'Description' field value. - -Example JSON-encoded event: - -[source,yaml] ------------------------------------------------------------------------------ - { - "message": "Hello JIRA!", - "@version": "1", - "@timestamp": "2015-06-04T10:23:30.279Z", - "type": "syslog", - "host": "192.168.1.42", - "syslog_pri": "11", - "syslog_timestamp": "Jun 4 14:23:30", - "syslog_host": "myhost", - "program": "root", - "syslog_severity_code": 3, - "syslog_facility_code": 1, - "syslog_facility": "user-level", - "syslog_severity": "error" - } ------------------------------------------------------------------------------ - -Example JIRA issue created from the event above: - -[source,shell] ------------------------------------------------------------------------------ - Type: Task - Priority: 2 - Major - Status: TO DO - Resolution: Unresolved - Summary: [logstash] Hello JIRA! - Description: - --- - message: Hello JIRA! - '@version': '1' - '@timestamp': 2015-06-04 10:23:30.279000000 Z - type: syslog - host: 192.168.1.42 - syslog_pri: '11' - syslog_timestamp: Jun 4 14:23:30 - syslog_host: myhost - program: root - syslog_severity_code: 3 - syslog_facility_code: 1 - syslog_facility: user-level - syslog_severity: error ------------------------------------------------------------------------------ - -To use this output you'll need to ensure that your JIRA instance allows REST calls. - -This output uses `jiralicious` as the bridge to JIRA -By Martin Cleaver, Blended Perspectives -with a lot of help from 'electrical' in #logstash. - -Origin -and -via . - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Jira Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-assignee>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-issuetypeid>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-priority>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-projectid>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-reporter>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-summary>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-username>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-assignee"] -===== `assignee` - - * Value type is <> - * There is no default value for this setting.
- -JIRA Assignee - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * There is no default value for this setting. - -The hostname to send logs to. This should target your JIRA server -and has to have the REST interface enabled. - -[id="{version}-plugins-{type}s-{plugin}-issuetypeid"] -===== `issuetypeid` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -JIRA Issuetype number - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-priority"] -===== `priority` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -JIRA Priority - -[id="{version}-plugins-{type}s-{plugin}-projectid"] -===== `projectid` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Jiralicious has no proxy support -JIRA Project number - -[id="{version}-plugins-{type}s-{plugin}-reporter"] -===== `reporter` - - * Value type is <> - * There is no default value for this setting. - -JIRA Reporter - -[id="{version}-plugins-{type}s-{plugin}-summary"] -===== `summary` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -JIRA Summary - -Truncated and appended with '...' if longer than 255 characters. - -[id="{version}-plugins-{type}s-{plugin}-username"] -===== `username` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - - - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/jira-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/jira-v3.0.3.asciidoc deleted file mode 100644 index e32e496b7..000000000 --- a/docs/versioned-plugins/outputs/jira-v3.0.3.asciidoc +++ /dev/null @@ -1,195 +0,0 @@ -:plugin: jira -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-jira/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Jira output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output allows you to use Logstash to parse and structure -your logs and ship structured event data to JIRA. - -Structured event data will be added to the JIRA issue as the 'Description' field value.
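-A bare-bones configuration sketch showing the required options (every value below is a placeholder; project, issue type, and priority IDs are specific to your JIRA instance): - -[source,ruby] - output { - jira { - host => "jira.example.com" - username => "logstash" - password => "changeme" - projectid => "10001" - issuetypeid => "3" - priority => "2" - summary => "[logstash] %{message}" - } - }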
- -Example JSON-encoded event: - -[source,yaml] ------------------------------------------------------------------------------ - { - "message": "Hello JIRA!", - "@version": "1", - "@timestamp": "2015-06-04T10:23:30.279Z", - "type": "syslog", - "host": "192.168.1.42", - "syslog_pri": "11", - "syslog_timestamp": "Jun 4 14:23:30", - "syslog_host": "myhost", - "program": "root", - "syslog_severity_code": 3, - "syslog_facility_code": 1, - "syslog_facility": "user-level", - "syslog_severity": "error" - } ------------------------------------------------------------------------------ - -Example JIRA issue created from the event above: - -[source,shell] ------------------------------------------------------------------------------ - Type: Task - Priority: 2 - Major - Status: TO DO - Resolution: Unresolved - Summary: [logstash] Hello JIRA! - Description: - --- - message: Hello JIRA! - '@version': '1' - '@timestamp': 2015-06-04 10:23:30.279000000 Z - type: syslog - host: 192.168.1.42 - syslog_pri: '11' - syslog_timestamp: Jun 4 14:23:30 - syslog_host: myhost - program: root - syslog_severity_code: 3 - syslog_facility_code: 1 - syslog_facility: user-level - syslog_severity: error ------------------------------------------------------------------------------ - -To use this output you'll need to ensure that your JIRA instance allows REST calls. - -This output uses `jiralicious` as the bridge to JIRA -By Martin Cleaver, Blended Perspectives -with a lot of help from 'electrical' in #logstash. - -Origin -and -via . - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Jira Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-assignee>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-issuetypeid>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-priority>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-projectid>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-reporter>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-summary>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-username>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-assignee"] -===== `assignee` - - * Value type is <> - * There is no default value for this setting. - -JIRA Assignee - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * There is no default value for this setting. - -The hostname to send logs to. This should target your JIRA server -and has to have the REST interface enabled. - -[id="{version}-plugins-{type}s-{plugin}-issuetypeid"] -===== `issuetypeid` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -JIRA Issuetype number - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-priority"] -===== `priority` - - * This is a required setting.
* Value type is <> - * There is no default value for this setting. - -JIRA Priority - -[id="{version}-plugins-{type}s-{plugin}-projectid"] -===== `projectid` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Jiralicious has no proxy support -JIRA Project number - -[id="{version}-plugins-{type}s-{plugin}-reporter"] -===== `reporter` - - * Value type is <> - * There is no default value for this setting. - -JIRA Reporter - -[id="{version}-plugins-{type}s-{plugin}-summary"] -===== `summary` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -JIRA Summary - -Truncated and appended with '...' if longer than 255 characters. - -[id="{version}-plugins-{type}s-{plugin}-username"] -===== `username` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - - - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/jms-index.asciidoc b/docs/versioned-plugins/outputs/jms-index.asciidoc deleted file mode 100644 index 4c24c6414..000000000 --- a/docs/versioned-plugins/outputs/jms-index.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -:plugin: jms -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-06-23 -|======================================================================= - -include::jms-v3.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/jms-v3.0.1.asciidoc b/docs/versioned-plugins/outputs/jms-v3.0.1.asciidoc deleted file mode 100644 index 444a5af7e..000000000 --- a/docs/versioned-plugins/outputs/jms-v3.0.1.asciidoc +++ /dev/null @@ -1,175 +0,0 @@ -:plugin: jms -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-jms/blob/v3.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Jms output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Write events to a Jms Broker. Supports both Jms Queues and Topics. - -For more information about Jms, see -For more information about the Ruby Gem used, see -Here is a config example: - jms { - include_header => false - include_properties => false - include_body => true - use_jms_timestamp => false - queue_name => "myqueue" - yaml_file => "~/jms.yml" - yaml_section => "mybroker" - } - - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Jms Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-broker_url>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-delivery_mode>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-factory>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jndi_context>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-jndi_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pub_sub>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-require_jars>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-username>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-yaml_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-yaml_section>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-broker_url"] -===== `broker_url` - - * Value type is <> - * There is no default value for this setting. - -Url to use when connecting to the JMS provider - -[id="{version}-plugins-{type}s-{plugin}-delivery_mode"] -===== `delivery_mode` - - * Value type is <> - * Default value is `nil` - -Name of delivery mode to use -Options are "persistent" and "non_persistent" if not defined nothing will be passed. - -[id="{version}-plugins-{type}s-{plugin}-destination"] -===== `destination` - - * Value type is <> - * There is no default value for this setting. - -Name of the destination queue or topic to use. -Mandatory - -[id="{version}-plugins-{type}s-{plugin}-factory"] -===== `factory` - - * Value type is <> - * There is no default value for this setting. - -Name of JMS Provider Factory class - -[id="{version}-plugins-{type}s-{plugin}-jndi_context"] -===== `jndi_context` - - * Value type is <> - * There is no default value for this setting. - -Mandatory if jndi lookup is being used, -contains details on how to connect to JNDI server - -[id="{version}-plugins-{type}s-{plugin}-jndi_name"] -===== `jndi_name` - - * Value type is <> - * There is no default value for this setting. - -Name of JNDI entry at which the Factory can be found - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password to use when connecting to the JMS provider - -[id="{version}-plugins-{type}s-{plugin}-pub_sub"] -===== `pub_sub` - - * Value type is <> - * Default value is `false` - -If pub-sub (topic) style should be used or not. -Mandatory - -[id="{version}-plugins-{type}s-{plugin}-require_jars"] -===== `require_jars` - - * Value type is <> - * There is no default value for this setting. - -If you do not use an yaml configuration use either the factory or jndi_name. -An optional array of Jar file names to load for the specified -JMS provider. By using this option it is not necessary -to put all the JMS Provider specific jar files into the -java CLASSPATH prior to starting Logstash. - -[id="{version}-plugins-{type}s-{plugin}-username"] -===== `username` - - * Value type is <> - * There is no default value for this setting. - -Username to connect to JMS provider with - -[id="{version}-plugins-{type}s-{plugin}-yaml_file"] -===== `yaml_file` - - * Value type is <> - * There is no default value for this setting. 
- -Yaml config file - -[id="{version}-plugins-{type}s-{plugin}-yaml_section"] -===== `yaml_section` - - * Value type is <> - * There is no default value for this setting. - -Yaml config file section name -For some known examples, see: https://github.com/reidmorrison/jruby-jms/blob/master/examples/jms.yml[Example jms.yml] - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/juggernaut-index.asciidoc b/docs/versioned-plugins/outputs/juggernaut-index.asciidoc deleted file mode 100644 index ff9fdb2b9..000000000 --- a/docs/versioned-plugins/outputs/juggernaut-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: juggernaut -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::juggernaut-v3.0.5.asciidoc[] -include::juggernaut-v3.0.4.asciidoc[] -include::juggernaut-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/juggernaut-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/juggernaut-v3.0.3.asciidoc deleted file mode 100644 index 844c8944e..000000000 --- a/docs/versioned-plugins/outputs/juggernaut-v3.0.3.asciidoc +++ /dev/null @@ -1,115 +0,0 @@ -:plugin: juggernaut -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-juggernaut/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Juggernaut output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Push messages to the juggernaut websockets server: - -* https://github.com/maccman/juggernaut - -Wraps Websockets and supports other methods (including xhr longpolling) This -is basically, just an extension of the redis output (Juggernaut pulls -messages from redis). But it pushes messages to a particular channel and -formats the messages in the way juggernaut expects. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Juggernaut Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-channels>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message_format>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins.
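-For orientation, a minimal configuration sketch (the channel name is illustrative; the host and port shown are the documented redis defaults): - -[source,ruby] - output { - juggernaut { - host => "127.0.0.1" - port => 6379 - channels => ["logstash-%{type}"] - message_format => "%{message}" - } - }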
- -  - -[id="{version}-plugins-{type}s-{plugin}-channels"] -===== `channels` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -List of channels to which to publish. Dynamic names are -valid here, for example `logstash-%{type}`. - -[id="{version}-plugins-{type}s-{plugin}-db"] -===== `db` - - * Value type is <> - * Default value is `0` - -The redis database number. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"127.0.0.1"` - -The hostname of the redis server to which juggernaut is listening. - -[id="{version}-plugins-{type}s-{plugin}-message_format"] -===== `message_format` - - * Value type is <> - * There is no default value for this setting. - -How should the message be formatted before pushing to the websocket. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password to authenticate with. There is no authentication by default. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `6379` - -The port to connect on. - -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * Default value is `5` - -Redis initial connection timeout in seconds. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/juggernaut-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/juggernaut-v3.0.4.asciidoc deleted file mode 100644 index 471d40797..000000000 --- a/docs/versioned-plugins/outputs/juggernaut-v3.0.4.asciidoc +++ /dev/null @@ -1,115 +0,0 @@ -:plugin: juggernaut -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-juggernaut/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Juggernaut output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Push messages to the juggernaut websockets server: - -* https://github.com/maccman/juggernaut - -Wraps Websockets and supports other methods (including xhr longpolling) This -is basically, just an extension of the redis output (Juggernaut pulls -messages from redis). But it pushes messages to a particular channel and -formats the messages in the way juggernaut expects. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Juggernaut Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-channels>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message_format>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-channels"] -===== `channels` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -List of channels to which to publish. Dynamic names are -valid here, for example `logstash-%{type}`. - -[id="{version}-plugins-{type}s-{plugin}-db"] -===== `db` - - * Value type is <> - * Default value is `0` - -The redis database number. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"127.0.0.1"` - -The hostname of the redis server to which juggernaut is listening. - -[id="{version}-plugins-{type}s-{plugin}-message_format"] -===== `message_format` - - * Value type is <> - * There is no default value for this setting. - -How should the message be formatted before pushing to the websocket. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password to authenticate with. There is no authentication by default. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `6379` - -The port to connect on. - -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * Default value is `5` - -Redis initial connection timeout in seconds. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/juggernaut-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/juggernaut-v3.0.5.asciidoc deleted file mode 100644 index eb81df870..000000000 --- a/docs/versioned-plugins/outputs/juggernaut-v3.0.5.asciidoc +++ /dev/null @@ -1,115 +0,0 @@ -:plugin: juggernaut -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-output-juggernaut/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Juggernaut output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Push messages to the juggernaut websockets server: - -* https://github.com/maccman/juggernaut - -Wraps Websockets and supports other methods (including xhr longpolling) This -is basically, just an extension of the redis output (Juggernaut pulls -messages from redis). But it pushes messages to a particular channel and -formats the messages in the way juggernaut expects. 
- -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Juggernaut Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-channels>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message_format>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-channels"] -===== `channels` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -List of channels to which to publish. Dynamic names are -valid here, for example `logstash-%{type}`. - -[id="{version}-plugins-{type}s-{plugin}-db"] -===== `db` - - * Value type is <> - * Default value is `0` - -The redis database number. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"127.0.0.1"` - -The hostname of the redis server to which juggernaut is listening. - -[id="{version}-plugins-{type}s-{plugin}-message_format"] -===== `message_format` - - * Value type is <> - * There is no default value for this setting. - -How should the message be formatted before pushing to the websocket. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password to authenticate with. There is no authentication by default. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `6379` - -The port to connect on. - -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * Default value is `5` - -Redis initial connection timeout in seconds. 
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/kafka-index.asciidoc b/docs/versioned-plugins/outputs/kafka-index.asciidoc deleted file mode 100644 index 774f3ab87..000000000 --- a/docs/versioned-plugins/outputs/kafka-index.asciidoc +++ /dev/null @@ -1,26 +0,0 @@ -:plugin: kafka -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2018-01-05 -| <> | 2017-10-25 -| <> | 2017-10-09 -| <> | 2017-08-16 -| <> | 2017-08-01 -| <> | 2017-07-11 -| <> | 2017-06-23 -| <> | 2017-05-11 -|======================================================================= - -include::kafka-v7.0.6.asciidoc[] -include::kafka-v7.0.4.asciidoc[] -include::kafka-v7.0.3.asciidoc[] -include::kafka-v7.0.1.asciidoc[] -include::kafka-v7.0.0.asciidoc[] -include::kafka-v6.2.2.asciidoc[] -include::kafka-v6.2.1.asciidoc[] -include::kafka-v6.2.0.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/kafka-v6.2.0.asciidoc b/docs/versioned-plugins/outputs/kafka-v6.2.0.asciidoc deleted file mode 100644 index 9f5e0fe01..000000000 --- a/docs/versioned-plugins/outputs/kafka-v6.2.0.asciidoc +++ /dev/null @@ -1,449 +0,0 @@ -:plugin: kafka -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v6.2.0 -:release_date: 2017-05-11 -:changelog_url: https://github.com/logstash-plugins/logstash-output-kafka/blob/v6.2.0/CHANGELOG.md -:include_path: ../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Kafka - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Write events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on -the broker. - -Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination -of Logstash and the Kafka output plugin: - -[options="header"] -|========================================================== -|Kafka Client Version |Logstash Version |Plugin Version |Why? -|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular -|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`) -|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`) -|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker -|0.10.1.x |2.4.x - 5.x.x | 6.x.x | -|========================================================== - -NOTE: We recommended that you use matching Kafka client and broker versions. During upgrades, you should -upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker -is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around. - -This output supports connecting to Kafka over: - -* SSL (requires plugin version 3.0.0 or later) -* Kerberos SASL (requires plugin version 5.1.0 or later) - -By default security is disabled but can be turned on as needed. - -The only required configuration is the topic_id. The default codec is plain, -so events will be persisted on the broker in plain format. 
Logstash will encode your messages with not -only the message but also with a timestamp and hostname. If you do not want anything but your message -passing through, you should make the output configuration something like: -[source,ruby] - output { - kafka { - codec => plain { - format => "%{message}" - } - topic_id => "mytopic" - } - } -For more information see http://kafka.apache.org/documentation.html#theproducer - -Kafka producer configuration: http://kafka.apache.org/documentation.html#newproducerconfigs - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Kafka Output Configuration Options - -This plugin supports the following configuration options plus the <> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-acks>> |<>, one of `["0", "1", "all"]`|No -| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-buffer_memory>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-compression_type>> |<>, one of `["none", "gzip", "snappy", "lz4"]`|No -| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-key_serializer>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-linger_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_request_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retries>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No -| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topic_id>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-value_serializer>> |<>|No -|======================================================================= - -Also see <> for a list of options supported by all -output plugins. 
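To make the table concrete, here is a sketch of a throughput-leaning configuration built only from settings documented in this section. The broker list and the tuning values are illustrative, not recommendations:

[source,ruby]
----------------------------------
output {
  kafka {
    topic_id          => "mytopic"
    bootstrap_servers => "kafka1:9092,kafka2:9092"   # illustrative broker list
    acks              => "1"       # leader-only acknowledgement
    compression_type  => "snappy"
    batch_size        => 16384     # batch size in bytes
    linger_ms         => 5         # small artificial delay to encourage batching
    retries           => 3         # resend on potentially transient errors
  }
}
----------------------------------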
- -  - -[id="{version}-plugins-{type}s-{plugin}-acks"] -===== `acks` - - * Value can be any of: `0`, `1`, `all` - * Default value is `"1"` - -The number of acknowledgments the producer requires the leader to have received -before considering a request complete. - -acks=0, the producer will not wait for any acknowledgment from the server at all. -acks=1, This will mean the leader will write the record to its local log but - will respond without awaiting full acknowledgement from all followers. -acks=all, This means the leader will wait for the full set of in-sync replicas to acknowledge the record. - -[id="{version}-plugins-{type}s-{plugin}-batch_size"] -===== `batch_size` - - * Value type is <> - * Default value is `16384` - -The producer will attempt to batch records together into fewer requests whenever multiple -records are being sent to the same partition. This helps performance on both the client -and the server. This configuration controls the default batch size in bytes. - -[id="{version}-plugins-{type}s-{plugin}-block_on_buffer_full"] -===== `block_on_buffer_full` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * Default value is `true` - -When our memory buffer is exhausted we must either stop accepting new -records (block) or throw errors. By default this setting is true and we block, -however in some scenarios blocking is not desirable and it is better to immediately give an error. - -[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"] -===== `bootstrap_servers` - - * Value type is <> - * Default value is `"localhost:9092"` - -This is for bootstrapping and the producer will only use it for getting metadata (topics, -partitions and replicas). The socket connections for sending the actual data will be -established based on the broker information returned in the metadata. The format is -`host1:port1,host2:port2`, and the list can be a subset of brokers or a VIP pointing to a -subset of brokers. - -[id="{version}-plugins-{type}s-{plugin}-buffer_memory"] -===== `buffer_memory` - - * Value type is <> - * Default value is `33554432` - -The total bytes of memory the producer can use to buffer records waiting to be sent to the server. - -[id="{version}-plugins-{type}s-{plugin}-client_id"] -===== `client_id` - - * Value type is <> - * There is no default value for this setting. - -The id string to pass to the server when making requests. -The purpose of this is to be able to track the source of requests beyond just -ip/port by allowing a logical application name to be included with the request - -[id="{version}-plugins-{type}s-{plugin}-compression_type"] -===== `compression_type` - - * Value can be any of: `none`, `gzip`, `snappy`, `lz4` - * Default value is `"none"` - -The compression type for all data generated by the producer. -The default is none (i.e. no compression). Valid values are none, gzip, or snappy. - -[id="{version}-plugins-{type}s-{plugin}-jaas_path"] -===== `jaas_path` - - * Value type is <> - * There is no default value for this setting. - -The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization -services for Kafka. This setting provides the path to the JAAS file. 
Sample JAAS file for Kafka client: -[source,java] ----------------------------------- -KafkaClient { - com.sun.security.auth.module.Krb5LoginModule required - useTicketCache=true - renewTicket=true - serviceName="kafka"; - }; ----------------------------------- - -Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these -to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same -`jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on -different JVM instances. - -[id="{version}-plugins-{type}s-{plugin}-kerberos_config"] -===== `kerberos_config` - - * Value type is <> - * There is no default value for this setting. - -Optional path to kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html - -[id="{version}-plugins-{type}s-{plugin}-key_serializer"] -===== `key_serializer` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringSerializer"` - -Serializer class for the key of the message - -[id="{version}-plugins-{type}s-{plugin}-linger_ms"] -===== `linger_ms` - - * Value type is <> - * Default value is `0` - -The producer groups together any records that arrive in between request -transmissions into a single batched request. Normally this occurs only under -load when records arrive faster than they can be sent out. However in some circumstances -the client may want to reduce the number of requests even under moderate load. -This setting accomplishes this by adding a small amount of artificial delay—that is, -rather than immediately sending out a record the producer will wait for up to the given delay -to allow other records to be sent so that the sends can be batched together. - -[id="{version}-plugins-{type}s-{plugin}-max_request_size"] -===== `max_request_size` - - * Value type is <> - * Default value is `1048576` - -The maximum size of a request - -[id="{version}-plugins-{type}s-{plugin}-message_key"] -===== `message_key` - - * Value type is <> - * There is no default value for this setting. - -The key for the message - -[id="{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms"] -===== `metadata_fetch_timeout_ms` - - * Value type is <> - * Default value is `60000` - -the timeout setting for initial metadata request to fetch topic metadata. - -[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"] -===== `metadata_max_age_ms` - - * Value type is <> - * Default value is `300000` - -the max time in milliseconds before a metadata refresh is forced. - -[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] -===== `receive_buffer_bytes` - - * Value type is <> - * Default value is `32768` - -The size of the TCP receive buffer to use when reading data - -[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"] -===== `reconnect_backoff_ms` - - * Value type is <> - * Default value is `10` - -The amount of time to wait before attempting to reconnect to a given host when a connection fails. - -[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"] -===== `request_timeout_ms` - - * Value type is <> - * There is no default value for this setting. - -The configuration controls the maximum amount of time the client will wait -for the response of a request. 
If the response is not received before the timeout -elapses the client will resend the request if necessary or fail the request if -retries are exhausted. - -[id="{version}-plugins-{type}s-{plugin}-retries"] -===== `retries` - - * Value type is <> - * Default value is `0` - -Setting a value greater than zero will cause the client to -resend any record whose send fails with a potentially transient error. - -[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"] -===== `retry_backoff_ms` - - * Value type is <> - * Default value is `100` - -The amount of time to wait before attempting to retry a failed produce request to a given topic partition. - -[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"] -===== `sasl_kerberos_service_name` - - * Value type is <> - * There is no default value for this setting. - -The Kerberos principal name that Kafka broker runs as. -This can be defined either in Kafka's JAAS config or in Kafka's config. - -[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"] -===== `sasl_mechanism` - - * Value type is <> - * Default value is `"GSSAPI"` - -http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections. -This may be any mechanism for which a security provider is available. -GSSAPI is the default mechanism. - -[id="{version}-plugins-{type}s-{plugin}-security_protocol"] -===== `security_protocol` - - * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL` - * Default value is `"PLAINTEXT"` - -Security protocol to use, which can be either of PLAINTEXT,SSL,SASL_PLAINTEXT,SASL_SSL - -[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"] -===== `send_buffer_bytes` - - * Value type is <> - * Default value is `131072` - -The size of the TCP send buffer to use when sending data. - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * Default value is `false` - -Enable SSL/TLS secured communication to Kafka broker. - -[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"] -===== `ssl_key_password` - - * Value type is <> - * There is no default value for this setting. - -The password of the private key in the key store file. - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"] -===== `ssl_keystore_location` - - * Value type is <> - * There is no default value for this setting. - -If client authentication is required, this setting stores the keystore path. - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"] -===== `ssl_keystore_password` - - * Value type is <> - * There is no default value for this setting. - -If client authentication is required, this setting stores the keystore password - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"] -===== `ssl_keystore_type` - - * Value type is <> - * There is no default value for this setting. - -The keystore type. - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"] -===== `ssl_truststore_location` - - * Value type is <> - * There is no default value for this setting. - -The JKS truststore path to validate the Kafka broker's certificate. - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"] -===== `ssl_truststore_password` - - * Value type is <> - * There is no default value for this setting. 
- -The truststore password - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"] -===== `ssl_truststore_type` - - * Value type is <> - * There is no default value for this setting. - -The truststore type. - -[id="{version}-plugins-{type}s-{plugin}-timeout_ms"] -===== `timeout_ms` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * Default value is `30000` - -The configuration controls the maximum amount of time the server will wait for acknowledgments -from followers to meet the acknowledgment requirements the producer has specified with the -acks configuration. If the requested number of acknowledgments are not met when the timeout -elapses an error will be returned. This timeout is measured on the server side and does not -include the network latency of the request. - -[id="{version}-plugins-{type}s-{plugin}-topic_id"] -===== `topic_id` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The topic to produce messages to - -[id="{version}-plugins-{type}s-{plugin}-value_serializer"] -===== `value_serializer` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringSerializer"` - -Serializer class for the value of the message - - - -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/kafka-v6.2.1.asciidoc b/docs/versioned-plugins/outputs/kafka-v6.2.1.asciidoc deleted file mode 100644 index 2dda41534..000000000 --- a/docs/versioned-plugins/outputs/kafka-v6.2.1.asciidoc +++ /dev/null @@ -1,450 +0,0 @@ -:plugin: kafka -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v6.2.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-kafka/blob/v6.2.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Kafka output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Write events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on -the broker. - -Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination -of Logstash and the Kafka output plugin: - -[options="header"] -|========================================================== -|Kafka Client Version |Logstash Version |Plugin Version |Why? -|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular -|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`) -|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`) -|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker -|0.10.1.x |2.4.x - 5.x.x | 6.x.x | -|========================================================== - -NOTE: We recommended that you use matching Kafka client and broker versions. During upgrades, you should -upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker -is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around. 
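Jumping ahead briefly: the SSL-related settings documented later in this section combine roughly as in the following sketch. This is a sketch only; the broker address, truststore path, and password are illustrative:

[source,ruby]
----------------------------------
output {
  kafka {
    topic_id                => "mytopic"
    bootstrap_servers       => "kafka1:9093"   # illustrative TLS listener
    security_protocol       => "SSL"
    ssl_truststore_location => "/etc/logstash/kafka.client.truststore.jks"   # illustrative path
    ssl_truststore_password => "changeit"      # illustrative
  }
}
----------------------------------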
- -This output supports connecting to Kafka over: - -* SSL (requires plugin version 3.0.0 or later) -* Kerberos SASL (requires plugin version 5.1.0 or later) - -By default security is disabled but can be turned on as needed. - -The only required configuration is the topic_id. The default codec is plain, -so events will be persisted on the broker in plain format. Logstash will encode your messages with not -only the message but also with a timestamp and hostname. If you do not want anything but your message -passing through, you should make the output configuration something like: -[source,ruby] - output { - kafka { - codec => plain { - format => "%{message}" - } - topic_id => "mytopic" - } - } -For more information see http://kafka.apache.org/documentation.html#theproducer - -Kafka producer configuration: http://kafka.apache.org/documentation.html#newproducerconfigs - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Kafka Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-acks>> |<>, one of `["0", "1", "all"]`|No -| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-buffer_memory>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-compression_type>> |<>, one of `["none", "gzip", "snappy", "lz4"]`|No -| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-key_serializer>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-linger_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_request_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retries>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No -| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topic_id>> |<>|Yes -| 
<<{version}-plugins-{type}s-{plugin}-value_serializer>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-acks"] -===== `acks` - - * Value can be any of: `0`, `1`, `all` - * Default value is `"1"` - -The number of acknowledgments the producer requires the leader to have received -before considering a request complete. - -acks=0, the producer will not wait for any acknowledgment from the server at all. -acks=1, This will mean the leader will write the record to its local log but - will respond without awaiting full acknowledgement from all followers. -acks=all, This means the leader will wait for the full set of in-sync replicas to acknowledge the record. - -[id="{version}-plugins-{type}s-{plugin}-batch_size"] -===== `batch_size` - - * Value type is <> - * Default value is `16384` - -The producer will attempt to batch records together into fewer requests whenever multiple -records are being sent to the same partition. This helps performance on both the client -and the server. This configuration controls the default batch size in bytes. - -[id="{version}-plugins-{type}s-{plugin}-block_on_buffer_full"] -===== `block_on_buffer_full` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * Default value is `true` - -When our memory buffer is exhausted we must either stop accepting new -records (block) or throw errors. By default this setting is true and we block, -however in some scenarios blocking is not desirable and it is better to immediately give an error. - -[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"] -===== `bootstrap_servers` - - * Value type is <> - * Default value is `"localhost:9092"` - -This is for bootstrapping and the producer will only use it for getting metadata (topics, -partitions and replicas). The socket connections for sending the actual data will be -established based on the broker information returned in the metadata. The format is -`host1:port1,host2:port2`, and the list can be a subset of brokers or a VIP pointing to a -subset of brokers. - -[id="{version}-plugins-{type}s-{plugin}-buffer_memory"] -===== `buffer_memory` - - * Value type is <> - * Default value is `33554432` - -The total bytes of memory the producer can use to buffer records waiting to be sent to the server. - -[id="{version}-plugins-{type}s-{plugin}-client_id"] -===== `client_id` - - * Value type is <> - * There is no default value for this setting. - -The id string to pass to the server when making requests. -The purpose of this is to be able to track the source of requests beyond just -ip/port by allowing a logical application name to be included with the request - -[id="{version}-plugins-{type}s-{plugin}-compression_type"] -===== `compression_type` - - * Value can be any of: `none`, `gzip`, `snappy`, `lz4` - * Default value is `"none"` - -The compression type for all data generated by the producer. -The default is none (i.e. no compression). Valid values are none, gzip, or snappy. - -[id="{version}-plugins-{type}s-{plugin}-jaas_path"] -===== `jaas_path` - - * Value type is <> - * There is no default value for this setting. - -The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization -services for Kafka. This setting provides the path to the JAAS file. 
Sample JAAS file for Kafka client: -[source,java] ----------------------------------- -KafkaClient { - com.sun.security.auth.module.Krb5LoginModule required - useTicketCache=true - renewTicket=true - serviceName="kafka"; - }; ----------------------------------- - -Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these -to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same -`jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on -different JVM instances. - -[id="{version}-plugins-{type}s-{plugin}-kerberos_config"] -===== `kerberos_config` - - * Value type is <> - * There is no default value for this setting. - -Optional path to kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html - -[id="{version}-plugins-{type}s-{plugin}-key_serializer"] -===== `key_serializer` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringSerializer"` - -Serializer class for the key of the message - -[id="{version}-plugins-{type}s-{plugin}-linger_ms"] -===== `linger_ms` - - * Value type is <> - * Default value is `0` - -The producer groups together any records that arrive in between request -transmissions into a single batched request. Normally this occurs only under -load when records arrive faster than they can be sent out. However in some circumstances -the client may want to reduce the number of requests even under moderate load. -This setting accomplishes this by adding a small amount of artificial delay—that is, -rather than immediately sending out a record the producer will wait for up to the given delay -to allow other records to be sent so that the sends can be batched together. - -[id="{version}-plugins-{type}s-{plugin}-max_request_size"] -===== `max_request_size` - - * Value type is <> - * Default value is `1048576` - -The maximum size of a request - -[id="{version}-plugins-{type}s-{plugin}-message_key"] -===== `message_key` - - * Value type is <> - * There is no default value for this setting. - -The key for the message - -[id="{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms"] -===== `metadata_fetch_timeout_ms` - - * Value type is <> - * Default value is `60000` - -the timeout setting for initial metadata request to fetch topic metadata. - -[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"] -===== `metadata_max_age_ms` - - * Value type is <> - * Default value is `300000` - -the max time in milliseconds before a metadata refresh is forced. - -[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] -===== `receive_buffer_bytes` - - * Value type is <> - * Default value is `32768` - -The size of the TCP receive buffer to use when reading data - -[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"] -===== `reconnect_backoff_ms` - - * Value type is <> - * Default value is `10` - -The amount of time to wait before attempting to reconnect to a given host when a connection fails. - -[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"] -===== `request_timeout_ms` - - * Value type is <> - * There is no default value for this setting. - -The configuration controls the maximum amount of time the client will wait -for the response of a request. 
If the response is not received before the timeout -elapses the client will resend the request if necessary or fail the request if -retries are exhausted. - -[id="{version}-plugins-{type}s-{plugin}-retries"] -===== `retries` - - * Value type is <> - * Default value is `0` - -Setting a value greater than zero will cause the client to -resend any record whose send fails with a potentially transient error. - -[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"] -===== `retry_backoff_ms` - - * Value type is <> - * Default value is `100` - -The amount of time to wait before attempting to retry a failed produce request to a given topic partition. - -[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"] -===== `sasl_kerberos_service_name` - - * Value type is <> - * There is no default value for this setting. - -The Kerberos principal name that Kafka broker runs as. -This can be defined either in Kafka's JAAS config or in Kafka's config. - -[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"] -===== `sasl_mechanism` - - * Value type is <> - * Default value is `"GSSAPI"` - -http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections. -This may be any mechanism for which a security provider is available. -GSSAPI is the default mechanism. - -[id="{version}-plugins-{type}s-{plugin}-security_protocol"] -===== `security_protocol` - - * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL` - * Default value is `"PLAINTEXT"` - -Security protocol to use, which can be either of PLAINTEXT,SSL,SASL_PLAINTEXT,SASL_SSL - -[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"] -===== `send_buffer_bytes` - - * Value type is <> - * Default value is `131072` - -The size of the TCP send buffer to use when sending data. - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * Default value is `false` - -Enable SSL/TLS secured communication to Kafka broker. - -[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"] -===== `ssl_key_password` - - * Value type is <> - * There is no default value for this setting. - -The password of the private key in the key store file. - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"] -===== `ssl_keystore_location` - - * Value type is <> - * There is no default value for this setting. - -If client authentication is required, this setting stores the keystore path. - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"] -===== `ssl_keystore_password` - - * Value type is <> - * There is no default value for this setting. - -If client authentication is required, this setting stores the keystore password - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"] -===== `ssl_keystore_type` - - * Value type is <> - * There is no default value for this setting. - -The keystore type. - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"] -===== `ssl_truststore_location` - - * Value type is <> - * There is no default value for this setting. - -The JKS truststore path to validate the Kafka broker's certificate. - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"] -===== `ssl_truststore_password` - - * Value type is <> - * There is no default value for this setting. 
- -The truststore password - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"] -===== `ssl_truststore_type` - - * Value type is <> - * There is no default value for this setting. - -The truststore type. - -[id="{version}-plugins-{type}s-{plugin}-timeout_ms"] -===== `timeout_ms` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * Default value is `30000` - -The configuration controls the maximum amount of time the server will wait for acknowledgments -from followers to meet the acknowledgment requirements the producer has specified with the -acks configuration. If the requested number of acknowledgments are not met when the timeout -elapses an error will be returned. This timeout is measured on the server side and does not -include the network latency of the request. - -[id="{version}-plugins-{type}s-{plugin}-topic_id"] -===== `topic_id` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The topic to produce messages to - -[id="{version}-plugins-{type}s-{plugin}-value_serializer"] -===== `value_serializer` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringSerializer"` - -Serializer class for the value of the message - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/kafka-v6.2.2.asciidoc b/docs/versioned-plugins/outputs/kafka-v6.2.2.asciidoc deleted file mode 100644 index 7e1ccef09..000000000 --- a/docs/versioned-plugins/outputs/kafka-v6.2.2.asciidoc +++ /dev/null @@ -1,451 +0,0 @@ -:plugin: kafka -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v6.2.2 -:release_date: 2017-07-11 -:changelog_url: https://github.com/logstash-plugins/logstash-output-kafka/blob/v6.2.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Kafka output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Write events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on -the broker. - -Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination -of Logstash and the Kafka output plugin: - -[options="header"] -|========================================================== -|Kafka Client Version |Logstash Version |Plugin Version |Why? -|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular -|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`) -|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`) -|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker -|0.10.1.x |2.4.x - 5.x.x | 6.x.x | -|0.11.0.0 |2.4.x - 5.x.x | 6.2.2 |Not compatible with the <= 0.9 broker -|========================================================== - -NOTE: We recommended that you use matching Kafka client and broker versions. During upgrades, you should -upgrade brokers before clients because brokers target backwards compatibility. 
For example, the 0.9 broker -is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around. - -This output supports connecting to Kafka over: - -* SSL (requires plugin version 3.0.0 or later) -* Kerberos SASL (requires plugin version 5.1.0 or later) - -By default security is disabled but can be turned on as needed. - -The only required configuration is the topic_id. The default codec is plain, -so events will be persisted on the broker in plain format. Logstash will encode your messages with not -only the message but also with a timestamp and hostname. If you do not want anything but your message -passing through, you should make the output configuration something like: -[source,ruby] - output { - kafka { - codec => plain { - format => "%{message}" - } - topic_id => "mytopic" - } - } -For more information see http://kafka.apache.org/documentation.html#theproducer - -Kafka producer configuration: http://kafka.apache.org/documentation.html#newproducerconfigs - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Kafka Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-acks>> |<>, one of `["0", "1", "all"]`|No -| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-buffer_memory>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-compression_type>> |<>, one of `["none", "gzip", "snappy", "lz4"]`|No -| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-key_serializer>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-linger_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_request_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retries>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No -| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No -| 
<<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topic_id>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-value_serializer>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-acks"] -===== `acks` - - * Value can be any of: `0`, `1`, `all` - * Default value is `"1"` - -The number of acknowledgments the producer requires the leader to have received -before considering a request complete. - -acks=0, the producer will not wait for any acknowledgment from the server at all. -acks=1, This will mean the leader will write the record to its local log but - will respond without awaiting full acknowledgement from all followers. -acks=all, This means the leader will wait for the full set of in-sync replicas to acknowledge the record. - -[id="{version}-plugins-{type}s-{plugin}-batch_size"] -===== `batch_size` - - * Value type is <> - * Default value is `16384` - -The producer will attempt to batch records together into fewer requests whenever multiple -records are being sent to the same partition. This helps performance on both the client -and the server. This configuration controls the default batch size in bytes. - -[id="{version}-plugins-{type}s-{plugin}-block_on_buffer_full"] -===== `block_on_buffer_full` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * Default value is `true` - -When our memory buffer is exhausted we must either stop accepting new -records (block) or throw errors. By default this setting is true and we block, -however in some scenarios blocking is not desirable and it is better to immediately give an error. - -[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"] -===== `bootstrap_servers` - - * Value type is <> - * Default value is `"localhost:9092"` - -This is for bootstrapping and the producer will only use it for getting metadata (topics, -partitions and replicas). The socket connections for sending the actual data will be -established based on the broker information returned in the metadata. The format is -`host1:port1,host2:port2`, and the list can be a subset of brokers or a VIP pointing to a -subset of brokers. - -[id="{version}-plugins-{type}s-{plugin}-buffer_memory"] -===== `buffer_memory` - - * Value type is <> - * Default value is `33554432` - -The total bytes of memory the producer can use to buffer records waiting to be sent to the server. - -[id="{version}-plugins-{type}s-{plugin}-client_id"] -===== `client_id` - - * Value type is <> - * There is no default value for this setting. - -The id string to pass to the server when making requests. -The purpose of this is to be able to track the source of requests beyond just -ip/port by allowing a logical application name to be included with the request - -[id="{version}-plugins-{type}s-{plugin}-compression_type"] -===== `compression_type` - - * Value can be any of: `none`, `gzip`, `snappy`, `lz4` - * Default value is `"none"` - -The compression type for all data generated by the producer. -The default is none (i.e. no compression). Valid values are none, gzip, or snappy. - -[id="{version}-plugins-{type}s-{plugin}-jaas_path"] -===== `jaas_path` - - * Value type is <> - * There is no default value for this setting. 
- -The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization -services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for Kafka client: -[source,java] ----------------------------------- -KafkaClient { - com.sun.security.auth.module.Krb5LoginModule required - useTicketCache=true - renewTicket=true - serviceName="kafka"; - }; ----------------------------------- - -Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these -to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same -`jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on -different JVM instances. - -[id="{version}-plugins-{type}s-{plugin}-kerberos_config"] -===== `kerberos_config` - - * Value type is <> - * There is no default value for this setting. - -Optional path to kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html - -[id="{version}-plugins-{type}s-{plugin}-key_serializer"] -===== `key_serializer` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringSerializer"` - -Serializer class for the key of the message - -[id="{version}-plugins-{type}s-{plugin}-linger_ms"] -===== `linger_ms` - - * Value type is <> - * Default value is `0` - -The producer groups together any records that arrive in between request -transmissions into a single batched request. Normally this occurs only under -load when records arrive faster than they can be sent out. However in some circumstances -the client may want to reduce the number of requests even under moderate load. -This setting accomplishes this by adding a small amount of artificial delay—that is, -rather than immediately sending out a record the producer will wait for up to the given delay -to allow other records to be sent so that the sends can be batched together. - -[id="{version}-plugins-{type}s-{plugin}-max_request_size"] -===== `max_request_size` - - * Value type is <> - * Default value is `1048576` - -The maximum size of a request - -[id="{version}-plugins-{type}s-{plugin}-message_key"] -===== `message_key` - - * Value type is <> - * There is no default value for this setting. - -The key for the message - -[id="{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms"] -===== `metadata_fetch_timeout_ms` - - * Value type is <> - * Default value is `60000` - -the timeout setting for initial metadata request to fetch topic metadata. - -[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"] -===== `metadata_max_age_ms` - - * Value type is <> - * Default value is `300000` - -the max time in milliseconds before a metadata refresh is forced. - -[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] -===== `receive_buffer_bytes` - - * Value type is <> - * Default value is `32768` - -The size of the TCP receive buffer to use when reading data - -[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"] -===== `reconnect_backoff_ms` - - * Value type is <> - * Default value is `10` - -The amount of time to wait before attempting to reconnect to a given host when a connection fails. - -[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"] -===== `request_timeout_ms` - - * Value type is <> - * There is no default value for this setting. 
- -The configuration controls the maximum amount of time the client will wait -for the response of a request. If the response is not received before the timeout -elapses the client will resend the request if necessary or fail the request if -retries are exhausted. - -[id="{version}-plugins-{type}s-{plugin}-retries"] -===== `retries` - - * Value type is <> - * Default value is `0` - -Setting a value greater than zero will cause the client to -resend any record whose send fails with a potentially transient error. - -[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"] -===== `retry_backoff_ms` - - * Value type is <> - * Default value is `100` - -The amount of time to wait before attempting to retry a failed produce request to a given topic partition. - -[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"] -===== `sasl_kerberos_service_name` - - * Value type is <> - * There is no default value for this setting. - -The Kerberos principal name that Kafka broker runs as. -This can be defined either in Kafka's JAAS config or in Kafka's config. - -[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"] -===== `sasl_mechanism` - - * Value type is <> - * Default value is `"GSSAPI"` - -http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections. -This may be any mechanism for which a security provider is available. -GSSAPI is the default mechanism. - -[id="{version}-plugins-{type}s-{plugin}-security_protocol"] -===== `security_protocol` - - * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL` - * Default value is `"PLAINTEXT"` - -Security protocol to use, which can be either of PLAINTEXT,SSL,SASL_PLAINTEXT,SASL_SSL - -[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"] -===== `send_buffer_bytes` - - * Value type is <> - * Default value is `131072` - -The size of the TCP send buffer to use when sending data. - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * Default value is `false` - -Enable SSL/TLS secured communication to Kafka broker. - -[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"] -===== `ssl_key_password` - - * Value type is <> - * There is no default value for this setting. - -The password of the private key in the key store file. - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"] -===== `ssl_keystore_location` - - * Value type is <> - * There is no default value for this setting. - -If client authentication is required, this setting stores the keystore path. - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"] -===== `ssl_keystore_password` - - * Value type is <> - * There is no default value for this setting. - -If client authentication is required, this setting stores the keystore password - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"] -===== `ssl_keystore_type` - - * Value type is <> - * There is no default value for this setting. - -The keystore type. - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"] -===== `ssl_truststore_location` - - * Value type is <> - * There is no default value for this setting. - -The JKS truststore path to validate the Kafka broker's certificate. - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"] -===== `ssl_truststore_password` - - * Value type is <> - * There is no default value for this setting. 
- -The truststore password - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"] -===== `ssl_truststore_type` - - * Value type is <> - * There is no default value for this setting. - -The truststore type. - -[id="{version}-plugins-{type}s-{plugin}-timeout_ms"] -===== `timeout_ms` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * Default value is `30000` - -The configuration controls the maximum amount of time the server will wait for acknowledgments -from followers to meet the acknowledgment requirements the producer has specified with the -acks configuration. If the requested number of acknowledgments are not met when the timeout -elapses an error will be returned. This timeout is measured on the server side and does not -include the network latency of the request. - -[id="{version}-plugins-{type}s-{plugin}-topic_id"] -===== `topic_id` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The topic to produce messages to - -[id="{version}-plugins-{type}s-{plugin}-value_serializer"] -===== `value_serializer` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringSerializer"` - -Serializer class for the value of the message - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/kafka-v7.0.0.asciidoc b/docs/versioned-plugins/outputs/kafka-v7.0.0.asciidoc deleted file mode 100644 index 4d8df2297..000000000 --- a/docs/versioned-plugins/outputs/kafka-v7.0.0.asciidoc +++ /dev/null @@ -1,418 +0,0 @@ -:plugin: kafka -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v7.0.0 -:release_date: 2017-08-01 -:changelog_url: https://github.com/logstash-plugins/logstash-output-kafka/blob/v7.0.0/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Kafka output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Write events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on -the broker. - -Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination -of Logstash and the Kafka output plugin: - -[options="header"] -|========================================================== -|Kafka Client Version |Logstash Version |Plugin Version |Why? -|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular -|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`) -|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`) -|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker -|0.10.1.x |2.4.x - 5.x.x | 6.x.x | -|0.11.0.0 |2.4.x - 5.x.x | 6.2.2 |Not compatible with the <= 0.9 broker -|========================================================== - -NOTE: We recommended that you use matching Kafka client and broker versions. During upgrades, you should -upgrade brokers before clients because brokers target backwards compatibility. 
For example, the 0.9 broker -is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around. - -This output supports connecting to Kafka over: - -* SSL (requires plugin version 3.0.0 or later) -* Kerberos SASL (requires plugin version 5.1.0 or later) - -By default security is disabled but can be turned on as needed. - -The only required configuration is the topic_id. The default codec is plain, -so events will be persisted on the broker in plain format. Logstash will encode your messages with not -only the message but also with a timestamp and hostname. If you do not want anything but your message -passing through, you should make the output configuration something like: -[source,ruby] - output { - kafka { - codec => plain { - format => "%{message}" - } - topic_id => "mytopic" - } - } -For more information see http://kafka.apache.org/documentation.html#theproducer - -Kafka producer configuration: http://kafka.apache.org/documentation.html#newproducerconfigs - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Kafka Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-acks>> |<>, one of `["0", "1", "all"]`|No -| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-buffer_memory>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-compression_type>> |<>, one of `["none", "gzip", "snappy", "lz4"]`|No -| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-key_serializer>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-linger_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_request_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retries>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No -| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No -| 
<<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topic_id>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-value_serializer>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-acks"] -===== `acks` - - * Value can be any of: `0`, `1`, `all` - * Default value is `"1"` - -The number of acknowledgments the producer requires the leader to have received -before considering a request complete. - -acks=0, the producer will not wait for any acknowledgment from the server at all. -acks=1, This will mean the leader will write the record to its local log but - will respond without awaiting full acknowledgement from all followers. -acks=all, This means the leader will wait for the full set of in-sync replicas to acknowledge the record. - -[id="{version}-plugins-{type}s-{plugin}-batch_size"] -===== `batch_size` - - * Value type is <> - * Default value is `16384` - -The producer will attempt to batch records together into fewer requests whenever multiple -records are being sent to the same partition. This helps performance on both the client -and the server. This configuration controls the default batch size in bytes. - -[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"] -===== `bootstrap_servers` - - * Value type is <> - * Default value is `"localhost:9092"` - -This is for bootstrapping and the producer will only use it for getting metadata (topics, -partitions and replicas). The socket connections for sending the actual data will be -established based on the broker information returned in the metadata. The format is -`host1:port1,host2:port2`, and the list can be a subset of brokers or a VIP pointing to a -subset of brokers. - -[id="{version}-plugins-{type}s-{plugin}-buffer_memory"] -===== `buffer_memory` - - * Value type is <> - * Default value is `33554432` - -The total bytes of memory the producer can use to buffer records waiting to be sent to the server. - -[id="{version}-plugins-{type}s-{plugin}-client_id"] -===== `client_id` - - * Value type is <> - * There is no default value for this setting. - -The id string to pass to the server when making requests. -The purpose of this is to be able to track the source of requests beyond just -ip/port by allowing a logical application name to be included with the request - -[id="{version}-plugins-{type}s-{plugin}-compression_type"] -===== `compression_type` - - * Value can be any of: `none`, `gzip`, `snappy`, `lz4` - * Default value is `"none"` - -The compression type for all data generated by the producer. -The default is none (i.e. no compression). Valid values are none, gzip, or snappy. - -[id="{version}-plugins-{type}s-{plugin}-jaas_path"] -===== `jaas_path` - - * Value type is <> - * There is no default value for this setting. - -The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization -services for Kafka. This setting provides the path to the JAAS file. 
Sample JAAS file for Kafka client: -[source,java] ----------------------------------- -KafkaClient { - com.sun.security.auth.module.Krb5LoginModule required - useTicketCache=true - renewTicket=true - serviceName="kafka"; - }; ----------------------------------- - -Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these -to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same -`jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on -different JVM instances. - -[id="{version}-plugins-{type}s-{plugin}-kerberos_config"] -===== `kerberos_config` - - * Value type is <> - * There is no default value for this setting. - -Optional path to kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html - -[id="{version}-plugins-{type}s-{plugin}-key_serializer"] -===== `key_serializer` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringSerializer"` - -Serializer class for the key of the message - -[id="{version}-plugins-{type}s-{plugin}-linger_ms"] -===== `linger_ms` - - * Value type is <> - * Default value is `0` - -The producer groups together any records that arrive in between request -transmissions into a single batched request. Normally this occurs only under -load when records arrive faster than they can be sent out. However in some circumstances -the client may want to reduce the number of requests even under moderate load. -This setting accomplishes this by adding a small amount of artificial delay—that is, -rather than immediately sending out a record the producer will wait for up to the given delay -to allow other records to be sent so that the sends can be batched together. - -[id="{version}-plugins-{type}s-{plugin}-max_request_size"] -===== `max_request_size` - - * Value type is <> - * Default value is `1048576` - -The maximum size of a request - -[id="{version}-plugins-{type}s-{plugin}-message_key"] -===== `message_key` - - * Value type is <> - * There is no default value for this setting. - -The key for the message - -[id="{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms"] -===== `metadata_fetch_timeout_ms` - - * Value type is <> - * Default value is `60000` - -the timeout setting for initial metadata request to fetch topic metadata. - -[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"] -===== `metadata_max_age_ms` - - * Value type is <> - * Default value is `300000` - -the max time in milliseconds before a metadata refresh is forced. - -[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] -===== `receive_buffer_bytes` - - * Value type is <> - * Default value is `32768` - -The size of the TCP receive buffer to use when reading data - -[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"] -===== `reconnect_backoff_ms` - - * Value type is <> - * Default value is `10` - -The amount of time to wait before attempting to reconnect to a given host when a connection fails. - -[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"] -===== `request_timeout_ms` - - * Value type is <> - * There is no default value for this setting. - -The configuration controls the maximum amount of time the client will wait -for the response of a request. 
If the response is not received before the timeout -elapses the client will resend the request if necessary or fail the request if -retries are exhausted. - -[id="{version}-plugins-{type}s-{plugin}-retries"] -===== `retries` - - * Value type is <> - * Default value is `0` - -Setting a value greater than zero will cause the client to -resend any record whose send fails with a potentially transient error. - -[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"] -===== `retry_backoff_ms` - - * Value type is <> - * Default value is `100` - -The amount of time to wait before attempting to retry a failed produce request to a given topic partition. - -[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"] -===== `sasl_kerberos_service_name` - - * Value type is <> - * There is no default value for this setting. - -The Kerberos principal name that Kafka broker runs as. -This can be defined either in Kafka's JAAS config or in Kafka's config. - -[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"] -===== `sasl_mechanism` - - * Value type is <> - * Default value is `"GSSAPI"` - -http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections. -This may be any mechanism for which a security provider is available. -GSSAPI is the default mechanism. - -[id="{version}-plugins-{type}s-{plugin}-security_protocol"] -===== `security_protocol` - - * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL` - * Default value is `"PLAINTEXT"` - -Security protocol to use, which can be either of PLAINTEXT,SSL,SASL_PLAINTEXT,SASL_SSL - -[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"] -===== `send_buffer_bytes` - - * Value type is <> - * Default value is `131072` - -The size of the TCP send buffer to use when sending data. - -[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"] -===== `ssl_key_password` - - * Value type is <> - * There is no default value for this setting. - -The password of the private key in the key store file. - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"] -===== `ssl_keystore_location` - - * Value type is <> - * There is no default value for this setting. - -If client authentication is required, this setting stores the keystore path. - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"] -===== `ssl_keystore_password` - - * Value type is <> - * There is no default value for this setting. - -If client authentication is required, this setting stores the keystore password - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"] -===== `ssl_keystore_type` - - * Value type is <> - * There is no default value for this setting. - -The keystore type. - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"] -===== `ssl_truststore_location` - - * Value type is <> - * There is no default value for this setting. - -The JKS truststore path to validate the Kafka broker's certificate. - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"] -===== `ssl_truststore_password` - - * Value type is <> - * There is no default value for this setting. - -The truststore password - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"] -===== `ssl_truststore_type` - - * Value type is <> - * There is no default value for this setting. - -The truststore type. - -[id="{version}-plugins-{type}s-{plugin}-topic_id"] -===== `topic_id` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. 
- -The topic to produce messages to - -[id="{version}-plugins-{type}s-{plugin}-value_serializer"] -===== `value_serializer` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringSerializer"` - -Serializer class for the value of the message - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/kafka-v7.0.1.asciidoc b/docs/versioned-plugins/outputs/kafka-v7.0.1.asciidoc deleted file mode 100644 index 3e95a9d75..000000000 --- a/docs/versioned-plugins/outputs/kafka-v7.0.1.asciidoc +++ /dev/null @@ -1,418 +0,0 @@ -:plugin: kafka -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v7.0.1 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-kafka/blob/v7.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Kafka output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Write events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on -the broker. - -Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination -of Logstash and the Kafka output plugin: - -[options="header"] -|========================================================== -|Kafka Client Version |Logstash Version |Plugin Version |Why? -|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular -|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`) -|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`) -|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker -|0.10.1.x |2.4.x - 5.x.x | 6.x.x | -|0.11.0.0 |2.4.x - 5.x.x | 6.2.2 |Not compatible with the <= 0.9 broker -|========================================================== - -NOTE: We recommended that you use matching Kafka client and broker versions. During upgrades, you should -upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker -is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around. - -This output supports connecting to Kafka over: - -* SSL (requires plugin version 3.0.0 or later) -* Kerberos SASL (requires plugin version 5.1.0 or later) - -By default security is disabled but can be turned on as needed. - -The only required configuration is the topic_id. The default codec is plain, -so events will be persisted on the broker in plain format. Logstash will encode your messages with not -only the message but also with a timestamp and hostname. 
If you do not want anything but your message -passing through, you should make the output configuration something like: -[source,ruby] - output { - kafka { - codec => plain { - format => "%{message}" - } - topic_id => "mytopic" - } - } -For more information see http://kafka.apache.org/documentation.html#theproducer - -Kafka producer configuration: http://kafka.apache.org/documentation.html#newproducerconfigs - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Kafka Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-acks>> |<>, one of `["0", "1", "all"]`|No -| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-buffer_memory>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-compression_type>> |<>, one of `["none", "gzip", "snappy", "lz4"]`|No -| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-key_serializer>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-linger_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_request_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retries>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No -| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topic_id>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-value_serializer>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. 
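-
-A minimal sketch of an SSL-secured output, built only from options listed in the
-table above; the truststore path and password are hypothetical placeholders, not
-shipped defaults:
-[source,ruby]
-    output {
-      kafka {
-        topic_id => "mytopic"
-        security_protocol => "SSL"
-        # hypothetical truststore used to validate the broker's certificate
-        ssl_truststore_location => "/etc/logstash/client.truststore.jks"
-        ssl_truststore_password => "changeit"
-      }
-    }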
- -  - -[id="{version}-plugins-{type}s-{plugin}-acks"] -===== `acks` - - * Value can be any of: `0`, `1`, `all` - * Default value is `"1"` - -The number of acknowledgments the producer requires the leader to have received -before considering a request complete. - -acks=0, the producer will not wait for any acknowledgment from the server at all. -acks=1, This will mean the leader will write the record to its local log but - will respond without awaiting full acknowledgement from all followers. -acks=all, This means the leader will wait for the full set of in-sync replicas to acknowledge the record. - -[id="{version}-plugins-{type}s-{plugin}-batch_size"] -===== `batch_size` - - * Value type is <> - * Default value is `16384` - -The producer will attempt to batch records together into fewer requests whenever multiple -records are being sent to the same partition. This helps performance on both the client -and the server. This configuration controls the default batch size in bytes. - -[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"] -===== `bootstrap_servers` - - * Value type is <> - * Default value is `"localhost:9092"` - -This is for bootstrapping and the producer will only use it for getting metadata (topics, -partitions and replicas). The socket connections for sending the actual data will be -established based on the broker information returned in the metadata. The format is -`host1:port1,host2:port2`, and the list can be a subset of brokers or a VIP pointing to a -subset of brokers. - -[id="{version}-plugins-{type}s-{plugin}-buffer_memory"] -===== `buffer_memory` - - * Value type is <> - * Default value is `33554432` - -The total bytes of memory the producer can use to buffer records waiting to be sent to the server. - -[id="{version}-plugins-{type}s-{plugin}-client_id"] -===== `client_id` - - * Value type is <> - * There is no default value for this setting. - -The id string to pass to the server when making requests. -The purpose of this is to be able to track the source of requests beyond just -ip/port by allowing a logical application name to be included with the request - -[id="{version}-plugins-{type}s-{plugin}-compression_type"] -===== `compression_type` - - * Value can be any of: `none`, `gzip`, `snappy`, `lz4` - * Default value is `"none"` - -The compression type for all data generated by the producer. -The default is none (i.e. no compression). Valid values are none, gzip, or snappy. - -[id="{version}-plugins-{type}s-{plugin}-jaas_path"] -===== `jaas_path` - - * Value type is <> - * There is no default value for this setting. - -The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization -services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for Kafka client: -[source,java] ----------------------------------- -KafkaClient { - com.sun.security.auth.module.Krb5LoginModule required - useTicketCache=true - renewTicket=true - serviceName="kafka"; - }; ----------------------------------- - -Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these -to the global JVM system properties. This means if you have multiple Kafka inputs, all of them would be sharing the same -`jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on -different JVM instances. 
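-
-A minimal sketch showing how `jaas_path` and `kerberos_config` fit together with
-the SASL settings described below; both file paths are hypothetical placeholders:
-[source,ruby]
-    output {
-      kafka {
-        topic_id => "mytopic"
-        security_protocol => "SASL_PLAINTEXT"
-        sasl_kerberos_service_name => "kafka"
-        # hypothetical paths; the JAAS file follows the sample shown above
-        jaas_path => "/etc/logstash/kafka_client_jaas.conf"
-        kerberos_config => "/etc/krb5.conf"
-      }
-    }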
- -[id="{version}-plugins-{type}s-{plugin}-kerberos_config"] -===== `kerberos_config` - - * Value type is <> - * There is no default value for this setting. - -Optional path to kerberos config file. This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html - -[id="{version}-plugins-{type}s-{plugin}-key_serializer"] -===== `key_serializer` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringSerializer"` - -Serializer class for the key of the message - -[id="{version}-plugins-{type}s-{plugin}-linger_ms"] -===== `linger_ms` - - * Value type is <> - * Default value is `0` - -The producer groups together any records that arrive in between request -transmissions into a single batched request. Normally this occurs only under -load when records arrive faster than they can be sent out. However in some circumstances -the client may want to reduce the number of requests even under moderate load. -This setting accomplishes this by adding a small amount of artificial delay—that is, -rather than immediately sending out a record the producer will wait for up to the given delay -to allow other records to be sent so that the sends can be batched together. - -[id="{version}-plugins-{type}s-{plugin}-max_request_size"] -===== `max_request_size` - - * Value type is <> - * Default value is `1048576` - -The maximum size of a request - -[id="{version}-plugins-{type}s-{plugin}-message_key"] -===== `message_key` - - * Value type is <> - * There is no default value for this setting. - -The key for the message - -[id="{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms"] -===== `metadata_fetch_timeout_ms` - - * Value type is <> - * Default value is `60000` - -the timeout setting for initial metadata request to fetch topic metadata. - -[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"] -===== `metadata_max_age_ms` - - * Value type is <> - * Default value is `300000` - -the max time in milliseconds before a metadata refresh is forced. - -[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"] -===== `receive_buffer_bytes` - - * Value type is <> - * Default value is `32768` - -The size of the TCP receive buffer to use when reading data - -[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"] -===== `reconnect_backoff_ms` - - * Value type is <> - * Default value is `10` - -The amount of time to wait before attempting to reconnect to a given host when a connection fails. - -[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"] -===== `request_timeout_ms` - - * Value type is <> - * There is no default value for this setting. - -The configuration controls the maximum amount of time the client will wait -for the response of a request. If the response is not received before the timeout -elapses the client will resend the request if necessary or fail the request if -retries are exhausted. - -[id="{version}-plugins-{type}s-{plugin}-retries"] -===== `retries` - - * Value type is <> - * Default value is `0` - -Setting a value greater than zero will cause the client to -resend any record whose send fails with a potentially transient error. - -[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"] -===== `retry_backoff_ms` - - * Value type is <> - * Default value is `100` - -The amount of time to wait before attempting to retry a failed produce request to a given topic partition. 
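-
-A sketch of how the batching options above combine; the values are illustrative
-only, not tuning recommendations:
-[source,ruby]
-    output {
-      kafka {
-        topic_id => "mytopic"
-        compression_type => "gzip"
-        batch_size => 32768   # bytes per partition batch (default is 16384)
-        linger_ms => 5        # wait up to 5 ms so more records can join a batch
-      }
-    }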
-
-[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"]
-===== `sasl_kerberos_service_name`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The Kerberos principal name that the Kafka broker runs as.
-This can be defined either in Kafka's JAAS config or in Kafka's config.
-
-[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"]
-===== `sasl_mechanism`
-
- * Value type is <>
- * Default value is `"GSSAPI"`
-
-http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections.
-This may be any mechanism for which a security provider is available.
-GSSAPI is the default mechanism.
-
-[id="{version}-plugins-{type}s-{plugin}-security_protocol"]
-===== `security_protocol`
-
- * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`
- * Default value is `"PLAINTEXT"`
-
-Security protocol to use. It can be one of `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, or `SASL_SSL`.
-
-[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"]
-===== `send_buffer_bytes`
-
- * Value type is <>
- * Default value is `131072`
-
-The size of the TCP send buffer to use when sending data.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"]
-===== `ssl_key_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The password of the private key in the key store file.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"]
-===== `ssl_keystore_location`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If client authentication is required, this setting stores the keystore path.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"]
-===== `ssl_keystore_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If client authentication is required, this setting stores the keystore password.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"]
-===== `ssl_keystore_type`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The keystore type.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"]
-===== `ssl_truststore_location`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The JKS truststore path to validate the Kafka broker's certificate.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"]
-===== `ssl_truststore_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The truststore password.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"]
-===== `ssl_truststore_type`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The truststore type.
-
-[id="{version}-plugins-{type}s-{plugin}-topic_id"]
-===== `topic_id`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
- -The topic to produce messages to - -[id="{version}-plugins-{type}s-{plugin}-value_serializer"] -===== `value_serializer` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringSerializer"` - -Serializer class for the value of the message - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/kafka-v7.0.3.asciidoc b/docs/versioned-plugins/outputs/kafka-v7.0.3.asciidoc deleted file mode 100644 index 06e3a8711..000000000 --- a/docs/versioned-plugins/outputs/kafka-v7.0.3.asciidoc +++ /dev/null @@ -1,425 +0,0 @@ -:plugin: kafka -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v7.0.3 -:release_date: 2017-10-09 -:changelog_url: https://github.com/logstash-plugins/logstash-output-kafka/blob/v7.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Kafka output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Write events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on -the broker. - -Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination -of Logstash and the Kafka output plugin: - -[options="header"] -|========================================================== -|Kafka Client Version |Logstash Version |Plugin Version |Why? -|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular -|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`) -|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`) -|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker -|0.10.1.x |2.4.x - 5.x.x | 6.x.x | -|0.11.0.0 |2.4.x - 5.x.x | 6.2.2 |Not compatible with the <= 0.9 broker -|========================================================== - -NOTE: We recommended that you use matching Kafka client and broker versions. During upgrades, you should -upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker -is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around. - -This output supports connecting to Kafka over: - -* SSL (requires plugin version 3.0.0 or later) -* Kerberos SASL (requires plugin version 5.1.0 or later) - -By default security is disabled but can be turned on as needed. - -The only required configuration is the topic_id. - -The default codec is plain. Logstash will encode your events with not only the message field but also with a timestamp and hostname. 
- -If you want the full content of your events to be sent as json, you should set the codec in the output configuration like this: -[source,ruby] - output { - kafka { - codec => json - topic_id => "mytopic" - } - } - -For more information see http://kafka.apache.org/documentation.html#theproducer - -Kafka producer configuration: http://kafka.apache.org/documentation.html#newproducerconfigs - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Kafka Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-acks>> |<>, one of `["0", "1", "all"]`|No -| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-buffer_memory>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-compression_type>> |<>, one of `["none", "gzip", "snappy", "lz4"]`|No -| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-key_serializer>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-linger_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_request_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retries>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No -| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topic_id>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-value_serializer>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-acks"] -===== `acks` - - * Value can be any of: `0`, `1`, `all` - * Default value is `"1"` - -The number of acknowledgments the producer requires the leader to have received -before considering a request complete. 
-
-`acks=0`: the producer will not wait for any acknowledgment from the server at all.
-`acks=1`: the leader will write the record to its local log, but will respond
-without awaiting full acknowledgement from all followers.
-`acks=all`: the leader will wait for the full set of in-sync replicas to acknowledge the record.
-
-[id="{version}-plugins-{type}s-{plugin}-batch_size"]
-===== `batch_size`
-
- * Value type is <>
- * Default value is `16384`
-
-The producer will attempt to batch records together into fewer requests whenever multiple
-records are being sent to the same partition. This helps performance on both the client
-and the server. This configuration controls the default batch size in bytes.
-
-[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"]
-===== `bootstrap_servers`
-
- * Value type is <>
- * Default value is `"localhost:9092"`
-
-This is for bootstrapping and the producer will only use it for getting metadata (topics,
-partitions and replicas). The socket connections for sending the actual data will be
-established based on the broker information returned in the metadata. The format is
-`host1:port1,host2:port2`, and the list can be a subset of brokers or a VIP pointing to a
-subset of brokers.
-
-[id="{version}-plugins-{type}s-{plugin}-buffer_memory"]
-===== `buffer_memory`
-
- * Value type is <>
- * Default value is `33554432`
-
-The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
-
-[id="{version}-plugins-{type}s-{plugin}-client_id"]
-===== `client_id`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The id string to pass to the server when making requests.
-The purpose of this is to be able to track the source of requests beyond just
-ip/port by allowing a logical application name to be included with the request.
-
-[id="{version}-plugins-{type}s-{plugin}-compression_type"]
-===== `compression_type`
-
- * Value can be any of: `none`, `gzip`, `snappy`, `lz4`
- * Default value is `"none"`
-
-The compression type for all data generated by the producer.
-The default is none (that is, no compression). Valid values are none, gzip, snappy, or lz4.
-
-[id="{version}-plugins-{type}s-{plugin}-jaas_path"]
-===== `jaas_path`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization
-services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for Kafka client:
-[source,java]
-----------------------------------
-KafkaClient {
-  com.sun.security.auth.module.Krb5LoginModule required
-  useTicketCache=true
-  renewTicket=true
-  serviceName="kafka";
-  };
-----------------------------------
-
-Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these
-to the global JVM system properties. This means that if you have multiple Kafka inputs, all of them would be sharing the same
-`jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on
-different JVM instances.
-
-[id="{version}-plugins-{type}s-{plugin}-kerberos_config"]
-===== `kerberos_config`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Optional path to the Kerberos config file. This file is in krb5.conf style, as detailed in
-https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html
-
-[id="{version}-plugins-{type}s-{plugin}-key_serializer"]
-===== `key_serializer`
-
- * Value type is <>
- * Default value is `"org.apache.kafka.common.serialization.StringSerializer"`
-
-Serializer class for the key of the message.
-
-[id="{version}-plugins-{type}s-{plugin}-linger_ms"]
-===== `linger_ms`
-
- * Value type is <>
- * Default value is `0`
-
-The producer groups together any records that arrive in between request
-transmissions into a single batched request. Normally this occurs only under
-load when records arrive faster than they can be sent out. However, in some circumstances
-the client may want to reduce the number of requests even under moderate load.
-This setting accomplishes this by adding a small amount of artificial delay. That is,
-rather than immediately sending out a record, the producer will wait for up to the given delay
-to allow other records to be sent so that the sends can be batched together.
-
-[id="{version}-plugins-{type}s-{plugin}-max_request_size"]
-===== `max_request_size`
-
- * Value type is <>
- * Default value is `1048576`
-
-The maximum size of a request.
-
-[id="{version}-plugins-{type}s-{plugin}-message_key"]
-===== `message_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The key for the message.
-
-[id="{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms"]
-===== `metadata_fetch_timeout_ms`
-
- * Value type is <>
- * Default value is `60000`
-
-The timeout for the initial metadata request that fetches topic metadata.
-
-[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"]
-===== `metadata_max_age_ms`
-
- * Value type is <>
- * Default value is `300000`
-
-The maximum time in milliseconds before a metadata refresh is forced.
-
-[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"]
-===== `receive_buffer_bytes`
-
- * Value type is <>
- * Default value is `32768`
-
-The size of the TCP receive buffer to use when reading data.
-
-[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"]
-===== `reconnect_backoff_ms`
-
- * Value type is <>
- * Default value is `10`
-
-The amount of time to wait before attempting to reconnect to a given host when a connection fails.
-
-[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"]
-===== `request_timeout_ms`
-
- * Value type is <>
- * There is no default value for this setting.
-
-This configuration controls the maximum amount of time the client will wait
-for the response of a request. If the response is not received before the timeout
-elapses, the client will resend the request if necessary, or fail the request if
-retries are exhausted.
-
-[id="{version}-plugins-{type}s-{plugin}-retries"]
-===== `retries`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The default retry behavior is to retry until successful. To prevent data loss,
-the use of this setting is discouraged.
-
-If you choose to set `retries`, a value greater than zero will cause the
-client to only retry a fixed number of times. This will result in data loss
-if a transport fault exists for longer than your retry count (network outage,
-Kafka down, etc.).
-
-A value less than zero is a configuration error.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"]
-===== `retry_backoff_ms`
-
- * Value type is <>
- * Default value is `100`
-
-The amount of time to wait before attempting to retry a failed produce request to a given topic partition.
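-
-A sketch of a durability-oriented configuration using the options above; the
-backoff value is illustrative only:
-[source,ruby]
-    output {
-      kafka {
-        topic_id => "mytopic"
-        acks => "all"             # wait for the full set of in-sync replicas
-        retry_backoff_ms => 500   # back off longer between retries (default is 100)
-        # `retries` is deliberately left unset, so the plugin retries until successful
-      }
-    }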
-
-[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"]
-===== `sasl_kerberos_service_name`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The Kerberos principal name that the Kafka broker runs as.
-This can be defined either in Kafka's JAAS config or in Kafka's config.
-
-[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"]
-===== `sasl_mechanism`
-
- * Value type is <>
- * Default value is `"GSSAPI"`
-
-http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections.
-This may be any mechanism for which a security provider is available.
-GSSAPI is the default mechanism.
-
-[id="{version}-plugins-{type}s-{plugin}-security_protocol"]
-===== `security_protocol`
-
- * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`
- * Default value is `"PLAINTEXT"`
-
-Security protocol to use. It can be one of `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, or `SASL_SSL`.
-
-[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"]
-===== `send_buffer_bytes`
-
- * Value type is <>
- * Default value is `131072`
-
-The size of the TCP send buffer to use when sending data.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"]
-===== `ssl_key_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The password of the private key in the key store file.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"]
-===== `ssl_keystore_location`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If client authentication is required, this setting stores the keystore path.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"]
-===== `ssl_keystore_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If client authentication is required, this setting stores the keystore password.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"]
-===== `ssl_keystore_type`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The keystore type.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"]
-===== `ssl_truststore_location`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The JKS truststore path to validate the Kafka broker's certificate.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"]
-===== `ssl_truststore_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The truststore password.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"]
-===== `ssl_truststore_type`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The truststore type.
-
-[id="{version}-plugins-{type}s-{plugin}-topic_id"]
-===== `topic_id`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
- -The topic to produce messages to - -[id="{version}-plugins-{type}s-{plugin}-value_serializer"] -===== `value_serializer` - - * Value type is <> - * Default value is `"org.apache.kafka.common.serialization.StringSerializer"` - -Serializer class for the value of the message - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/kafka-v7.0.4.asciidoc b/docs/versioned-plugins/outputs/kafka-v7.0.4.asciidoc deleted file mode 100644 index 3f6c27a91..000000000 --- a/docs/versioned-plugins/outputs/kafka-v7.0.4.asciidoc +++ /dev/null @@ -1,425 +0,0 @@ -:plugin: kafka -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v7.0.4 -:release_date: 2017-10-25 -:changelog_url: https://github.com/logstash-plugins/logstash-output-kafka/blob/v7.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Kafka output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Write events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on -the broker. - -Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination -of Logstash and the Kafka output plugin: - -[options="header"] -|========================================================== -|Kafka Client Version |Logstash Version |Plugin Version |Why? -|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular -|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`) -|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`) -|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker -|0.10.1.x |2.4.x - 5.x.x | 6.x.x | -|0.11.0.0 |2.4.x - 5.x.x | 6.2.2 |Not compatible with the <= 0.9 broker -|========================================================== - -NOTE: We recommended that you use matching Kafka client and broker versions. During upgrades, you should -upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker -is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around. - -This output supports connecting to Kafka over: - -* SSL (requires plugin version 3.0.0 or later) -* Kerberos SASL (requires plugin version 5.1.0 or later) - -By default security is disabled but can be turned on as needed. - -The only required configuration is the topic_id. - -The default codec is plain. Logstash will encode your events with not only the message field but also with a timestamp and hostname. 
- -If you want the full content of your events to be sent as json, you should set the codec in the output configuration like this: -[source,ruby] - output { - kafka { - codec => json - topic_id => "mytopic" - } - } - -For more information see http://kafka.apache.org/documentation.html#theproducer - -Kafka producer configuration: http://kafka.apache.org/documentation.html#newproducerconfigs - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Kafka Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-acks>> |<>, one of `["0", "1", "all"]`|No -| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-buffer_memory>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-compression_type>> |<>, one of `["none", "gzip", "snappy", "lz4"]`|No -| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-key_serializer>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-linger_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_request_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retries>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No -| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topic_id>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-value_serializer>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-acks"] -===== `acks` - - * Value can be any of: `0`, `1`, `all` - * Default value is `"1"` - -The number of acknowledgments the producer requires the leader to have received -before considering a request complete. 
-
-`acks=0`: the producer will not wait for any acknowledgment from the server at all.
-`acks=1`: the leader will write the record to its local log, but will respond
-without awaiting full acknowledgement from all followers.
-`acks=all`: the leader will wait for the full set of in-sync replicas to acknowledge the record.
-
-[id="{version}-plugins-{type}s-{plugin}-batch_size"]
-===== `batch_size`
-
- * Value type is <>
- * Default value is `16384`
-
-The producer will attempt to batch records together into fewer requests whenever multiple
-records are being sent to the same partition. This helps performance on both the client
-and the server. This configuration controls the default batch size in bytes.
-
-[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"]
-===== `bootstrap_servers`
-
- * Value type is <>
- * Default value is `"localhost:9092"`
-
-This is for bootstrapping and the producer will only use it for getting metadata (topics,
-partitions and replicas). The socket connections for sending the actual data will be
-established based on the broker information returned in the metadata. The format is
-`host1:port1,host2:port2`, and the list can be a subset of brokers or a VIP pointing to a
-subset of brokers.
-
-[id="{version}-plugins-{type}s-{plugin}-buffer_memory"]
-===== `buffer_memory`
-
- * Value type is <>
- * Default value is `33554432`
-
-The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
-
-[id="{version}-plugins-{type}s-{plugin}-client_id"]
-===== `client_id`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The id string to pass to the server when making requests.
-The purpose of this is to be able to track the source of requests beyond just
-ip/port by allowing a logical application name to be included with the request.
-
-[id="{version}-plugins-{type}s-{plugin}-compression_type"]
-===== `compression_type`
-
- * Value can be any of: `none`, `gzip`, `snappy`, `lz4`
- * Default value is `"none"`
-
-The compression type for all data generated by the producer.
-The default is none (that is, no compression). Valid values are none, gzip, snappy, or lz4.
-
-[id="{version}-plugins-{type}s-{plugin}-jaas_path"]
-===== `jaas_path`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization
-services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for Kafka client:
-[source,java]
-----------------------------------
-KafkaClient {
-  com.sun.security.auth.module.Krb5LoginModule required
-  useTicketCache=true
-  renewTicket=true
-  serviceName="kafka";
-  };
-----------------------------------
-
-Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these
-to the global JVM system properties. This means that if you have multiple Kafka inputs, all of them would be sharing the same
-`jaas_path` and `kerberos_config`. If this is not desirable, you would have to run separate instances of Logstash on
-different JVM instances.
-
-[id="{version}-plugins-{type}s-{plugin}-kerberos_config"]
-===== `kerberos_config`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Optional path to the Kerberos config file. This file is in krb5.conf style, as detailed in
-https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html
-
-[id="{version}-plugins-{type}s-{plugin}-key_serializer"]
-===== `key_serializer`
-
- * Value type is <>
- * Default value is `"org.apache.kafka.common.serialization.StringSerializer"`
-
-Serializer class for the key of the message.
-
-[id="{version}-plugins-{type}s-{plugin}-linger_ms"]
-===== `linger_ms`
-
- * Value type is <>
- * Default value is `0`
-
-The producer groups together any records that arrive in between request
-transmissions into a single batched request. Normally this occurs only under
-load when records arrive faster than they can be sent out. However, in some circumstances
-the client may want to reduce the number of requests even under moderate load.
-This setting accomplishes this by adding a small amount of artificial delay. That is,
-rather than immediately sending out a record, the producer will wait for up to the given delay
-to allow other records to be sent so that the sends can be batched together.
-
-[id="{version}-plugins-{type}s-{plugin}-max_request_size"]
-===== `max_request_size`
-
- * Value type is <>
- * Default value is `1048576`
-
-The maximum size of a request.
-
-[id="{version}-plugins-{type}s-{plugin}-message_key"]
-===== `message_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The key for the message.
-
-[id="{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms"]
-===== `metadata_fetch_timeout_ms`
-
- * Value type is <>
- * Default value is `60000`
-
-The timeout for the initial metadata request that fetches topic metadata.
-
-[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"]
-===== `metadata_max_age_ms`
-
- * Value type is <>
- * Default value is `300000`
-
-The maximum time in milliseconds before a metadata refresh is forced.
-
-[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"]
-===== `receive_buffer_bytes`
-
- * Value type is <>
- * Default value is `32768`
-
-The size of the TCP receive buffer to use when reading data.
-
-[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"]
-===== `reconnect_backoff_ms`
-
- * Value type is <>
- * Default value is `10`
-
-The amount of time to wait before attempting to reconnect to a given host when a connection fails.
-
-[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"]
-===== `request_timeout_ms`
-
- * Value type is <>
- * There is no default value for this setting.
-
-This configuration controls the maximum amount of time the client will wait
-for the response of a request. If the response is not received before the timeout
-elapses, the client will resend the request if necessary, or fail the request if
-retries are exhausted.
-
-[id="{version}-plugins-{type}s-{plugin}-retries"]
-===== `retries`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The default retry behavior is to retry until successful. To prevent data loss,
-the use of this setting is discouraged.
-
-If you choose to set `retries`, a value greater than zero will cause the
-client to only retry a fixed number of times. This will result in data loss
-if a transport fault exists for longer than your retry count (network outage,
-Kafka down, etc.).
-
-A value less than zero is a configuration error.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"]
-===== `retry_backoff_ms`
-
- * Value type is <>
- * Default value is `100`
-
-The amount of time to wait before attempting to retry a failed produce request to a given topic partition.
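-
-A sketch that routes related events to the same partition via `message_key`;
-this assumes the usual Logstash `%{fieldname}` sprintf interpolation applies to
-this option:
-[source,ruby]
-    output {
-      kafka {
-        topic_id => "mytopic"
-        # events with the same host value hash to the same partition
-        message_key => "%{host}"
-      }
-    }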
-
-[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"]
-===== `sasl_kerberos_service_name`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The Kerberos principal name that the Kafka broker runs as.
-This can be defined either in Kafka's JAAS config or in Kafka's config.
-
-[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"]
-===== `sasl_mechanism`
-
- * Value type is <>
- * Default value is `"GSSAPI"`
-
-http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections.
-This may be any mechanism for which a security provider is available.
-GSSAPI is the default mechanism.
-
-[id="{version}-plugins-{type}s-{plugin}-security_protocol"]
-===== `security_protocol`
-
- * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`
- * Default value is `"PLAINTEXT"`
-
-Security protocol to use. It can be one of `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, or `SASL_SSL`.
-
-[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"]
-===== `send_buffer_bytes`
-
- * Value type is <>
- * Default value is `131072`
-
-The size of the TCP send buffer to use when sending data.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"]
-===== `ssl_key_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The password of the private key in the key store file.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"]
-===== `ssl_keystore_location`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If client authentication is required, this setting stores the keystore path.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"]
-===== `ssl_keystore_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If client authentication is required, this setting stores the keystore password.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"]
-===== `ssl_keystore_type`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The keystore type.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"]
-===== `ssl_truststore_location`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The JKS truststore path to validate the Kafka broker's certificate.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"]
-===== `ssl_truststore_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The truststore password.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"]
-===== `ssl_truststore_type`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The truststore type.
-
-[id="{version}-plugins-{type}s-{plugin}-topic_id"]
-===== `topic_id`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The topic to produce messages to.
-
-[id="{version}-plugins-{type}s-{plugin}-value_serializer"]
-===== `value_serializer`
-
- * Value type is <>
- * Default value is `"org.apache.kafka.common.serialization.StringSerializer"`
-
-Serializer class for the value of the message.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/kafka-v7.0.6.asciidoc b/docs/versioned-plugins/outputs/kafka-v7.0.6.asciidoc
deleted file mode 100644
index 98a8d71e8..000000000
--- a/docs/versioned-plugins/outputs/kafka-v7.0.6.asciidoc
+++ /dev/null
@@ -1,425 +0,0 @@
-:plugin: kafka
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v7.0.6
-:release_date: 2018-01-05
-:changelog_url: https://github.com/logstash-plugins/logstash-output-kafka/blob/v7.0.6/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Kafka output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Write events to a Kafka topic. This uses the Kafka Producer API to write messages to a topic on
-the broker.
-
-Here's a compatibility matrix that shows the Kafka client versions that are compatible with each combination
-of Logstash and the Kafka output plugin:
-
-[options="header"]
-|==========================================================
-|Kafka Client Version |Logstash Version |Plugin Version |Why?
-|0.8 |2.0.0 - 2.x.x |<3.0.0 |Legacy, 0.8 is still popular
-|0.9 |2.0.0 - 2.3.x | 3.x.x |Works with the old Ruby Event API (`event['product']['price'] = 10`)
-|0.9 |2.4.x - 5.x.x | 4.x.x |Works with the new getter/setter APIs (`event.set('[product][price]', 10)`)
-|0.10.0.x |2.4.x - 5.x.x | 5.x.x |Not compatible with the <= 0.9 broker
-|0.10.1.x |2.4.x - 5.x.x | 6.x.x |
-|0.11.0.0 |2.4.x - 5.x.x | 6.2.2 |Not compatible with the <= 0.9 broker
-|==========================================================
-
-NOTE: We recommend that you use matching Kafka client and broker versions. During upgrades, you should
-upgrade brokers before clients because brokers target backwards compatibility. For example, the 0.9 broker
-is compatible with both the 0.8 consumer and 0.9 consumer APIs, but not the other way around.
-
-This output supports connecting to Kafka over:
-
-* SSL (requires plugin version 3.0.0 or later)
-* Kerberos SASL (requires plugin version 5.1.0 or later)
-
-By default security is disabled, but it can be turned on as needed.
-
-The only required configuration is the `topic_id`.
-
-The default codec is plain. Logstash will encode your events not only with the message field but also with a timestamp and hostname.
- -If you want the full content of your events to be sent as json, you should set the codec in the output configuration like this: -[source,ruby] - output { - kafka { - codec => json - topic_id => "mytopic" - } - } - -For more information see http://kafka.apache.org/documentation.html#theproducer - -Kafka producer configuration: http://kafka.apache.org/documentation.html#newproducerconfigs - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Kafka Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-acks>> |<>, one of `["0", "1", "all"]`|No -| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bootstrap_servers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-buffer_memory>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-client_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-compression_type>> |<>, one of `["none", "gzip", "snappy", "lz4"]`|No -| <<{version}-plugins-{type}s-{plugin}-jaas_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-kerberos_config>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-key_serializer>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-linger_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-max_request_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metadata_max_age_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-receive_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-request_timeout_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retries>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_backoff_ms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sasl_mechanism>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-security_protocol>> |<>, one of `["PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"]`|No -| <<{version}-plugins-{type}s-{plugin}-send_buffer_bytes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_keystore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_location>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_truststore_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topic_id>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-value_serializer>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-acks"] -===== `acks` - - * Value can be any of: `0`, `1`, `all` - * Default value is `"1"` - -The number of acknowledgments the producer requires the leader to have received -before considering a request complete. 
-
-If set to `0`, the producer will not wait for any acknowledgment from the server at all.
-If set to `1`, the leader will write the record to its local log but
-will respond without awaiting full acknowledgement from all followers.
-If set to `all`, the leader will wait for the full set of in-sync replicas to acknowledge the record.
-
-[id="{version}-plugins-{type}s-{plugin}-batch_size"]
-===== `batch_size`
-
- * Value type is <>
- * Default value is `16384`
-
-The producer will attempt to batch records together into fewer requests whenever multiple
-records are being sent to the same partition. This helps performance on both the client
-and the server. This configuration controls the default batch size in bytes.
-
-[id="{version}-plugins-{type}s-{plugin}-bootstrap_servers"]
-===== `bootstrap_servers`
-
- * Value type is <>
- * Default value is `"localhost:9092"`
-
-This is for bootstrapping, and the producer will only use it for getting metadata (topics,
-partitions, and replicas). The socket connections for sending the actual data will be
-established based on the broker information returned in the metadata. The format is
-`host1:port1,host2:port2`, and the list can be a subset of brokers or a VIP pointing to a
-subset of brokers.
-
-[id="{version}-plugins-{type}s-{plugin}-buffer_memory"]
-===== `buffer_memory`
-
- * Value type is <>
- * Default value is `33554432`
-
-The total bytes of memory the producer can use to buffer records waiting to be sent to the server.
-
-[id="{version}-plugins-{type}s-{plugin}-client_id"]
-===== `client_id`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The id string to pass to the server when making requests. The purpose of this is to be
-able to track the source of requests beyond just ip/port, by allowing a logical
-application name to be included with the request.
-
-[id="{version}-plugins-{type}s-{plugin}-compression_type"]
-===== `compression_type`
-
- * Value can be any of: `none`, `gzip`, `snappy`, `lz4`
- * Default value is `"none"`
-
-The compression type for all data generated by the producer.
-The default is `none` (no compression). Valid values are `none`, `gzip`, `snappy`, and `lz4`.
-
-[id="{version}-plugins-{type}s-{plugin}-jaas_path"]
-===== `jaas_path`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The Java Authentication and Authorization Service (JAAS) API supplies user authentication and authorization
-services for Kafka. This setting provides the path to the JAAS file. Sample JAAS file for the Kafka client:
-[source,java]
-----------------------------------
-KafkaClient {
-  com.sun.security.auth.module.Krb5LoginModule required
-  useTicketCache=true
-  renewTicket=true
-  serviceName="kafka";
-  };
-----------------------------------
-
-Please note that specifying `jaas_path` and `kerberos_config` in the config file will add these
-to the global JVM system properties. This means that if you have multiple Kafka inputs or outputs, all of
-them will share the same `jaas_path` and `kerberos_config`. If this is not desirable, you will have to run
-separate instances of Logstash on different JVM instances.
-
-[id="{version}-plugins-{type}s-{plugin}-kerberos_config"]
-===== `kerberos_config`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Optional path to the kerberos config file.
-This is krb5.conf style as detailed in https://web.mit.edu/kerberos/krb5-1.12/doc/admin/conf_files/krb5_conf.html
-
-[id="{version}-plugins-{type}s-{plugin}-key_serializer"]
-===== `key_serializer`
-
- * Value type is <>
- * Default value is `"org.apache.kafka.common.serialization.StringSerializer"`
-
-Serializer class for the key of the message.
-
-[id="{version}-plugins-{type}s-{plugin}-linger_ms"]
-===== `linger_ms`
-
- * Value type is <>
- * Default value is `0`
-
-The producer groups together any records that arrive between request
-transmissions into a single batched request. Normally this occurs only under
-load, when records arrive faster than they can be sent out. In some circumstances,
-however, the client may want to reduce the number of requests even under moderate load.
-This setting accomplishes that by adding a small amount of artificial delay: rather
-than immediately sending out a record, the producer waits for up to the given delay
-so that other records can be sent and the sends can be batched together.
-
-[id="{version}-plugins-{type}s-{plugin}-max_request_size"]
-===== `max_request_size`
-
- * Value type is <>
- * Default value is `1048576`
-
-The maximum size of a request.
-
-[id="{version}-plugins-{type}s-{plugin}-message_key"]
-===== `message_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The key for the message.
-
-[id="{version}-plugins-{type}s-{plugin}-metadata_fetch_timeout_ms"]
-===== `metadata_fetch_timeout_ms`
-
- * Value type is <>
- * Default value is `60000`
-
-The timeout, in milliseconds, for the initial metadata request that fetches topic metadata.
-
-[id="{version}-plugins-{type}s-{plugin}-metadata_max_age_ms"]
-===== `metadata_max_age_ms`
-
- * Value type is <>
- * Default value is `300000`
-
-The maximum time, in milliseconds, before a metadata refresh is forced.
-
-[id="{version}-plugins-{type}s-{plugin}-receive_buffer_bytes"]
-===== `receive_buffer_bytes`
-
- * Value type is <>
- * Default value is `32768`
-
-The size of the TCP receive buffer to use when reading data.
-
-[id="{version}-plugins-{type}s-{plugin}-reconnect_backoff_ms"]
-===== `reconnect_backoff_ms`
-
- * Value type is <>
- * Default value is `10`
-
-The amount of time to wait before attempting to reconnect to a given host when a connection fails.
-
-[id="{version}-plugins-{type}s-{plugin}-request_timeout_ms"]
-===== `request_timeout_ms`
-
- * Value type is <>
- * There is no default value for this setting.
-
-This configuration controls the maximum amount of time the client will wait
-for the response to a request. If the response is not received before the timeout
-elapses, the client will resend the request if necessary, or fail the request if
-retries are exhausted.
-
-[id="{version}-plugins-{type}s-{plugin}-retries"]
-===== `retries`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The default retry behavior is to retry until successful. To prevent data loss,
-the use of this setting is discouraged.
-
-If you choose to set `retries`, a value greater than zero will cause the
-client to retry only a fixed number of times. This will result in data loss
-if a transport fault lasts longer than your retry count allows for (network outage,
-Kafka down, and so on).
-
-A value less than zero is a configuration error.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_backoff_ms"]
-===== `retry_backoff_ms`
-
- * Value type is <>
- * Default value is `100`
-
-The amount of time to wait before attempting to retry a failed produce request to a given topic partition.
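-
-As a quick orientation for the security settings that follow
-(`security_protocol`, the `ssl_*` options, and the `sasl_*` options), here is a
-hedged sketch of an SSL-secured output; the hostname, truststore path, and
-password are placeholders, not recommendations:
-[source,ruby]
-    output {
-      kafka {
-        bootstrap_servers => "broker.example.com:9093"  # placeholder broker
-        topic_id => "secured-topic"                     # placeholder topic
-        security_protocol => "SSL"
-        ssl_truststore_location => "/path/to/kafka.client.truststore.jks"
-        ssl_truststore_password => "changeit"           # placeholder password
-      }
-    }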
- -[id="{version}-plugins-{type}s-{plugin}-sasl_kerberos_service_name"] -===== `sasl_kerberos_service_name` - - * Value type is <> - * There is no default value for this setting. - -The Kerberos principal name that Kafka broker runs as. -This can be defined either in Kafka's JAAS config or in Kafka's config. - -[id="{version}-plugins-{type}s-{plugin}-sasl_mechanism"] -===== `sasl_mechanism` - - * Value type is <> - * Default value is `"GSSAPI"` - -http://kafka.apache.org/documentation.html#security_sasl[SASL mechanism] used for client connections. -This may be any mechanism for which a security provider is available. -GSSAPI is the default mechanism. - -[id="{version}-plugins-{type}s-{plugin}-security_protocol"] -===== `security_protocol` - - * Value can be any of: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL` - * Default value is `"PLAINTEXT"` - -Security protocol to use, which can be either of PLAINTEXT,SSL,SASL_PLAINTEXT,SASL_SSL - -[id="{version}-plugins-{type}s-{plugin}-send_buffer_bytes"] -===== `send_buffer_bytes` - - * Value type is <> - * Default value is `131072` - -The size of the TCP send buffer to use when sending data. - -[id="{version}-plugins-{type}s-{plugin}-ssl_key_password"] -===== `ssl_key_password` - - * Value type is <> - * There is no default value for this setting. - -The password of the private key in the key store file. - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_location"] -===== `ssl_keystore_location` - - * Value type is <> - * There is no default value for this setting. - -If client authentication is required, this setting stores the keystore path. - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_password"] -===== `ssl_keystore_password` - - * Value type is <> - * There is no default value for this setting. - -If client authentication is required, this setting stores the keystore password - -[id="{version}-plugins-{type}s-{plugin}-ssl_keystore_type"] -===== `ssl_keystore_type` - - * Value type is <> - * There is no default value for this setting. - -The keystore type. - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_location"] -===== `ssl_truststore_location` - - * Value type is <> - * There is no default value for this setting. - -The JKS truststore path to validate the Kafka broker's certificate. - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_password"] -===== `ssl_truststore_password` - - * Value type is <> - * There is no default value for this setting. - -The truststore password - -[id="{version}-plugins-{type}s-{plugin}-ssl_truststore_type"] -===== `ssl_truststore_type` - - * Value type is <> - * There is no default value for this setting. - -The truststore type. - -[id="{version}-plugins-{type}s-{plugin}-topic_id"] -===== `topic_id` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. 
-
-The topic to produce messages to.
-
-[id="{version}-plugins-{type}s-{plugin}-value_serializer"]
-===== `value_serializer`
-
- * Value type is <>
- * Default value is `"org.apache.kafka.common.serialization.StringSerializer"`
-
-Serializer class for the value of the message.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/librato-index.asciidoc b/docs/versioned-plugins/outputs/librato-index.asciidoc
deleted file mode 100644
index 8978e680f..000000000
--- a/docs/versioned-plugins/outputs/librato-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: librato
-:type: output
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-16
-| <> | 2017-06-23
-|=======================================================================
-
-include::librato-v3.0.5.asciidoc[]
-include::librato-v3.0.4.asciidoc[]
-include::librato-v3.0.2.asciidoc[]
-
diff --git a/docs/versioned-plugins/outputs/librato-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/librato-v3.0.2.asciidoc
deleted file mode 100644
index 13c446a57..000000000
--- a/docs/versioned-plugins/outputs/librato-v3.0.2.asciidoc
+++ /dev/null
@@ -1,162 +0,0 @@
-:plugin: librato
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.2
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-output-librato/blob/v3.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Librato output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output lets you send metrics, annotations, and alerts to
-Librato based on Logstash events.
-
-This is VERY experimental and inefficient right now.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Librato Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-account_id>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-annotation>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-api_token>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-counter>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-gauge>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-account_id"]
-===== `account_id`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Your Librato account, usually an email address.
-
-[id="{version}-plugins-{type}s-{plugin}-annotation"]
-===== `annotation`
-
- * Value type is <>
- * Default value is `{}`
-
-Annotations:
-Registers an annotation with Librato.
-The only required fields are `title` and `name`.
-`start_time` and `end_time` will be set to `event.get("@timestamp").to_i`.
-You can add any other optional annotation values as well.
-All values will be passed through `event.sprintf`.
-
-Example:
-[source,ruby]
-    {
-      "title" => "Logstash event on %{host}"
-      "name" => "logstash_stream"
-    }
-or
-[source,ruby]
-    {
-      "title" => "Logstash event"
-      "description" => "%{message}"
-      "name" => "logstash_stream"
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-api_token"]
-===== `api_token`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Your Librato API token.
-
-[id="{version}-plugins-{type}s-{plugin}-batch_size"]
-===== `batch_size`
-
- * Value type is <>
- * Default value is `"10"`
-
-Batch size: the number of events to batch up before sending to Librato.
-
-
-[id="{version}-plugins-{type}s-{plugin}-counter"]
-===== `counter`
-
- * Value type is <>
- * Default value is `{}`
-
-Counters:
-Send data to Librato as a counter.
-
-Example:
-[source,ruby]
-    {
-      "value" => "1"
-      "source" => "%{host}"
-      "name" => "messages_received"
-    }
-
-Additionally, you can override the `measure_time` for the event. It must be a unix timestamp:
-[source,ruby]
-    {
-      "value" => "1"
-      "source" => "%{host}"
-      "name" => "messages_received"
-      "measure_time" => "%{my_unixtime_field}"
-    }
-The default is to use the event's timestamp.
-
-[id="{version}-plugins-{type}s-{plugin}-gauge"]
-===== `gauge`
-
- * Value type is <>
- * Default value is `{}`
-
-Gauges:
-Send data to Librato as a gauge.
-
-Example:
-[source,ruby]
-    {
-      "value" => "%{bytes_received}"
-      "source" => "%{host}"
-      "name" => "apache_bytes"
-    }
-Additionally, you can override the `measure_time` for the event. It must be a unix timestamp:
-[source,ruby]
-    {
-      "value" => "%{bytes_received}"
-      "source" => "%{host}"
-      "name" => "apache_bytes"
-      "measure_time" => "%{my_unixtime_field}"
-    }
-The default is to use the event's timestamp.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/librato-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/librato-v3.0.4.asciidoc
deleted file mode 100644
index fa8f68011..000000000
--- a/docs/versioned-plugins/outputs/librato-v3.0.4.asciidoc
+++ /dev/null
@@ -1,162 +0,0 @@
-:plugin: librato
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.4
-:release_date: 2017-08-16
-:changelog_url: https://github.com/logstash-plugins/logstash-output-librato/blob/v3.0.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Librato output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output lets you send metrics, annotations, and alerts to
-Librato based on Logstash events.
-
-This is VERY experimental and inefficient right now.
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Librato Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-account_id>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-annotation>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-api_token>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-counter>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-gauge>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-account_id"]
-===== `account_id`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Your Librato account, usually an email address.
-
-[id="{version}-plugins-{type}s-{plugin}-annotation"]
-===== `annotation`
-
- * Value type is <>
- * Default value is `{}`
-
-Annotations:
-Registers an annotation with Librato.
-The only required fields are `title` and `name`.
-`start_time` and `end_time` will be set to `event.get("@timestamp").to_i`.
-You can add any other optional annotation values as well.
-All values will be passed through `event.sprintf`.
-
-Example:
-[source,ruby]
-    {
-      "title" => "Logstash event on %{host}"
-      "name" => "logstash_stream"
-    }
-or
-[source,ruby]
-    {
-      "title" => "Logstash event"
-      "description" => "%{message}"
-      "name" => "logstash_stream"
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-api_token"]
-===== `api_token`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Your Librato API token.
-
-[id="{version}-plugins-{type}s-{plugin}-batch_size"]
-===== `batch_size`
-
- * Value type is <>
- * Default value is `"10"`
-
-Batch size: the number of events to batch up before sending to Librato.
-
-
-[id="{version}-plugins-{type}s-{plugin}-counter"]
-===== `counter`
-
- * Value type is <>
- * Default value is `{}`
-
-Counters:
-Send data to Librato as a counter.
-
-Example:
-[source,ruby]
-    {
-      "value" => "1"
-      "source" => "%{host}"
-      "name" => "messages_received"
-    }
-
-Additionally, you can override the `measure_time` for the event. It must be a unix timestamp:
-[source,ruby]
-    {
-      "value" => "1"
-      "source" => "%{host}"
-      "name" => "messages_received"
-      "measure_time" => "%{my_unixtime_field}"
-    }
-The default is to use the event's timestamp.
-
-[id="{version}-plugins-{type}s-{plugin}-gauge"]
-===== `gauge`
-
- * Value type is <>
- * Default value is `{}`
-
-Gauges:
-Send data to Librato as a gauge.
-
-Example:
-[source,ruby]
-    {
-      "value" => "%{bytes_received}"
-      "source" => "%{host}"
-      "name" => "apache_bytes"
-    }
-Additionally, you can override the `measure_time` for the event. It must be a unix timestamp:
-[source,ruby]
-    {
-      "value" => "%{bytes_received}"
-      "source" => "%{host}"
-      "name" => "apache_bytes"
-      "measure_time" => "%{my_unixtime_field}"
-    }
-The default is to use the event's timestamp.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/librato-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/librato-v3.0.5.asciidoc
deleted file mode 100644
index caeb5bc4d..000000000
--- a/docs/versioned-plugins/outputs/librato-v3.0.5.asciidoc
+++ /dev/null
@@ -1,162 +0,0 @@
-:plugin: librato
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.5
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-output-librato/blob/v3.0.5/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Librato output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output lets you send metrics, annotations, and alerts to
-Librato based on Logstash events.
-
-This is VERY experimental and inefficient right now.
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Librato Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-account_id>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-annotation>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-api_token>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-batch_size>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-counter>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-gauge>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-account_id"]
-===== `account_id`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Your Librato account, usually an email address.
-
-[id="{version}-plugins-{type}s-{plugin}-annotation"]
-===== `annotation`
-
- * Value type is <>
- * Default value is `{}`
-
-Annotations:
-Registers an annotation with Librato.
-The only required fields are `title` and `name`.
-`start_time` and `end_time` will be set to `event.get("@timestamp").to_i`.
-You can add any other optional annotation values as well.
-All values will be passed through `event.sprintf`.
-
-Example:
-[source,ruby]
-    {
-      "title" => "Logstash event on %{host}"
-      "name" => "logstash_stream"
-    }
-or
-[source,ruby]
-    {
-      "title" => "Logstash event"
-      "description" => "%{message}"
-      "name" => "logstash_stream"
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-api_token"]
-===== `api_token`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Your Librato API token.
-
-[id="{version}-plugins-{type}s-{plugin}-batch_size"]
-===== `batch_size`
-
- * Value type is <>
- * Default value is `"10"`
-
-Batch size: the number of events to batch up before sending to Librato.
-
-
-[id="{version}-plugins-{type}s-{plugin}-counter"]
-===== `counter`
-
- * Value type is <>
- * Default value is `{}`
-
-Counters:
-Send data to Librato as a counter.
-
-Example:
-[source,ruby]
-    {
-      "value" => "1"
-      "source" => "%{host}"
-      "name" => "messages_received"
-    }
-
-Additionally, you can override the `measure_time` for the event. It must be a unix timestamp:
-[source,ruby]
-    {
-      "value" => "1"
-      "source" => "%{host}"
-      "name" => "messages_received"
-      "measure_time" => "%{my_unixtime_field}"
-    }
-The default is to use the event's timestamp.
-
-[id="{version}-plugins-{type}s-{plugin}-gauge"]
-===== `gauge`
-
- * Value type is <>
- * Default value is `{}`
-
-Gauges:
-Send data to Librato as a gauge.
-
-Example:
-[source,ruby]
-    {
-      "value" => "%{bytes_received}"
-      "source" => "%{host}"
-      "name" => "apache_bytes"
-    }
-Additionally, you can override the `measure_time` for the event. It must be a unix timestamp:
-[source,ruby]
-    {
-      "value" => "%{bytes_received}"
-      "source" => "%{host}"
-      "name" => "apache_bytes"
-      "measure_time" => "%{my_unixtime_field}"
-    }
-The default is to use the event's timestamp.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/logentries-index.asciidoc b/docs/versioned-plugins/outputs/logentries-index.asciidoc
deleted file mode 100644
index bd9d46c10..000000000
--- a/docs/versioned-plugins/outputs/logentries-index.asciidoc
+++ /dev/null
@@ -1,10 +0,0 @@
-:plugin: logentries
-:type: output
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-|=======================================================================
-
-
diff --git a/docs/versioned-plugins/outputs/loggly-index.asciidoc b/docs/versioned-plugins/outputs/loggly-index.asciidoc
deleted file mode 100644
index 7ed3a311f..000000000
--- a/docs/versioned-plugins/outputs/loggly-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: loggly
-:type: output
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-16
-| <> | 2017-06-23
-|=======================================================================
-
-include::loggly-v3.0.3.asciidoc[]
-include::loggly-v3.0.2.asciidoc[]
-include::loggly-v3.0.1.asciidoc[]
-
diff --git a/docs/versioned-plugins/outputs/loggly-v3.0.1.asciidoc b/docs/versioned-plugins/outputs/loggly-v3.0.1.asciidoc
deleted file mode 100644
index 73593c01f..000000000
--- a/docs/versioned-plugins/outputs/loggly-v3.0.1.asciidoc
+++ /dev/null
@@ -1,164 +0,0 @@
-:plugin: loggly
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.1
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-output-loggly/blob/v3.0.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Loggly output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Got a Loggly account? Use Logstash to ship logs to Loggly!
-
-This is most useful so you can use Logstash to parse and structure
-your logs and ship structured, json events to your account at Loggly.
-
-To use this, you'll need to use a Loggly input with type 'http'
-and 'json logging' enabled.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Loggly Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-can_retry>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-proto>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy_host>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy_password>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy_port>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy_user>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-retry_count>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-tag>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-can_retry"]
-===== `can_retry`
-
- * Value type is <>
- * Default value is `true`
-
-Can retry.
-When this value is set to `true`, the plugin retries the request if the first attempt fails.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * Value type is <>
- * Default value is `"logs-01.loggly.com"`
-
-The hostname to send logs to. This should target the Loggly http input
-server, which is usually "logs-01.loggly.com" (Gen2 account).
-See the Loggly HTTP endpoint documentation at
-https://www.loggly.com/docs/http-endpoint/
-
-[id="{version}-plugins-{type}s-{plugin}-key"]
-===== `key`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The loggly http input key to send to.
-This is usually visible in the Loggly 'Inputs' page as something like this:
-....
-    https://logs-01.loggly.net/inputs/abcdef12-3456-7890-abcd-ef0123456789
-                                      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-                                      \----------> key <-------------/
-....
-You can use `%{foo}` field lookups here if you need to pull the api key from
-the event. This is mainly aimed at multitenant hosting providers who want
-to offer shipping a customer's logs to that customer's loggly account.
-
-[id="{version}-plugins-{type}s-{plugin}-proto"]
-===== `proto`
-
- * Value type is <>
- * Default value is `"http"`
-
-Whether the log action should be sent over https instead of plain http.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy_host"]
-===== `proxy_host`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The proxy host.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy_password"]
-===== `proxy_password`
-
- * Value type is <>
- * Default value is `""`
-
-The proxy password.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy_port"]
-===== `proxy_port`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The proxy port.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy_user"]
-===== `proxy_user`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The proxy username.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_count"]
-===== `retry_count`
-
- * Value type is <>
- * Default value is `5`
-
-Retry count.
-A request may time out, for example over a slow Internet connection. When that
-happens, this setting controls how many times the plugin retries the request
-before it halts.
-
-[id="{version}-plugins-{type}s-{plugin}-tag"]
-===== `tag`
-
- * Value type is <>
- * Default value is `"logstash"`
-
-Loggly tag.
-Tags help you to find your logs in the Loggly dashboard easily.
-You can search in Loggly using a tag, such as "tag:logstash-contrib",
-or the tag set by you in the config file.
-
-You can use %{somefield} to allow for custom tag values.
-This is helpful for leveraging Loggly source groups:
-https://www.loggly.com/docs/source-groups/
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/loggly-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/loggly-v3.0.2.asciidoc
deleted file mode 100644
index 599c3a5da..000000000
--- a/docs/versioned-plugins/outputs/loggly-v3.0.2.asciidoc
+++ /dev/null
@@ -1,164 +0,0 @@
-:plugin: loggly
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.2
-:release_date: 2017-08-16
-:changelog_url: https://github.com/logstash-plugins/logstash-output-loggly/blob/v3.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Loggly output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Got a Loggly account? Use Logstash to ship logs to Loggly!
-
-This is most useful so you can use Logstash to parse and structure
-your logs and ship structured, json events to your account at Loggly.
-
-To use this, you'll need to use a Loggly input with type 'http'
-and 'json logging' enabled.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Loggly Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-can_retry>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-proto>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tag>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-can_retry"] -===== `can_retry` - - * Value type is <> - * Default value is `true` - -Can Retry. -Setting this value true helps user to send multiple retry attempts if the first request fails - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"logs-01.loggly.com"` - -The hostname to send logs to. This should target the loggly http input -server which is usually "logs-01.loggly.com" (Gen2 account). -See Loggly HTTP endpoint documentation at -https://www.loggly.com/docs/http-endpoint/ - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The loggly http input key to send to. -This is usually visible in the Loggly 'Inputs' page as something like this: -.... - https://logs-01.loggly.net/inputs/abcdef12-3456-7890-abcd-ef0123456789 - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - \----------> key <-------------/ -.... -You can use `%{foo}` field lookups here if you need to pull the api key from -the event. This is mainly aimed at multitenant hosting providers who want -to offer shipping a customer's logs to that customer's loggly account. - -[id="{version}-plugins-{type}s-{plugin}-proto"] -===== `proto` - - * Value type is <> - * Default value is `"http"` - -Should the log action be sent over https instead of plain http - -[id="{version}-plugins-{type}s-{plugin}-proxy_host"] -===== `proxy_host` - - * Value type is <> - * There is no default value for this setting. - -Proxy Host - -[id="{version}-plugins-{type}s-{plugin}-proxy_password"] -===== `proxy_password` - - * Value type is <> - * Default value is `""` - -Proxy Password - -[id="{version}-plugins-{type}s-{plugin}-proxy_port"] -===== `proxy_port` - - * Value type is <> - * There is no default value for this setting. - -Proxy Port - -[id="{version}-plugins-{type}s-{plugin}-proxy_user"] -===== `proxy_user` - - * Value type is <> - * There is no default value for this setting. - -Proxy Username - -[id="{version}-plugins-{type}s-{plugin}-retry_count"] -===== `retry_count` - - * Value type is <> - * Default value is `5` - -Retry count. 
-A request may time out, for example over a slow Internet connection. When that
-happens, this setting controls how many times the plugin retries the request
-before it halts.
-
-[id="{version}-plugins-{type}s-{plugin}-tag"]
-===== `tag`
-
- * Value type is <>
- * Default value is `"logstash"`
-
-Loggly tag.
-Tags help you to find your logs in the Loggly dashboard easily.
-You can search in Loggly using a tag, such as "tag:logstash-contrib",
-or the tag set by you in the config file.
-
-You can use %{somefield} to allow for custom tag values.
-This is helpful for leveraging Loggly source groups:
-https://www.loggly.com/docs/source-groups/
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/loggly-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/loggly-v3.0.3.asciidoc
deleted file mode 100644
index b699dd05c..000000000
--- a/docs/versioned-plugins/outputs/loggly-v3.0.3.asciidoc
+++ /dev/null
@@ -1,164 +0,0 @@
-:plugin: loggly
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.3
-:release_date: 2017-11-07
-:changelog_url: https://github.com/logstash-plugins/logstash-output-loggly/blob/v3.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Loggly output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Got a Loggly account? Use Logstash to ship logs to Loggly!
-
-This is most useful so you can use Logstash to parse and structure
-your logs and ship structured, json events to your account at Loggly.
-
-To use this, you'll need to use a Loggly input with type 'http'
-and 'json logging' enabled.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Loggly Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-can_retry>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-key>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-proto>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy_host>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy_password>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy_port>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy_user>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-retry_count>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-tag>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-can_retry"]
-===== `can_retry`
-
- * Value type is <>
- * Default value is `true`
-
-Can retry.
-When this value is set to `true`, the plugin retries the request if the first attempt fails.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * Value type is <>
- * Default value is `"logs-01.loggly.com"`
-
-The hostname to send logs to. This should target the Loggly http input
-server, which is usually "logs-01.loggly.com" (Gen2 account).
-See the Loggly HTTP endpoint documentation at
-https://www.loggly.com/docs/http-endpoint/
-
-[id="{version}-plugins-{type}s-{plugin}-key"]
-===== `key`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The loggly http input key to send to.
-This is usually visible in the Loggly 'Inputs' page as something like this:
-....
-    https://logs-01.loggly.net/inputs/abcdef12-3456-7890-abcd-ef0123456789
-                                      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-                                      \----------> key <-------------/
-....
-You can use `%{foo}` field lookups here if you need to pull the api key from
-the event. This is mainly aimed at multitenant hosting providers who want
-to offer shipping a customer's logs to that customer's loggly account.
-
-[id="{version}-plugins-{type}s-{plugin}-proto"]
-===== `proto`
-
- * Value type is <>
- * Default value is `"http"`
-
-Whether the log action should be sent over https instead of plain http.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy_host"]
-===== `proxy_host`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The proxy host.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy_password"]
-===== `proxy_password`
-
- * Value type is <>
- * Default value is `""`
-
-The proxy password.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy_port"]
-===== `proxy_port`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The proxy port.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy_user"]
-===== `proxy_user`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The proxy username.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_count"]
-===== `retry_count`
-
- * Value type is <>
- * Default value is `5`
-
-Retry count.
-A request may time out, for example over a slow Internet connection. When that
-happens, this setting controls how many times the plugin retries the request
-before it halts.
-
-[id="{version}-plugins-{type}s-{plugin}-tag"]
-===== `tag`
-
- * Value type is <>
- * Default value is `"logstash"`
-
-Loggly tag.
-Tags help you to find your logs in the Loggly dashboard easily.
-You can search in Loggly using a tag, such as "tag:logstash-contrib",
-or the tag set by you in the config file.
-
-You can use %{somefield} to allow for custom tag values.
-This is helpful for leveraging Loggly source groups:
-https://www.loggly.com/docs/source-groups/
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/lumberjack-index.asciidoc b/docs/versioned-plugins/outputs/lumberjack-index.asciidoc
deleted file mode 100644
index dc919efd6..000000000
--- a/docs/versioned-plugins/outputs/lumberjack-index.asciidoc
+++ /dev/null
@@ -1,14 +0,0 @@
-:plugin: lumberjack
-:type: output
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-08-21
-| <> | 2017-06-23
-|=======================================================================
-
-include::lumberjack-v3.1.5.asciidoc[]
-include::lumberjack-v3.1.3.asciidoc[]
-
diff --git a/docs/versioned-plugins/outputs/lumberjack-v3.1.3.asciidoc b/docs/versioned-plugins/outputs/lumberjack-v3.1.3.asciidoc
deleted file mode 100644
index 36916f5f6..000000000
--- a/docs/versioned-plugins/outputs/lumberjack-v3.1.3.asciidoc
+++ /dev/null
@@ -1,101 +0,0 @@
-:plugin: lumberjack
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.1.3
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-output-lumberjack/blob/v3.1.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Lumberjack output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output sends events using the lumberjack protocol.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Lumberjack Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-flush_size>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-idle_flush_time>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-flush_size"]
-===== `flush_size`
-
- * Value type is <>
- * Default value is `1024`
-
-To make efficient calls to the lumberjack output, we buffer events locally.
-If the number of buffered events exceeds the declared `flush_size`, we
-send them to the Logstash server.
-
-[id="{version}-plugins-{type}s-{plugin}-hosts"]
-===== `hosts`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The list of addresses lumberjack can send to.
-
-[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"]
-===== `idle_flush_time`
-
- * Value type is <>
- * Default value is `1`
-
-The amount of time since the last flush before a flush is forced.
-
-This setting helps ensure slow event rates don't get stuck in Logstash.
-For example, if your `flush_size` is 100, and you have received 10 events, -and it has been more than `idle_flush_time` seconds since the last flush, -Logstash will flush those 10 events automatically. - -This helps keep both fast and slow log streams moving along in -near-real-time. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -the port to connect to - -[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"] -===== `ssl_certificate` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -ssl certificate to use - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/lumberjack-v3.1.5.asciidoc b/docs/versioned-plugins/outputs/lumberjack-v3.1.5.asciidoc deleted file mode 100644 index b457655c5..000000000 --- a/docs/versioned-plugins/outputs/lumberjack-v3.1.5.asciidoc +++ /dev/null @@ -1,102 +0,0 @@ -:plugin: lumberjack -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.5 -:release_date: 2017-08-21 -:changelog_url: https://github.com/logstash-plugins/logstash-output-lumberjack/blob/v3.1.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Lumberjack output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output sends events using the lumberjack protocol. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Lumberjack Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-flush_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-idle_flush_time>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-flush_size"] -===== `flush_size` - - * Value type is <> - * Default value is `1024` - -To make efficient calls to the lumberjack output we are buffering events locally. -if the number of events exceed the number the declared `flush_size` we will -send them to the logstash server. - -[id="{version}-plugins-{type}s-{plugin}-hosts"] -===== `hosts` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -list of addresses lumberjack can send to - -[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"] -===== `idle_flush_time` - - * Value type is <> - * Default value is `1` - -The amount of time since last flush before a flush is forced. - -This setting helps ensure slow event rates don't get stuck in Logstash. 
-For example, if your `flush_size` is 100, and you have received 10 events, -and it has been more than `idle_flush_time` seconds since the last flush, -Logstash will flush those 10 events automatically. - -This helps keep both fast and slow log streams moving along in -near-real-time. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -the port to connect to - -[id="{version}-plugins-{type}s-{plugin}-ssl_certificate"] -===== `ssl_certificate` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -ssl certificate to use - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/metriccatcher-index.asciidoc b/docs/versioned-plugins/outputs/metriccatcher-index.asciidoc deleted file mode 100644 index 928c1c4e5..000000000 --- a/docs/versioned-plugins/outputs/metriccatcher-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: metriccatcher -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::metriccatcher-v3.0.3.asciidoc[] -include::metriccatcher-v3.0.2.asciidoc[] -include::metriccatcher-v3.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/metriccatcher-v3.0.1.asciidoc b/docs/versioned-plugins/outputs/metriccatcher-v3.0.1.asciidoc deleted file mode 100644 index 6c0cc647f..000000000 --- a/docs/versioned-plugins/outputs/metriccatcher-v3.0.1.asciidoc +++ /dev/null @@ -1,164 +0,0 @@ -:plugin: metriccatcher -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-metriccatcher/blob/v3.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Metriccatcher output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output ships metrics to MetricCatcher, allowing you to -utilize Coda Hale's Metrics. - -More info on MetricCatcher: https://github.com/clearspring/MetricCatcher - -At Clearspring, we use it to count the response codes from Apache logs: -[source,ruby] - metriccatcher { - host => "localhost" - port => "1420" - type => "apache-access" - fields => [ "response" ] - meter => { - "%{host}.apache.response.%{response}" => "1" - } - } - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Metriccatcher Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-biased>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-counter>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-gauge>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-meter>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-timer>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-uniform>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-biased"]
-===== `biased`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The metrics to send. This supports dynamic strings like `%{host}`
-for metric names and also for values. This is a hash field with the
-metric name as the key and the metric value as the value.
-
-The value will be coerced to a floating point value. Values which cannot be
-coerced will be set to zero (0).
-
-[id="{version}-plugins-{type}s-{plugin}-counter"]
-===== `counter`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The metrics to send. This supports dynamic strings like `%{host}`
-for metric names and also for values. This is a hash field with the
-metric name as the key and the metric value as the value. Example:
-[source,ruby]
-    counter => { "%{host}.apache.hits.%{response}" => "1" }
-
-The value will be coerced to a floating point value. Values which cannot be
-coerced will be set to zero (0).
-
-[id="{version}-plugins-{type}s-{plugin}-gauge"]
-===== `gauge`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The metrics to send. This supports dynamic strings like `%{host}`
-for metric names and also for values. This is a hash field with the
-metric name as the key and the metric value as the value.
-
-The value will be coerced to a floating point value. Values which cannot be
-coerced will be set to zero (0).
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * Value type is <>
- * Default value is `"localhost"`
-
-The address of the MetricCatcher.
-
-[id="{version}-plugins-{type}s-{plugin}-meter"]
-===== `meter`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The metrics to send. This supports dynamic strings like `%{host}`
-for metric names and also for values. This is a hash field with the
-metric name as the key and the metric value as the value.
-
-The value will be coerced to a floating point value. Values which cannot be
-coerced will be set to zero (0).
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <>
- * Default value is `1420`
-
-The port to connect to on your MetricCatcher.
-
-[id="{version}-plugins-{type}s-{plugin}-timer"]
-===== `timer`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The metrics to send. This supports dynamic strings like `%{host}`
-for metric names and also for values. This is a hash field with the
-metric name as the key and the metric value as the value. Example:
-[source,ruby]
-    timer => { "%{host}.apache.response_time" => "%{response_time}" }
-
-The value will be coerced to a floating point value. Values which cannot be
-coerced will be set to zero (0).
-
-[id="{version}-plugins-{type}s-{plugin}-uniform"]
-===== `uniform`
-
- * Value type is <>
- * There is no default value for this setting.
- -The metrics to send. This supports dynamic strings like `%{host}` -for metric names and also for values. This is a hash field with key -of the metric name, value of the metric value. - -The value will be coerced to a floating point value. Values which cannot be -coerced will zero (0) - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/metriccatcher-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/metriccatcher-v3.0.2.asciidoc deleted file mode 100644 index e00323ef6..000000000 --- a/docs/versioned-plugins/outputs/metriccatcher-v3.0.2.asciidoc +++ /dev/null @@ -1,164 +0,0 @@ -:plugin: metriccatcher -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.2 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-metriccatcher/blob/v3.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Metriccatcher output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output ships metrics to MetricCatcher, allowing you to -utilize Coda Hale's Metrics. - -More info on MetricCatcher: https://github.com/clearspring/MetricCatcher - -At Clearspring, we use it to count the response codes from Apache logs: -[source,ruby] - metriccatcher { - host => "localhost" - port => "1420" - type => "apache-access" - fields => [ "response" ] - meter => { - "%{host}.apache.response.%{response}" => "1" - } - } - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Metriccatcher Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-biased>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-counter>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-gauge>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-meter>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timer>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-uniform>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-biased"] -===== `biased` - - * Value type is <> - * There is no default value for this setting. - -The metrics to send. This supports dynamic strings like `%{host}` -for metric names and also for values. This is a hash field with key -of the metric name, value of the metric value. - -The value will be coerced to a floating point value. Values which cannot be -coerced will zero (0) - -[id="{version}-plugins-{type}s-{plugin}-counter"] -===== `counter` - - * Value type is <> - * There is no default value for this setting. - -The metrics to send. This supports dynamic strings like `%{host}` -for metric names and also for values. 
This is a hash field with key -of the metric name, value of the metric value. Example: -[source,ruby] - counter => { "%{host}.apache.hits.%{response} => "1" } - -The value will be coerced to a floating point value. Values which cannot be -coerced will zero (0) - -[id="{version}-plugins-{type}s-{plugin}-gauge"] -===== `gauge` - - * Value type is <> - * There is no default value for this setting. - -The metrics to send. This supports dynamic strings like `%{host}` -for metric names and also for values. This is a hash field with key -of the metric name, value of the metric value. - -The value will be coerced to a floating point value. Values which cannot be -coerced will zero (0) - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"localhost"` - -The address of the MetricCatcher - -[id="{version}-plugins-{type}s-{plugin}-meter"] -===== `meter` - - * Value type is <> - * There is no default value for this setting. - -The metrics to send. This supports dynamic strings like `%{host}` -for metric names and also for values. This is a hash field with key -of the metric name, value of the metric value. - -The value will be coerced to a floating point value. Values which cannot be -coerced will zero (0) - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `1420` - -The port to connect on your MetricCatcher - -[id="{version}-plugins-{type}s-{plugin}-timer"] -===== `timer` - - * Value type is <> - * There is no default value for this setting. - -The metrics to send. This supports dynamic strings like %{host} -for metric names and also for values. This is a hash field with key -of the metric name, value of the metric value. Example: -[source,ruby] - timer => { "%{host}.apache.response_time => "%{response_time}" } - -The value will be coerced to a floating point value. Values which cannot be -coerced will zero (0) - -[id="{version}-plugins-{type}s-{plugin}-uniform"] -===== `uniform` - - * Value type is <> - * There is no default value for this setting. - -The metrics to send. This supports dynamic strings like `%{host}` -for metric names and also for values. This is a hash field with key -of the metric name, value of the metric value. - -The value will be coerced to a floating point value. Values which cannot be -coerced will zero (0) - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/metriccatcher-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/metriccatcher-v3.0.3.asciidoc deleted file mode 100644 index c3bb52735..000000000 --- a/docs/versioned-plugins/outputs/metriccatcher-v3.0.3.asciidoc +++ /dev/null @@ -1,164 +0,0 @@ -:plugin: metriccatcher -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-output-metriccatcher/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Metriccatcher output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output ships metrics to MetricCatcher, allowing you to -utilize Coda Hale's Metrics. - -More info on MetricCatcher: https://github.com/clearspring/MetricCatcher - -At Clearspring, we use it to count the response codes from Apache logs: -[source,ruby] - metriccatcher { - host => "localhost" - port => "1420" - type => "apache-access" - fields => [ "response" ] - meter => { - "%{host}.apache.response.%{response}" => "1" - } - } - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Metriccatcher Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-biased>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-counter>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-gauge>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-meter>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timer>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-uniform>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-biased"] -===== `biased` - - * Value type is <> - * There is no default value for this setting. - -The metrics to send. This supports dynamic strings like `%{host}` -for metric names and also for values. This is a hash field with key -of the metric name, value of the metric value. - -The value will be coerced to a floating point value. Values which cannot be -coerced will zero (0) - -[id="{version}-plugins-{type}s-{plugin}-counter"] -===== `counter` - - * Value type is <> - * There is no default value for this setting. - -The metrics to send. This supports dynamic strings like `%{host}` -for metric names and also for values. This is a hash field with key -of the metric name, value of the metric value. Example: -[source,ruby] - counter => { "%{host}.apache.hits.%{response} => "1" } - -The value will be coerced to a floating point value. Values which cannot be -coerced will zero (0) - -[id="{version}-plugins-{type}s-{plugin}-gauge"] -===== `gauge` - - * Value type is <> - * There is no default value for this setting. - -The metrics to send. This supports dynamic strings like `%{host}` -for metric names and also for values. This is a hash field with key -of the metric name, value of the metric value. - -The value will be coerced to a floating point value. Values which cannot be -coerced will zero (0) - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"localhost"` - -The address of the MetricCatcher - -[id="{version}-plugins-{type}s-{plugin}-meter"] -===== `meter` - - * Value type is <> - * There is no default value for this setting. - -The metrics to send. This supports dynamic strings like `%{host}` -for metric names and also for values. This is a hash field with key -of the metric name, value of the metric value. 
-
-The value will be coerced to a floating point value. Values which cannot be
-coerced will be set to zero (0).
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <>
- * Default value is `1420`
-
-The port to connect to on your MetricCatcher.
-
-[id="{version}-plugins-{type}s-{plugin}-timer"]
-===== `timer`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The metrics to send. This supports dynamic strings like `%{host}`
-for metric names and also for values. This is a hash field with the
-metric name as the key and the metric value as the value. Example:
-[source,ruby]
-    timer => { "%{host}.apache.response_time" => "%{response_time}" }
-
-The value will be coerced to a floating point value. Values which cannot be
-coerced will be set to zero (0).
-
-[id="{version}-plugins-{type}s-{plugin}-uniform"]
-===== `uniform`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The metrics to send. This supports dynamic strings like `%{host}`
-for metric names and also for values. This is a hash field with the
-metric name as the key and the metric value as the value.
-
-The value will be coerced to a floating point value. Values which cannot be
-coerced will be set to zero (0).
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/monasca_log_api-index.asciidoc b/docs/versioned-plugins/outputs/monasca_log_api-index.asciidoc
deleted file mode 100644
index 6da217799..000000000
--- a/docs/versioned-plugins/outputs/monasca_log_api-index.asciidoc
+++ /dev/null
@@ -1,12 +0,0 @@
-:plugin: monasca_log_api
-:type: output
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-10-25
-|=======================================================================
-
-include::monasca_log_api-v1.0.2.asciidoc[]
-
diff --git a/docs/versioned-plugins/outputs/monasca_log_api-v1.0.2.asciidoc b/docs/versioned-plugins/outputs/monasca_log_api-v1.0.2.asciidoc
deleted file mode 100644
index 1df18f424..000000000
--- a/docs/versioned-plugins/outputs/monasca_log_api-v1.0.2.asciidoc
+++ /dev/null
@@ -1,179 +0,0 @@
-:plugin: monasca_log_api
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v1.0.2
-:release_date: 2017-10-25
-:changelog_url: https://github.com/logstash-plugins/logstash-output-monasca_log_api/blob/v1.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Monasca_log_api output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This Logstash output plugin sends events to the monasca-api.
-It authenticates against keystone and gets a token.
-The token is used to authenticate against the monasca-api and send log events.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Monasca_log_api Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-dimensions>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-elapsed_time_sec>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystone_api_insecure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keystone_api_url>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-max_data_size_kb>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-monasca_log_api_insecure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-monasca_log_api_url>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-num_of_logs>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-project_domain_name>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-project_name>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-user_domain_name>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-username>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-delay"] -===== `delay` - - * Value type is <> - * Default value is `10` - - - -[id="{version}-plugins-{type}s-{plugin}-dimensions"] -===== `dimensions` - - * Value type is <> - * There is no default value for this setting. - -global dimensions - -[id="{version}-plugins-{type}s-{plugin}-elapsed_time_sec"] -===== `elapsed_time_sec` - - * Value type is <> - * Default value is `30` - - - -[id="{version}-plugins-{type}s-{plugin}-keystone_api_insecure"] -===== `keystone_api_insecure` - - * Value type is <> - * Default value is `false` - - - -[id="{version}-plugins-{type}s-{plugin}-keystone_api_url"] -===== `keystone_api_url` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -keystone configuration - -[id="{version}-plugins-{type}s-{plugin}-max_data_size_kb"] -===== `max_data_size_kb` - - * Value type is <> - * Default value is `5120` - - - -[id="{version}-plugins-{type}s-{plugin}-monasca_log_api_insecure"] -===== `monasca_log_api_insecure` - - * Value type is <> - * Default value is `false` - - - -[id="{version}-plugins-{type}s-{plugin}-monasca_log_api_url"] -===== `monasca_log_api_url` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -monasca-log-api configuration - -[id="{version}-plugins-{type}s-{plugin}-num_of_logs"] -===== `num_of_logs` - - * Value type is <> - * Default value is `125` - - - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-project_domain_name"] -===== `project_domain_name` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-project_name"] -===== `project_name` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-user_domain_name"] -===== `user_domain_name` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. 
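-
-Taken together, the Keystone and Monasca settings above, plus the `username`
-setting below, might be wired up as in this hypothetical sketch (every value
-is a placeholder for your own deployment):
-[source,ruby]
-    output {
-      monasca_log_api {
-        keystone_api_url => "http://keystone.example.com:5000/v3"
-        monasca_log_api_url => "http://monasca.example.com:5607/v3.0"
-        username => "logstash"
-        password => "secret"
-        project_name => "monitoring"
-        project_domain_name => "default"
-        user_domain_name => "default"
-      }
-    }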
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-username"]
-===== `username`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/mongodb-index.asciidoc b/docs/versioned-plugins/outputs/mongodb-index.asciidoc
deleted file mode 100644
index d00bc1b48..000000000
--- a/docs/versioned-plugins/outputs/mongodb-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: mongodb
-:type: output
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-07
-| <> | 2017-08-16
-| <> | 2017-06-23
-|=======================================================================
-
-include::mongodb-v3.1.3.asciidoc[]
-include::mongodb-v3.1.2.asciidoc[]
-include::mongodb-v3.1.1.asciidoc[]
-
diff --git a/docs/versioned-plugins/outputs/mongodb-v3.1.1.asciidoc b/docs/versioned-plugins/outputs/mongodb-v3.1.1.asciidoc
deleted file mode 100644
index 27597f9ce..000000000
--- a/docs/versioned-plugins/outputs/mongodb-v3.1.1.asciidoc
+++ /dev/null
@@ -1,134 +0,0 @@
-:plugin: mongodb
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.1.1
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-output-mongodb/blob/v3.1.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Mongodb output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output writes events to MongoDB.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Mongodb Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-bulk>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-bulk_interval>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-bulk_size>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-collection>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-database>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-generateId>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-isodate>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-retry_delay>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-uri>> |<>|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-bulk"]
-===== `bulk`
-
- * Value type is <>
- * Default value is `false`
-
-Bulk insert flag: set to true to enable bulk insertion; otherwise events are inserted one by one.
-
-[id="{version}-plugins-{type}s-{plugin}-bulk_interval"]
-===== `bulk_interval`
-
- * Value type is <>
- * Default value is `2`
-
-Bulk interval. Used to insert events periodically if the "bulk" flag is activated.
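-
-For instance, the bulk settings might be combined as in this sketch (the
-values are illustrative, not recommendations; `bulk_size` is described
-below):
-[source,ruby]
-    output {
-      mongodb {
-        uri => "mongodb://localhost:27017"
-        database => "logstash"
-        collection => "logs"
-        bulk => true          # buffer events and insert them in batches
-        bulk_interval => 2    # flush the buffer periodically...
-        bulk_size => 900      # ...or once this many events are buffered
-      }
-    }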
-
-[id="{version}-plugins-{type}s-{plugin}-bulk_size"]
-===== `bulk_size`
-
- * Value type is <>
- * Default value is `900`
-
-The maximum number of events to buffer: if the number of events to insert into a collection reaches this limit, they are bulk
-inserted regardless of the bulk interval value (the MongoDB hard limit is 1000).
-
-[id="{version}-plugins-{type}s-{plugin}-collection"]
-===== `collection`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The collection to use. This value can use `%{foo}` values to dynamically
-select a collection based on data in the event.
-
-[id="{version}-plugins-{type}s-{plugin}-database"]
-===== `database`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The database to use.
-
-[id="{version}-plugins-{type}s-{plugin}-generateId"]
-===== `generateId`
-
- * Value type is <>
- * Default value is `false`
-
-If true, an "_id" field will be added to the document before insertion.
-The "_id" field will use the timestamp of the event and overwrite an existing
-"_id" field in the event.
-
-[id="{version}-plugins-{type}s-{plugin}-isodate"]
-===== `isodate`
-
- * Value type is <>
- * Default value is `false`
-
-If true, store the @timestamp field in MongoDB as an ISODate type instead
-of an ISO8601 string. For more information about this, see
-http://www.mongodb.org/display/DOCS/Dates.
-
-[id="{version}-plugins-{type}s-{plugin}-retry_delay"]
-===== `retry_delay`
-
- * Value type is <>
- * Default value is `3`
-
-The number of seconds to wait after failure before retrying.
-
-[id="{version}-plugins-{type}s-{plugin}-uri"]
-===== `uri`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-A MongoDB URI to connect to.
-See http://docs.mongodb.org/manual/reference/connection-string/.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/mongodb-v3.1.2.asciidoc b/docs/versioned-plugins/outputs/mongodb-v3.1.2.asciidoc
deleted file mode 100644
index 72d690fe4..000000000
--- a/docs/versioned-plugins/outputs/mongodb-v3.1.2.asciidoc
+++ /dev/null
@@ -1,134 +0,0 @@
-:plugin: mongodb
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.1.2
-:release_date: 2017-08-16
-:changelog_url: https://github.com/logstash-plugins/logstash-output-mongodb/blob/v3.1.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Mongodb output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output writes events to MongoDB.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Mongodb Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-bulk>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bulk_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bulk_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-collection>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-database>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-generateId>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-isodate>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-uri>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-bulk"] -===== `bulk` - - * Value type is <> - * Default value is `false` - -Bulk insert flag, set to true to allow bulk insertion, else it will insert events one by one. - -[id="{version}-plugins-{type}s-{plugin}-bulk_interval"] -===== `bulk_interval` - - * Value type is <> - * Default value is `2` - -Bulk interval, Used to insert events periodically if the "bulk" flag is activated. - -[id="{version}-plugins-{type}s-{plugin}-bulk_size"] -===== `bulk_size` - - * Value type is <> - * Default value is `900` - -Bulk events number, if the number of events to insert into a collection raise that limit, it will be bulk inserted -whatever the bulk interval value (mongodb hard limit is 1000). - -[id="{version}-plugins-{type}s-{plugin}-collection"] -===== `collection` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The collection to use. This value can use `%{foo}` values to dynamically -select a collection based on data in the event. - -[id="{version}-plugins-{type}s-{plugin}-database"] -===== `database` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The database to use. - -[id="{version}-plugins-{type}s-{plugin}-generateId"] -===== `generateId` - - * Value type is <> - * Default value is `false` - -If true, an "_id" field will be added to the document before insertion. -The "_id" field will use the timestamp of the event and overwrite an existing -"_id" field in the event. - -[id="{version}-plugins-{type}s-{plugin}-isodate"] -===== `isodate` - - * Value type is <> - * Default value is `false` - -If true, store the @timestamp field in MongoDB as an ISODate type instead -of an ISO8601 string. For more information about this, see -http://www.mongodb.org/display/DOCS/Dates. - -[id="{version}-plugins-{type}s-{plugin}-retry_delay"] -===== `retry_delay` - - * Value type is <> - * Default value is `3` - -The number of seconds to wait after failure before retrying. - -[id="{version}-plugins-{type}s-{plugin}-uri"] -===== `uri` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -A MongoDB URI to connect to. -See http://docs.mongodb.org/manual/reference/connection-string/. 
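-
-Putting the required settings together, a minimal sketch (the URI and
-database are placeholders; `%{type}` is ordinary event interpolation):
-[source,ruby]
-    output {
-      mongodb {
-        uri => "mongodb://mongo.example.com:27017"
-        database => "logstash"
-        collection => "%{type}"    # choose the collection from the event's type field
-      }
-    }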
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/mongodb-v3.1.3.asciidoc b/docs/versioned-plugins/outputs/mongodb-v3.1.3.asciidoc deleted file mode 100644 index dfb04421d..000000000 --- a/docs/versioned-plugins/outputs/mongodb-v3.1.3.asciidoc +++ /dev/null @@ -1,134 +0,0 @@ -:plugin: mongodb -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.3 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-output-mongodb/blob/v3.1.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Mongodb output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output writes events to MongoDB. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Mongodb Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-bulk>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bulk_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bulk_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-collection>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-database>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-generateId>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-isodate>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-uri>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-bulk"] -===== `bulk` - - * Value type is <> - * Default value is `false` - -Bulk insert flag, set to true to allow bulk insertion, else it will insert events one by one. - -[id="{version}-plugins-{type}s-{plugin}-bulk_interval"] -===== `bulk_interval` - - * Value type is <> - * Default value is `2` - -Bulk interval, Used to insert events periodically if the "bulk" flag is activated. - -[id="{version}-plugins-{type}s-{plugin}-bulk_size"] -===== `bulk_size` - - * Value type is <> - * Default value is `900` - -Bulk events number, if the number of events to insert into a collection raise that limit, it will be bulk inserted -whatever the bulk interval value (mongodb hard limit is 1000). - -[id="{version}-plugins-{type}s-{plugin}-collection"] -===== `collection` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The collection to use. This value can use `%{foo}` values to dynamically -select a collection based on data in the event. - -[id="{version}-plugins-{type}s-{plugin}-database"] -===== `database` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The database to use. 
- -[id="{version}-plugins-{type}s-{plugin}-generateId"] -===== `generateId` - - * Value type is <> - * Default value is `false` - -If true, an "_id" field will be added to the document before insertion. -The "_id" field will use the timestamp of the event and overwrite an existing -"_id" field in the event. - -[id="{version}-plugins-{type}s-{plugin}-isodate"] -===== `isodate` - - * Value type is <> - * Default value is `false` - -If true, store the @timestamp field in MongoDB as an ISODate type instead -of an ISO8601 string. For more information about this, see -http://www.mongodb.org/display/DOCS/Dates. - -[id="{version}-plugins-{type}s-{plugin}-retry_delay"] -===== `retry_delay` - - * Value type is <> - * Default value is `3` - -The number of seconds to wait after failure before retrying. - -[id="{version}-plugins-{type}s-{plugin}-uri"] -===== `uri` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -A MongoDB URI to connect to. -See http://docs.mongodb.org/manual/reference/connection-string/. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/nagios-index.asciidoc b/docs/versioned-plugins/outputs/nagios-index.asciidoc deleted file mode 100644 index a1bf00c2f..000000000 --- a/docs/versioned-plugins/outputs/nagios-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: nagios -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-07 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::nagios-v3.0.5.asciidoc[] -include::nagios-v3.0.4.asciidoc[] -include::nagios-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/nagios-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/nagios-v3.0.3.asciidoc deleted file mode 100644 index 9c0ce8807..000000000 --- a/docs/versioned-plugins/outputs/nagios-v3.0.3.asciidoc +++ /dev/null @@ -1,90 +0,0 @@ -:plugin: nagios -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-nagios/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Nagios output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The Nagios output is used for sending passive check results to Nagios via the -Nagios command file. This output currently supports Nagios 3. - -For this output to work, your event _must_ have the following Logstash event fields: - - * `nagios_host` - * `nagios_service` - -These Logstash event fields are supported, but optional: - - * `nagios_annotation` - * `nagios_level` (overrides `nagios_level` configuration option) - -There are two configuration options: - - * `commandfile` - The location of the Nagios external command file. Defaults - to '/var/lib/nagios3/rw/nagios.cmd' - * `nagios_level` - Specifies the level of the check to be sent. 
Defaults to
- CRITICAL and can be overridden by setting the "nagios_level" field to one
- of "OK", "WARNING", "CRITICAL", or "UNKNOWN".
-
-For example:
-[source,ruby]
-    output {
-      if [message] =~ /(error|ERROR|CRITICAL)/ {
-        nagios {
-          # your config here
-        }
-      }
-    }
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Nagios Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-commandfile>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-nagios_level>> |<>, one of `["0", "1", "2", "3"]`|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-commandfile"]
-===== `commandfile`
-
- * Value type is <>
- * Default value is `"/var/lib/nagios3/rw/nagios.cmd"`
-
-The full path to your Nagios command file.
-
-[id="{version}-plugins-{type}s-{plugin}-nagios_level"]
-===== `nagios_level`
-
- * Value can be any of: `0`, `1`, `2`, `3`
- * Default value is `"2"`
-
-The Nagios check level. Should be one of 0=OK, 1=WARNING, 2=CRITICAL,
-3=UNKNOWN. Defaults to 2 - CRITICAL.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/nagios-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/nagios-v3.0.4.asciidoc
deleted file mode 100644
index a4ed51d13..000000000
--- a/docs/versioned-plugins/outputs/nagios-v3.0.4.asciidoc
+++ /dev/null
@@ -1,90 +0,0 @@
-:plugin: nagios
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.4
-:release_date: 2017-08-16
-:changelog_url: https://github.com/logstash-plugins/logstash-output-nagios/blob/v3.0.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Nagios output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The Nagios output is used for sending passive check results to Nagios via the
-Nagios command file. This output currently supports Nagios 3.
-
-For this output to work, your event _must_ have the following Logstash event fields:
-
- * `nagios_host`
- * `nagios_service`
-
-These Logstash event fields are supported, but optional:
-
- * `nagios_annotation`
- * `nagios_level` (overrides `nagios_level` configuration option)
-
-There are two configuration options:
-
- * `commandfile` - The location of the Nagios external command file. Defaults
-   to '/var/lib/nagios3/rw/nagios.cmd'
- * `nagios_level` - Specifies the level of the check to be sent.
Defaults to - CRITICAL and can be overriden by setting the "nagios_level" field to one - of "OK", "WARNING", "CRITICAL", or "UNKNOWN" -[source,ruby] - output{ - if [message] =~ /(error|ERROR|CRITICAL)/ { - nagios { - # your config here - } - } - } - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Nagios Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-commandfile>> |<<,>>|No -| <<{version}-plugins-{type}s-{plugin}-nagios_level>> |<>, one of `["0", "1", "2", "3"]`|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-commandfile"] -===== `commandfile` - - * Value type is <> - * Default value is `"/var/lib/nagios3/rw/nagios.cmd"` - -The full path to your Nagios command file. - -[id="{version}-plugins-{type}s-{plugin}-nagios_level"] -===== `nagios_level` - - * Value can be any of: `0`, `1`, `2`, `3` - * Default value is `"2"` - -The Nagios check level. Should be one of 0=OK, 1=WARNING, 2=CRITICAL, -3=UNKNOWN. Defaults to 2 - CRITICAL. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/nagios-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/nagios-v3.0.5.asciidoc deleted file mode 100644 index 070121336..000000000 --- a/docs/versioned-plugins/outputs/nagios-v3.0.5.asciidoc +++ /dev/null @@ -1,90 +0,0 @@ -:plugin: nagios -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-11-07 -:changelog_url: https://github.com/logstash-plugins/logstash-output-nagios/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Nagios output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The Nagios output is used for sending passive check results to Nagios via the -Nagios command file. This output currently supports Nagios 3. - -For this output to work, your event _must_ have the following Logstash event fields: - - * `nagios_host` - * `nagios_service` - -These Logstash event fields are supported, but optional: - - * `nagios_annotation` - * `nagios_level` (overrides `nagios_level` configuration option) - -There are two configuration options: - - * `commandfile` - The location of the Nagios external command file. Defaults - to '/var/lib/nagios3/rw/nagios.cmd' - * `nagios_level` - Specifies the level of the check to be sent. 
Defaults to - CRITICAL and can be overriden by setting the "nagios_level" field to one - of "OK", "WARNING", "CRITICAL", or "UNKNOWN" -[source,ruby] - output{ - if [message] =~ /(error|ERROR|CRITICAL)/ { - nagios { - # your config here - } - } - } - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Nagios Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-commandfile>> |<<,>>|No -| <<{version}-plugins-{type}s-{plugin}-nagios_level>> |<>, one of `["0", "1", "2", "3"]`|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-commandfile"] -===== `commandfile` - - * Value type is <> - * Default value is `"/var/lib/nagios3/rw/nagios.cmd"` - -The full path to your Nagios command file. - -[id="{version}-plugins-{type}s-{plugin}-nagios_level"] -===== `nagios_level` - - * Value can be any of: `0`, `1`, `2`, `3` - * Default value is `"2"` - -The Nagios check level. Should be one of 0=OK, 1=WARNING, 2=CRITICAL, -3=UNKNOWN. Defaults to 2 - CRITICAL. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/nagios_nsca-index.asciidoc b/docs/versioned-plugins/outputs/nagios_nsca-index.asciidoc deleted file mode 100644 index c66b663ee..000000000 --- a/docs/versioned-plugins/outputs/nagios_nsca-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: nagios_nsca -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-13 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::nagios_nsca-v3.0.5.asciidoc[] -include::nagios_nsca-v3.0.4.asciidoc[] -include::nagios_nsca-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/nagios_nsca-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/nagios_nsca-v3.0.3.asciidoc deleted file mode 100644 index d9996e888..000000000 --- a/docs/versioned-plugins/outputs/nagios_nsca-v3.0.3.asciidoc +++ /dev/null @@ -1,141 +0,0 @@ -:plugin: nagios_nsca -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-nagios_nsca/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Nagios_nsca output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The nagios_nsca output is used for sending passive check results to Nagios -through the NSCA protocol. - -This is useful if your Nagios server is not the same as the source host from -where you want to send logs or alerts. 
If you only have one server, this
-output is probably overkill for you; take a look at the 'nagios' output
-instead.
-
-Here is a sample config using the nagios_nsca output:
-[source,ruby]
-    output {
-      nagios_nsca {
-        # specify the hostname or ip of your nagios server
-        host => "nagios.example.com"
-
-        # specify the port to connect to
-        port => 5667
-      }
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Nagios_nsca Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-message_format>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-nagios_host>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-nagios_service>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-nagios_status>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-send_nsca_bin>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-send_nsca_config>> |a valid filesystem path|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * Value type is <>
- * Default value is `"localhost"`
-
-The nagios host or IP to send logs to. It should have an NSCA daemon running.
-
-[id="{version}-plugins-{type}s-{plugin}-message_format"]
-===== `message_format`
-
- * Value type is <>
- * Default value is `"%{@timestamp} %{host}: %{message}"`
-
-The format to use when writing events to nagios. This value
-supports any string and can include `%{name}` and other dynamic
-strings.
-
-[id="{version}-plugins-{type}s-{plugin}-nagios_host"]
-===== `nagios_host`
-
- * Value type is <>
- * Default value is `"%{host}"`
-
-The nagios 'host' you want to submit a passive check result to. This
-parameter accepts interpolation, e.g. you can use `@source_host` or other
-logstash internal variables.
-
-[id="{version}-plugins-{type}s-{plugin}-nagios_service"]
-===== `nagios_service`
-
- * Value type is <>
- * Default value is `"LOGSTASH"`
-
-The nagios 'service' you want to submit a passive check result to. This
-parameter accepts interpolation, e.g. you can use `@source_host` or other
-logstash internal variables.
-
-[id="{version}-plugins-{type}s-{plugin}-nagios_status"]
-===== `nagios_status`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The status to send to nagios. Should be 0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <>
- * Default value is `5667`
-
-The port where the NSCA daemon on the nagios host listens.
-
-[id="{version}-plugins-{type}s-{plugin}-send_nsca_bin"]
-===== `send_nsca_bin`
-
- * Value type is <>
- * Default value is `"/usr/sbin/send_nsca"`
-
-The path to the 'send_nsca' binary on the local host.
-
-[id="{version}-plugins-{type}s-{plugin}-send_nsca_config"]
-===== `send_nsca_config`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The path to the send_nsca config file on the local host.
-Leave blank if you don't want to provide a config file.
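-
-Putting it together, a hypothetical pipeline that reports error events as
-CRITICAL passive checks (the hostname and service name are placeholders):
-[source,ruby]
-    output {
-      if [message] =~ /(error|ERROR|CRITICAL)/ {
-        nagios_nsca {
-          host => "nagios.example.com"
-          nagios_host => "%{host}"
-          nagios_service => "logstash_alerts"
-          nagios_status => "2"    # 2 = CRITICAL
-        }
-      }
-    }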
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/nagios_nsca-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/nagios_nsca-v3.0.4.asciidoc deleted file mode 100644 index 47391e705..000000000 --- a/docs/versioned-plugins/outputs/nagios_nsca-v3.0.4.asciidoc +++ /dev/null @@ -1,141 +0,0 @@ -:plugin: nagios_nsca -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-nagios_nsca/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Nagios_nsca output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The nagios_nsca output is used for sending passive check results to Nagios -through the NSCA protocol. - -This is useful if your Nagios server is not the same as the source host from -where you want to send logs or alerts. If you only have one server, this -output is probably overkill # for you, take a look at the 'nagios' output -instead. - -Here is a sample config using the nagios_nsca output: -[source,ruby] - output { - nagios_nsca { - # specify the hostname or ip of your nagios server - host => "nagios.example.com" - - # specify the port to connect to - port => 5667 - } - } - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Nagios_nsca Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message_format>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-nagios_host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-nagios_service>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-nagios_status>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-send_nsca_bin>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-send_nsca_config>> |a valid filesystem path|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"localhost"` - -The nagios host or IP to send logs to. It should have a NSCA daemon running. - -[id="{version}-plugins-{type}s-{plugin}-message_format"] -===== `message_format` - - * Value type is <> - * Default value is `"%{@timestamp} %{host}: %{message}"` - -The format to use when writing events to nagios. This value -supports any string and can include `%{name}` and other dynamic -strings. - -[id="{version}-plugins-{type}s-{plugin}-nagios_host"] -===== `nagios_host` - - * Value type is <> - * Default value is `"%{host}"` - -The nagios 'host' you want to submit a passive check result to. This -parameter accepts interpolation, e.g. 
you can use `@source_host` or other -logstash internal variables. - -[id="{version}-plugins-{type}s-{plugin}-nagios_service"] -===== `nagios_service` - - * Value type is <> - * Default value is `"LOGSTASH"` - -The nagios 'service' you want to submit a passive check result to. This -parameter accepts interpolation, e.g. you can use `@source_host` or other -logstash internal variables. - -[id="{version}-plugins-{type}s-{plugin}-nagios_status"] -===== `nagios_status` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The status to send to nagios. Should be 0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `5667` - -The port where the NSCA daemon on the nagios host listens. - -[id="{version}-plugins-{type}s-{plugin}-send_nsca_bin"] -===== `send_nsca_bin` - - * Value type is <> - * Default value is `"/usr/sbin/send_nsca"` - -The path to the 'send_nsca' binary on the local host. - -[id="{version}-plugins-{type}s-{plugin}-send_nsca_config"] -===== `send_nsca_config` - - * Value type is <> - * There is no default value for this setting. - -The path to the send_nsca config file on the local host. -Leave blank if you don't want to provide a config file. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/nagios_nsca-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/nagios_nsca-v3.0.5.asciidoc deleted file mode 100644 index d51955ecc..000000000 --- a/docs/versioned-plugins/outputs/nagios_nsca-v3.0.5.asciidoc +++ /dev/null @@ -1,141 +0,0 @@ -:plugin: nagios_nsca -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-11-13 -:changelog_url: https://github.com/logstash-plugins/logstash-output-nagios_nsca/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Nagios_nsca output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The nagios_nsca output is used for sending passive check results to Nagios -through the NSCA protocol. - -This is useful if your Nagios server is not the same as the source host from -where you want to send logs or alerts. If you only have one server, this -output is probably overkill # for you, take a look at the 'nagios' output -instead. - -Here is a sample config using the nagios_nsca output: -[source,ruby] - output { - nagios_nsca { - # specify the hostname or ip of your nagios server - host => "nagios.example.com" - - # specify the port to connect to - port => 5667 - } - } - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Nagios_nsca Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-message_format>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-nagios_host>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-nagios_service>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-nagios_status>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-send_nsca_bin>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-send_nsca_config>> |a valid filesystem path|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
-  * Value type is <>
-  * Default value is `"localhost"`
-
-The nagios host or IP to send logs to. It should have an NSCA daemon running.
-
-[id="{version}-plugins-{type}s-{plugin}-message_format"]
-===== `message_format`
-
-  * Value type is <>
-  * Default value is `"%{@timestamp} %{host}: %{message}"`
-
-The format to use when writing events to nagios. This value
-supports any string and can include `%{name}` and other dynamic
-strings.
-
-[id="{version}-plugins-{type}s-{plugin}-nagios_host"]
-===== `nagios_host`
-
-  * Value type is <>
-  * Default value is `"%{host}"`
-
-The nagios 'host' you want to submit a passive check result to. This
-parameter accepts interpolation, e.g. you can use `@source_host` or other
-logstash internal variables.
-
-[id="{version}-plugins-{type}s-{plugin}-nagios_service"]
-===== `nagios_service`
-
-  * Value type is <>
-  * Default value is `"LOGSTASH"`
-
-The nagios 'service' you want to submit a passive check result to. This
-parameter accepts interpolation, e.g. you can use `@source_host` or other
-logstash internal variables.
-
-[id="{version}-plugins-{type}s-{plugin}-nagios_status"]
-===== `nagios_status`
-
-  * This is a required setting.
-  * Value type is <>
-  * There is no default value for this setting.
-
-The status to send to nagios. Should be 0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
-  * Value type is <>
-  * Default value is `5667`
-
-The port where the NSCA daemon on the nagios host listens.
-
-[id="{version}-plugins-{type}s-{plugin}-send_nsca_bin"]
-===== `send_nsca_bin`
-
-  * Value type is <>
-  * Default value is `"/usr/sbin/send_nsca"`
-
-The path to the 'send_nsca' binary on the local host.
-
-[id="{version}-plugins-{type}s-{plugin}-send_nsca_config"]
-===== `send_nsca_config`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-The path to the send_nsca config file on the local host.
-Leave blank if you don't want to provide a config file.
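-
-As a hedged illustration (the hostname, service name, and the `nagios_level`
-field below are hypothetical), the interpolation-aware settings above can be
-combined like this:
-[source,ruby]
-    output {
-      nagios_nsca {
-        host           => "nagios.example.com"
-        port           => 5667
-        # route the check result using fields from the event
-        nagios_host    => "%{host}"
-        nagios_service => "logstash_check"
-        # nagios_status is required; here a filter is assumed to have set
-        # the nagios_level field to 0, 1, 2, or 3
-        nagios_status  => "%{nagios_level}"
-      }
-    }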
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/neo4j-index.asciidoc b/docs/versioned-plugins/outputs/neo4j-index.asciidoc deleted file mode 100644 index 727b126d6..000000000 --- a/docs/versioned-plugins/outputs/neo4j-index.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -:plugin: neo4j -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-06-23 -|======================================================================= - -include::neo4j-v2.0.5.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/neo4j-v2.0.5.asciidoc b/docs/versioned-plugins/outputs/neo4j-v2.0.5.asciidoc deleted file mode 100644 index e742a1096..000000000 --- a/docs/versioned-plugins/outputs/neo4j-v2.0.5.asciidoc +++ /dev/null @@ -1,53 +0,0 @@ -:plugin: neo4j -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.0.5 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-neo4j/blob/v2.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Neo4j output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Neo4j Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-path"] -===== `path` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. 
- -The path within your file system where the neo4j database is located - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/newrelic-index.asciidoc b/docs/versioned-plugins/outputs/newrelic-index.asciidoc deleted file mode 100644 index 43f875d50..000000000 --- a/docs/versioned-plugins/outputs/newrelic-index.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -:plugin: newrelic -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -|======================================================================= - - diff --git a/docs/versioned-plugins/outputs/null-index.asciidoc b/docs/versioned-plugins/outputs/null-index.asciidoc deleted file mode 100644 index ff281bcbf..000000000 --- a/docs/versioned-plugins/outputs/null-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: null -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::null-v3.0.4.asciidoc[] -include::null-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/null-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/null-v3.0.3.asciidoc deleted file mode 100644 index 6d8beb532..000000000 --- a/docs/versioned-plugins/outputs/null-v3.0.3.asciidoc +++ /dev/null @@ -1,37 +0,0 @@ -:plugin: null -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-null/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Null output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -A null output. This is useful for testing logstash inputs and filters for -performance. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Null Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -|======================================================================= - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/null-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/null-v3.0.4.asciidoc deleted file mode 100644 index dfb5c1133..000000000 --- a/docs/versioned-plugins/outputs/null-v3.0.4.asciidoc +++ /dev/null @@ -1,37 +0,0 @@ -:plugin: null -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-null/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Null output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -A null output. This is useful for testing logstash inputs and filters for -performance. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Null Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -|======================================================================= - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/opentsdb-index.asciidoc b/docs/versioned-plugins/outputs/opentsdb-index.asciidoc deleted file mode 100644 index f1891d463..000000000 --- a/docs/versioned-plugins/outputs/opentsdb-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: opentsdb -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-13 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::opentsdb-v3.1.4.asciidoc[] -include::opentsdb-v3.1.3.asciidoc[] -include::opentsdb-v3.1.2.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/opentsdb-v3.1.2.asciidoc b/docs/versioned-plugins/outputs/opentsdb-v3.1.2.asciidoc deleted file mode 100644 index cc92ac010..000000000 --- a/docs/versioned-plugins/outputs/opentsdb-v3.1.2.asciidoc +++ /dev/null @@ -1,87 +0,0 @@ -:plugin: opentsdb -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.2 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-opentsdb/blob/v3.1.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Opentsdb output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output allows you to pull metrics from your logs and ship them to -opentsdb. Opentsdb is an open source tool for storing and graphing metrics. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Opentsdb Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
-  * Value type is <>
-  * Default value is `"localhost"`
-
-The address of the opentsdb server.
-
-[id="{version}-plugins-{type}s-{plugin}-metrics"]
-===== `metrics`
-
-  * This is a required setting.
-  * Value type is <>
-  * There is no default value for this setting.
-
-The metric(s) to use. This supports dynamic strings like %{source_host}
-for metric names and also for values. This is an array field whose elements
-are the metric name, the metric value, and then pairs of tag names and tag
-values. Example:
-[source,ruby]
-    [
-      "%{host}/uptime",
-      "%{uptime_1m}",
-      "hostname", "%{host}",
-      "anotherhostname", "%{host}"
-    ]
-
-The value will be coerced to a floating point value. Values which cannot be
-coerced become zero (0).
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
-  * Value type is <>
-  * Default value is `4242`
-
-The port to connect to on your OpenTSDB server.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/opentsdb-v3.1.3.asciidoc b/docs/versioned-plugins/outputs/opentsdb-v3.1.3.asciidoc
deleted file mode 100644
index b15df20a0..000000000
--- a/docs/versioned-plugins/outputs/opentsdb-v3.1.3.asciidoc
+++ /dev/null
@@ -1,87 +0,0 @@
-:plugin: opentsdb
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.1.3
-:release_date: 2017-08-16
-:changelog_url: https://github.com/logstash-plugins/logstash-output-opentsdb/blob/v3.1.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Opentsdb output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output allows you to pull metrics from your logs and ship them to
-opentsdb. Opentsdb is an open source tool for storing and graphing metrics.
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Opentsdb Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
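-
-For context, a hedged sketch of a complete pipeline snippet (the
-`uptime_1m` field and the server address are hypothetical) using the
-`metrics` layout documented below:
-[source,ruby]
-    output {
-      opentsdb {
-        host => "opentsdb.example.com"
-        port => 4242
-        # metric name, metric value, then tag name/value pairs
-        metrics => [
-          "%{host}/uptime", "%{uptime_1m}",
-          "hostname", "%{host}"
-        ]
-      }
-    }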
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
-  * Value type is <>
-  * Default value is `"localhost"`
-
-The address of the opentsdb server.
-
-[id="{version}-plugins-{type}s-{plugin}-metrics"]
-===== `metrics`
-
-  * This is a required setting.
-  * Value type is <>
-  * There is no default value for this setting.
-
-The metric(s) to use. This supports dynamic strings like %{source_host}
-for metric names and also for values. This is an array field whose elements
-are the metric name, the metric value, and then pairs of tag names and tag
-values. Example:
-[source,ruby]
-    [
-      "%{host}/uptime",
-      "%{uptime_1m}",
-      "hostname", "%{host}",
-      "anotherhostname", "%{host}"
-    ]
-
-The value will be coerced to a floating point value. Values which cannot be
-coerced become zero (0).
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
-  * Value type is <>
-  * Default value is `4242`
-
-The port to connect to on your OpenTSDB server.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/opentsdb-v3.1.4.asciidoc b/docs/versioned-plugins/outputs/opentsdb-v3.1.4.asciidoc
deleted file mode 100644
index b83c23945..000000000
--- a/docs/versioned-plugins/outputs/opentsdb-v3.1.4.asciidoc
+++ /dev/null
@@ -1,87 +0,0 @@
-:plugin: opentsdb
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.1.4
-:release_date: 2017-11-13
-:changelog_url: https://github.com/logstash-plugins/logstash-output-opentsdb/blob/v3.1.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Opentsdb output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output allows you to pull metrics from your logs and ship them to
-opentsdb. Opentsdb is an open source tool for storing and graphing metrics.
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Opentsdb Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
-  * Value type is <>
-  * Default value is `"localhost"`
-
-The address of the opentsdb server.
-
-[id="{version}-plugins-{type}s-{plugin}-metrics"]
-===== `metrics`
-
-  * This is a required setting.
-  * Value type is <>
-  * There is no default value for this setting.
-
-The metric(s) to use. This supports dynamic strings like %{source_host}
-for metric names and also for values. This is an array field whose elements
-are the metric name, the metric value, and then pairs of tag names and tag
-values. Example:
-[source,ruby]
-    [
-      "%{host}/uptime",
-      "%{uptime_1m}",
-      "hostname", "%{host}",
-      "anotherhostname", "%{host}"
-    ]
-
-The value will be coerced to a floating point value. Values which cannot be
-coerced become zero (0).
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
-  * Value type is <>
-  * Default value is `4242`
-
-The port to connect to on your OpenTSDB server.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/pagerduty-index.asciidoc b/docs/versioned-plugins/outputs/pagerduty-index.asciidoc
deleted file mode 100644
index a2f189b21..000000000
--- a/docs/versioned-plugins/outputs/pagerduty-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: pagerduty
-:type: output
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-13
-| <> | 2017-08-16
-| <> | 2017-06-23
-|=======================================================================
-
-include::pagerduty-v3.0.6.asciidoc[]
-include::pagerduty-v3.0.5.asciidoc[]
-include::pagerduty-v3.0.4.asciidoc[]
-
diff --git a/docs/versioned-plugins/outputs/pagerduty-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/pagerduty-v3.0.4.asciidoc
deleted file mode 100644
index e5c4289d5..000000000
--- a/docs/versioned-plugins/outputs/pagerduty-v3.0.4.asciidoc
+++ /dev/null
@@ -1,105 +0,0 @@
-:plugin: pagerduty
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.4
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-output-pagerduty/blob/v3.0.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="plugins-{type}-{plugin}"]
-
-=== Pagerduty output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The PagerDuty output will send notifications based on pre-configured services
-and escalation policies. Logstash can send "trigger", "acknowledge" and "resolve"
-event types. In addition, you may configure custom descriptions and event details.
-The only required field is the PagerDuty "Service API Key", which can be found on
-the service's web page on pagerduty.com. In the default case, the description and
-event details will be populated by Logstash, using `message`, `timestamp` and `host` data.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Pagerduty Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-description>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-details>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-event_type>> |<>, one of `["trigger", "acknowledge", "resolve"]`|No -| <<{version}-plugins-{type}s-{plugin}-incident_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pdurl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-service_key>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-description"] -===== `description` - - * Value type is <> - * Default value is `"Logstash event for %{host}"` - -Custom description - -[id="{version}-plugins-{type}s-{plugin}-details"] -===== `details` - - * Value type is <> - * Default value is `{"timestamp"=>"%{@timestamp}", "message"=>"%{message}"}` - -The event details. These might be data from the Logstash event fields you wish to include. -Tags are automatically included if detected so there is no need to explicitly add them here. - -[id="{version}-plugins-{type}s-{plugin}-event_type"] -===== `event_type` - - * Value can be any of: `trigger`, `acknowledge`, `resolve` - * Default value is `"trigger"` - -Event type - -[id="{version}-plugins-{type}s-{plugin}-incident_key"] -===== `incident_key` - - * Value type is <> - * Default value is `"logstash/%{host}/%{type}"` - -The service key to use. You'll need to set this up in PagerDuty beforehand. - -[id="{version}-plugins-{type}s-{plugin}-pdurl"] -===== `pdurl` - - * Value type is <> - * Default value is `"https://events.pagerduty.com/generic/2010-04-15/create_event.json"` - -PagerDuty API URL. You shouldn't need to change this, but is included to allow for flexibility -should PagerDuty iterate the API and Logstash hasn't been updated yet. - -[id="{version}-plugins-{type}s-{plugin}-service_key"] -===== `service_key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The PagerDuty Service API Key - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/pagerduty-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/pagerduty-v3.0.5.asciidoc deleted file mode 100644 index b93f9edb1..000000000 --- a/docs/versioned-plugins/outputs/pagerduty-v3.0.5.asciidoc +++ /dev/null @@ -1,105 +0,0 @@ -:plugin: pagerduty -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-pagerduty/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Pagerduty output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The PagerDuty output will send notifications based on pre-configured services -and escalation policies. Logstash can send "trigger", "acknowledge" and "resolve" -event types. 
In addition, you may configure custom descriptions and event details. -The only required field is the PagerDuty "Service API Key", which can be found on -the service's web page on pagerduty.com. In the default case, the description and -event details will be populated by Logstash, using `message`, `timestamp` and `host` data. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Pagerduty Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-description>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-details>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-event_type>> |<>, one of `["trigger", "acknowledge", "resolve"]`|No -| <<{version}-plugins-{type}s-{plugin}-incident_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pdurl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-service_key>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-description"] -===== `description` - - * Value type is <> - * Default value is `"Logstash event for %{host}"` - -Custom description - -[id="{version}-plugins-{type}s-{plugin}-details"] -===== `details` - - * Value type is <> - * Default value is `{"timestamp"=>"%{@timestamp}", "message"=>"%{message}"}` - -The event details. These might be data from the Logstash event fields you wish to include. -Tags are automatically included if detected so there is no need to explicitly add them here. - -[id="{version}-plugins-{type}s-{plugin}-event_type"] -===== `event_type` - - * Value can be any of: `trigger`, `acknowledge`, `resolve` - * Default value is `"trigger"` - -Event type - -[id="{version}-plugins-{type}s-{plugin}-incident_key"] -===== `incident_key` - - * Value type is <> - * Default value is `"logstash/%{host}/%{type}"` - -The service key to use. You'll need to set this up in PagerDuty beforehand. - -[id="{version}-plugins-{type}s-{plugin}-pdurl"] -===== `pdurl` - - * Value type is <> - * Default value is `"https://events.pagerduty.com/generic/2010-04-15/create_event.json"` - -PagerDuty API URL. You shouldn't need to change this, but is included to allow for flexibility -should PagerDuty iterate the API and Logstash hasn't been updated yet. - -[id="{version}-plugins-{type}s-{plugin}-service_key"] -===== `service_key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The PagerDuty Service API Key - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/pagerduty-v3.0.6.asciidoc b/docs/versioned-plugins/outputs/pagerduty-v3.0.6.asciidoc deleted file mode 100644 index fd2777193..000000000 --- a/docs/versioned-plugins/outputs/pagerduty-v3.0.6.asciidoc +++ /dev/null @@ -1,105 +0,0 @@ -:plugin: pagerduty -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v3.0.6 -:release_date: 2017-11-13 -:changelog_url: https://github.com/logstash-plugins/logstash-output-pagerduty/blob/v3.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Pagerduty output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The PagerDuty output will send notifications based on pre-configured services -and escalation policies. Logstash can send "trigger", "acknowledge" and "resolve" -event types. In addition, you may configure custom descriptions and event details. -The only required field is the PagerDuty "Service API Key", which can be found on -the service's web page on pagerduty.com. In the default case, the description and -event details will be populated by Logstash, using `message`, `timestamp` and `host` data. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Pagerduty Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-description>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-details>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-event_type>> |<>, one of `["trigger", "acknowledge", "resolve"]`|No -| <<{version}-plugins-{type}s-{plugin}-incident_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pdurl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-service_key>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-description"] -===== `description` - - * Value type is <> - * Default value is `"Logstash event for %{host}"` - -Custom description - -[id="{version}-plugins-{type}s-{plugin}-details"] -===== `details` - - * Value type is <> - * Default value is `{"timestamp"=>"%{@timestamp}", "message"=>"%{message}"}` - -The event details. These might be data from the Logstash event fields you wish to include. -Tags are automatically included if detected so there is no need to explicitly add them here. - -[id="{version}-plugins-{type}s-{plugin}-event_type"] -===== `event_type` - - * Value can be any of: `trigger`, `acknowledge`, `resolve` - * Default value is `"trigger"` - -Event type - -[id="{version}-plugins-{type}s-{plugin}-incident_key"] -===== `incident_key` - - * Value type is <> - * Default value is `"logstash/%{host}/%{type}"` - -The service key to use. You'll need to set this up in PagerDuty beforehand. - -[id="{version}-plugins-{type}s-{plugin}-pdurl"] -===== `pdurl` - - * Value type is <> - * Default value is `"https://events.pagerduty.com/generic/2010-04-15/create_event.json"` - -PagerDuty API URL. You shouldn't need to change this, but is included to allow for flexibility -should PagerDuty iterate the API and Logstash hasn't been updated yet. - -[id="{version}-plugins-{type}s-{plugin}-service_key"] -===== `service_key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. 
-
-The PagerDuty Service API Key
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/pipe-index.asciidoc b/docs/versioned-plugins/outputs/pipe-index.asciidoc
deleted file mode 100644
index b13faa213..000000000
--- a/docs/versioned-plugins/outputs/pipe-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: pipe
-:type: output
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-13
-| <> | 2017-08-16
-| <> | 2017-06-23
-|=======================================================================
-
-include::pipe-v3.0.5.asciidoc[]
-include::pipe-v3.0.4.asciidoc[]
-include::pipe-v3.0.3.asciidoc[]
-
diff --git a/docs/versioned-plugins/outputs/pipe-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/pipe-v3.0.3.asciidoc
deleted file mode 100644
index e68be7cd0..000000000
--- a/docs/versioned-plugins/outputs/pipe-v3.0.3.asciidoc
+++ /dev/null
@@ -1,80 +0,0 @@
-:plugin: pipe
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.3
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-output-pipe/blob/v3.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="plugins-{type}-{plugin}"]
-
-=== Pipe output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Pipe output.
-
-Pipe events to stdin of another program. You can use fields from the
-event as parts of the command.
-WARNING: This feature can cause logstash to fork off multiple children if you are not careful with per-event command lines.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Pipe Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-command>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-message_format>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ttl>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-command"]
-===== `command`
-
-  * This is a required setting.
-  * Value type is <>
-  * There is no default value for this setting.
-
-Command line to launch and pipe to.
-
-[id="{version}-plugins-{type}s-{plugin}-message_format"]
-===== `message_format`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-The format to use when writing events to the pipe. This value
-supports any string and can include `%{name}` and other dynamic
-strings.
-
-If this setting is omitted, the full json representation of the
-event will be written as a single line.
-
-[id="{version}-plugins-{type}s-{plugin}-ttl"]
-===== `ttl`
-
-  * Value type is <>
-  * Default value is `10`
-
-Close pipe that hasn't been used for TTL seconds. -1 or 0 means never close.
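-
-As a hedged illustration (the script path is hypothetical), a field-dependent
-`command` spawns one child process per distinct interpolated command line,
-which is exactly the forking behavior the warning above refers to:
-[source,ruby]
-    output {
-      pipe {
-        # one child process per distinct value of %{type}
-        command => "/usr/local/bin/archive-events.sh %{type}"
-        # close pipes that have been idle for 30 seconds
-        ttl     => 30
-      }
-    }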
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/pipe-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/pipe-v3.0.4.asciidoc
deleted file mode 100644
index 17e9d177e..000000000
--- a/docs/versioned-plugins/outputs/pipe-v3.0.4.asciidoc
+++ /dev/null
@@ -1,80 +0,0 @@
-:plugin: pipe
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.4
-:release_date: 2017-08-16
-:changelog_url: https://github.com/logstash-plugins/logstash-output-pipe/blob/v3.0.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Pipe output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Pipe output.
-
-Pipe events to stdin of another program. You can use fields from the
-event as parts of the command.
-WARNING: This feature can cause logstash to fork off multiple children if you are not careful with per-event command lines.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Pipe Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-command>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-message_format>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ttl>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-command"]
-===== `command`
-
-  * This is a required setting.
-  * Value type is <>
-  * There is no default value for this setting.
-
-Command line to launch and pipe to.
-
-[id="{version}-plugins-{type}s-{plugin}-message_format"]
-===== `message_format`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-The format to use when writing events to the pipe. This value
-supports any string and can include `%{name}` and other dynamic
-strings.
-
-If this setting is omitted, the full json representation of the
-event will be written as a single line.
-
-[id="{version}-plugins-{type}s-{plugin}-ttl"]
-===== `ttl`
-
-  * Value type is <>
-  * Default value is `10`
-
-Close pipe that hasn't been used for TTL seconds. -1 or 0 means never close.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/pipe-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/pipe-v3.0.5.asciidoc
deleted file mode 100644
index bb1da5e6b..000000000
--- a/docs/versioned-plugins/outputs/pipe-v3.0.5.asciidoc
+++ /dev/null
@@ -1,80 +0,0 @@
-:plugin: pipe
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.5
-:release_date: 2017-11-13
-:changelog_url: https://github.com/logstash-plugins/logstash-output-pipe/blob/v3.0.5/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Pipe output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Pipe output.
-
-Pipe events to stdin of another program. You can use fields from the
-event as parts of the command.
-WARNING: This feature can cause logstash to fork off multiple children if you are not careful with per-event command lines.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Pipe Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-command>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-message_format>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ttl>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-command"]
-===== `command`
-
-  * This is a required setting.
-  * Value type is <>
-  * There is no default value for this setting.
-
-Command line to launch and pipe to.
-
-[id="{version}-plugins-{type}s-{plugin}-message_format"]
-===== `message_format`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-The format to use when writing events to the pipe. This value
-supports any string and can include `%{name}` and other dynamic
-strings.
-
-If this setting is omitted, the full json representation of the
-event will be written as a single line.
-
-[id="{version}-plugins-{type}s-{plugin}-ttl"]
-===== `ttl`
-
-  * Value type is <>
-  * Default value is `10`
-
-Close pipe that hasn't been used for TTL seconds. -1 or 0 means never close.
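-
-For contrast, a hedged sketch of the two output formats (the gzip sink is
-hypothetical): with `message_format` set, each event is rendered from the
-template; omit it to write the full event as one JSON line instead:
-[source,ruby]
-    output {
-      pipe {
-        command        => "gzip -c >> /var/log/events.gz"
-        message_format => "%{@timestamp} %{host} %{message}"
-      }
-    }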
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/rabbitmq-index.asciidoc b/docs/versioned-plugins/outputs/rabbitmq-index.asciidoc deleted file mode 100644 index c52a53ee9..000000000 --- a/docs/versioned-plugins/outputs/rabbitmq-index.asciidoc +++ /dev/null @@ -1,24 +0,0 @@ -:plugin: rabbitmq -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2018-01-09 -| <> | 2017-11-13 -| <> | 2017-09-20 -| <> | 2017-08-16 -| <> | 2017-08-02 -| <> | 2017-07-08 -| <> | 2017-06-23 -|======================================================================= - -include::rabbitmq-v5.1.0.asciidoc[] -include::rabbitmq-v5.0.3.asciidoc[] -include::rabbitmq-v5.0.2.asciidoc[] -include::rabbitmq-v5.0.1.asciidoc[] -include::rabbitmq-v5.0.0.asciidoc[] -include::rabbitmq-v4.0.9.asciidoc[] -include::rabbitmq-v4.0.8.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/rabbitmq-v4.0.8.asciidoc b/docs/versioned-plugins/outputs/rabbitmq-v4.0.8.asciidoc deleted file mode 100644 index b468cd039..000000000 --- a/docs/versioned-plugins/outputs/rabbitmq-v4.0.8.asciidoc +++ /dev/null @@ -1,279 +0,0 @@ -:plugin: rabbitmq -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.0.8 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-rabbitmq/blob/v4.0.8/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Rabbitmq output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Push events to a RabbitMQ exchange. Requires RabbitMQ 2.x -or later version (3.x is recommended). - -Relevant links: - -* http://www.rabbitmq.com/[RabbitMQ] -* http://rubymarchhare.info[March Hare] - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Rabbitmq Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>, one of `["fanout", "direct", "topic", "x-consistent-hash", "x-modulus-hash"]`|Yes -| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-persistent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-arguments"] -===== `arguments` - - * Value type is <> - * Default value is `{}` - -Extra queue arguments as an array. -To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}` - -[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"] -===== `automatic_recovery` - - * Value type is <> - * Default value is `true` - -Set this to automatically recover from a broken connection. You almost certainly don't want to override this!!! - -[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"] -===== `connect_retry_interval` - - * Value type is <> - * Default value is `1` - -Time in seconds to wait before retrying a connection - -[id="{version}-plugins-{type}s-{plugin}-connection_timeout"] -===== `connection_timeout` - - * Value type is <> - * There is no default value for this setting. - -The default connection timeout in milliseconds. If not specified the timeout is infinite. - -[id="{version}-plugins-{type}s-{plugin}-debug"] -===== `debug` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * Default value is `false` - -Enable or disable logging - -[id="{version}-plugins-{type}s-{plugin}-durable"] -===== `durable` - - * Value type is <> - * Default value is `true` - -Is this exchange durable? (aka; Should it survive a broker restart?) - -[id="{version}-plugins-{type}s-{plugin}-exchange"] -===== `exchange` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The name of the exchange - -[id="{version}-plugins-{type}s-{plugin}-exchange_type"] -===== `exchange_type` - - * This is a required setting. - * Value can be any of: `fanout`, `direct`, `topic`, `x-consistent-hash`, `x-modulus-hash` - * There is no default value for this setting. 
-
-The exchange type (fanout, topic, direct)
-
-[id="{version}-plugins-{type}s-{plugin}-heartbeat"]
-===== `heartbeat`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-Heartbeat delay in seconds. If unspecified, no heartbeats will be sent.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
-  * This is a required setting.
-  * Value type is <>
-  * There is no default value for this setting.
-
-RabbitMQ server address(es). The value can either be a single host or a
-list of hosts, i.e.
-    host => "localhost"
-or
-    host => ["host01", "host02"]
-
-If multiple hosts are provided, one of the hosts is chosen at random on the
-initial connection and on any subsequent recovery attempt.
-Note that only one host connection is active at a time.
-
-[id="{version}-plugins-{type}s-{plugin}-key"]
-===== `key`
-
-  * Value type is <>
-  * Default value is `"logstash"`
-
-The default codec for this plugin is JSON. You can override this to suit
-your particular needs, however.
-Key to route to by default. Defaults to 'logstash'.
-
-* Routing keys are ignored on fanout exchanges.
-
-[id="{version}-plugins-{type}s-{plugin}-passive"]
-===== `passive`
-
-  * Value type is <>
-  * Default value is `false`
-
-Passive queue creation? Useful for checking queue existence without modifying server state
-
-[id="{version}-plugins-{type}s-{plugin}-password"]
-===== `password`
-
-  * Value type is <>
-  * Default value is `"guest"`
-
-RabbitMQ password
-
-[id="{version}-plugins-{type}s-{plugin}-persistent"]
-===== `persistent`
-
-  * Value type is <>
-  * Default value is `true`
-
-Should RabbitMQ persist messages to disk?
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
-  * Value type is <>
-  * Default value is `5672`
-
-RabbitMQ port to connect on
-
-[id="{version}-plugins-{type}s-{plugin}-ssl"]
-===== `ssl`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-Enable or disable SSL.
-Note that by default remote certificate verification is off.
-Specify ssl_certificate_path and ssl_certificate_password if you need
-certificate verification
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"]
-===== `ssl_certificate_password`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-Password for the encrypted PKCS12 (.p12) certificate file specified in ssl_certificate_path
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"]
-===== `ssl_certificate_path`
-
-  * Value type is <>
-  * There is no default value for this setting.
-
-Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_version"]
-===== `ssl_version`
-
-  * Value type is <>
-  * Default value is `"TLSv1.2"`
-
-Version of the SSL protocol to use.
-
-[id="{version}-plugins-{type}s-{plugin}-tls_certificate_password"]
-===== `tls_certificate_password` (DEPRECATED)
-
-  * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
-  * Value type is <>
-  * There is no default value for this setting.
-
-TLS certificate password
-
-[id="{version}-plugins-{type}s-{plugin}-tls_certificate_path"]
-===== `tls_certificate_path` (DEPRECATED)
-
-  * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
-  * Value type is <>
-  * There is no default value for this setting.
-
-TLS certificate path
-
-[id="{version}-plugins-{type}s-{plugin}-user"]
-===== `user`
-
-  * Value type is <>
-  * Default value is `"guest"`
-
-RabbitMQ username
-
-[id="{version}-plugins-{type}s-{plugin}-vhost"]
-===== `vhost`
-
-  * Value type is <>
-  * Default value is `"/"`
-
-The vhost (virtual host) to use. If you don't know what this
-is, leave the default. With the exception of the default
-vhost ("/"), names of vhosts should not begin with a forward
-slash.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/rabbitmq-v4.0.9.asciidoc b/docs/versioned-plugins/outputs/rabbitmq-v4.0.9.asciidoc
deleted file mode 100644
index 0aa3f6974..000000000
--- a/docs/versioned-plugins/outputs/rabbitmq-v4.0.9.asciidoc
+++ /dev/null
@@ -1,293 +0,0 @@
-:plugin: rabbitmq
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.0.9
-:release_date: 2017-07-08
-:changelog_url: https://github.com/logstash-plugins/logstash-output-rabbitmq/blob/v4.0.9/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Rabbitmq output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Push events to a RabbitMQ exchange. Requires RabbitMQ 2.x
-or later version (3.x is recommended).
-
-Relevant links:
-
-* http://www.rabbitmq.com/[RabbitMQ]
-* http://rubymarchhare.info[March Hare]
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Rabbitmq Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>, one of `["fanout", "direct", "topic", "x-consistent-hash", "x-modulus-hash"]`|Yes -| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message_properties>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-persistent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-arguments"] -===== `arguments` - - * Value type is <> - * Default value is `{}` - -Extra queue arguments as an array. -To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}` - -[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"] -===== `automatic_recovery` - - * Value type is <> - * Default value is `true` - -Set this to automatically recover from a broken connection. You almost certainly don't want to override this!!! - -[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"] -===== `connect_retry_interval` - - * Value type is <> - * Default value is `1` - -Time in seconds to wait before retrying a connection - -[id="{version}-plugins-{type}s-{plugin}-connection_timeout"] -===== `connection_timeout` - - * Value type is <> - * There is no default value for this setting. - -The default connection timeout in milliseconds. If not specified the timeout is infinite. - -[id="{version}-plugins-{type}s-{plugin}-debug"] -===== `debug` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * Default value is `false` - -Enable or disable logging - -[id="{version}-plugins-{type}s-{plugin}-durable"] -===== `durable` - - * Value type is <> - * Default value is `true` - -Is this exchange durable? (aka; Should it survive a broker restart?) - -[id="{version}-plugins-{type}s-{plugin}-exchange"] -===== `exchange` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The name of the exchange - -[id="{version}-plugins-{type}s-{plugin}-exchange_type"] -===== `exchange_type` - - * This is a required setting. 
- * Value can be any of: `fanout`, `direct`, `topic`, `x-consistent-hash`, `x-modulus-hash` - * There is no default value for this setting. - -The exchange type (fanout, topic, direct) - -[id="{version}-plugins-{type}s-{plugin}-heartbeat"] -===== `heartbeat` - - * Value type is <> - * There is no default value for this setting. - -Heartbeat delay in seconds. If unspecified no heartbeats will be sent - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Common functionality for the rabbitmq input/output -RabbitMQ server address(es) -host can either be a single host, or a list of hosts -i.e. - host => "localhost" -or - host => ["host01", "host02] - -if multiple hosts are provided on the initial connection and any subsequent -recovery attempts of the hosts is chosen at random and connected to. -Note that only one host connection is active at a time. - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * Value type is <> - * Default value is `"logstash"` - -The default codec for this plugin is JSON. You can override this to suit your particular needs however. -Key to route to by default. Defaults to 'logstash' - -* Routing keys are ignored on fanout exchanges. - -[id="{version}-plugins-{type}s-{plugin}-message_properties"] -===== `message_properties` - - * Value type is <> - * Default value is `{}` - -Add properties to be set per-message here, such as 'Content-Type', 'Priority' - -Example: -[source,ruby] - message_properties => { "priority" => "1" } - - -[id="{version}-plugins-{type}s-{plugin}-passive"] -===== `passive` - - * Value type is <> - * Default value is `false` - -Passive queue creation? Useful for checking queue existance without modifying server state - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * Default value is `"guest"` - -RabbitMQ password - -[id="{version}-plugins-{type}s-{plugin}-persistent"] -===== `persistent` - - * Value type is <> - * Default value is `true` - -Should RabbitMQ persist messages to disk? - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `5672` - -RabbitMQ port to connect on - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` - - * Value type is <> - * There is no default value for this setting. - -Enable or disable SSL. -Note that by default remote certificate verification is off. -Specify ssl_certificate_path and ssl_certificate_password if you need -certificate verification - -[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"] -===== `ssl_certificate_password` - - * Value type is <> - * There is no default value for this setting. - -Password for the encrypted PKCS12 (.p12) certificate file specified in ssl_certificate_path - -[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"] -===== `ssl_certificate_path` - - * Value type is <> - * There is no default value for this setting. - -Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host - -[id="{version}-plugins-{type}s-{plugin}-ssl_version"] -===== `ssl_version` - - * Value type is <> - * Default value is `"TLSv1.2"` - -Version of the SSL protocol to use. - -[id="{version}-plugins-{type}s-{plugin}-tls_certificate_password"] -===== `tls_certificate_password` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. 
- * Value type is <>
- * There is no default value for this setting.
-
-TLS certificate password
-
-[id="{version}-plugins-{type}s-{plugin}-tls_certificate_path"]
-===== `tls_certificate_path` (DEPRECATED)
-
- * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
- * Value type is <>
- * There is no default value for this setting.
-
-TLS certificate path
-
-[id="{version}-plugins-{type}s-{plugin}-user"]
-===== `user`
-
- * Value type is <>
- * Default value is `"guest"`
-
-RabbitMQ username.
-
-[id="{version}-plugins-{type}s-{plugin}-vhost"]
-===== `vhost`
-
- * Value type is <>
- * Default value is `"/"`
-
-The vhost (virtual host) to use. If you don't know what this
-is, leave the default. With the exception of the default
-vhost ("/"), names of vhosts should not begin with a forward
-slash.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/rabbitmq-v5.0.0.asciidoc b/docs/versioned-plugins/outputs/rabbitmq-v5.0.0.asciidoc
deleted file mode 100644
index 2e6d28776..000000000
--- a/docs/versioned-plugins/outputs/rabbitmq-v5.0.0.asciidoc
+++ /dev/null
@@ -1,266 +0,0 @@
-:plugin: rabbitmq
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v5.0.0
-:release_date: 2017-08-02
-:changelog_url: https://github.com/logstash-plugins/logstash-output-rabbitmq/blob/v5.0.0/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Rabbitmq output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Push events to a RabbitMQ exchange. Requires RabbitMQ 2.x
-or later (3.x is recommended).
-
-Relevant links:
-
-* http://www.rabbitmq.com/[RabbitMQ]
-* http://rubymarchhare.info[March Hare]
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Rabbitmq Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>, one of `["fanout", "direct", "topic", "x-consistent-hash", "x-modulus-hash"]`|Yes -| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message_properties>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-persistent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-arguments"] -===== `arguments` - - * Value type is <> - * Default value is `{}` - -Extra queue arguments as an array. -To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}` - -[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"] -===== `automatic_recovery` - - * Value type is <> - * Default value is `true` - -Set this to automatically recover from a broken connection. You almost certainly don't want to override this!!! - -[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"] -===== `connect_retry_interval` - - * Value type is <> - * Default value is `1` - -Time in seconds to wait before retrying a connection - -[id="{version}-plugins-{type}s-{plugin}-connection_timeout"] -===== `connection_timeout` - - * Value type is <> - * There is no default value for this setting. - -The default connection timeout in milliseconds. If not specified the timeout is infinite. - -[id="{version}-plugins-{type}s-{plugin}-durable"] -===== `durable` - - * Value type is <> - * Default value is `true` - -Is this exchange durable? (aka; Should it survive a broker restart?) - -[id="{version}-plugins-{type}s-{plugin}-exchange"] -===== `exchange` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The name of the exchange - -[id="{version}-plugins-{type}s-{plugin}-exchange_type"] -===== `exchange_type` - - * This is a required setting. - * Value can be any of: `fanout`, `direct`, `topic`, `x-consistent-hash`, `x-modulus-hash` - * There is no default value for this setting. - -The exchange type (fanout, topic, direct) - -[id="{version}-plugins-{type}s-{plugin}-heartbeat"] -===== `heartbeat` - - * Value type is <> - * There is no default value for this setting. - -Heartbeat delay in seconds. 
If unspecified, no heartbeats are sent.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-RabbitMQ server address(es). `host` can either be a single host or a list
-of hosts, for example:
- host => "localhost"
-or
- host => ["host01", "host02"]
-
-If multiple hosts are provided, one host is chosen at random on the initial
-connection and on any subsequent recovery attempt, and is then connected to.
-Note that only one host connection is active at a time.
-
-[id="{version}-plugins-{type}s-{plugin}-key"]
-===== `key`
-
- * Value type is <>
- * Default value is `"logstash"`
-
-Key to route to by default. Defaults to `"logstash"`.
-The default codec for this plugin is JSON; you can override it to suit your particular needs, however.
-
-* Routing keys are ignored on fanout exchanges.
-
-[id="{version}-plugins-{type}s-{plugin}-message_properties"]
-===== `message_properties`
-
- * Value type is <>
- * Default value is `{}`
-
-Add properties to be set per message here, such as 'Content-Type' or 'Priority'.
-
-Example:
-[source,ruby]
-    message_properties => { "priority" => "1" }
-
-
-[id="{version}-plugins-{type}s-{plugin}-passive"]
-===== `passive`
-
- * Value type is <>
- * Default value is `false`
-
-Passive queue creation? Useful for checking queue existence without modifying server state.
-
-[id="{version}-plugins-{type}s-{plugin}-password"]
-===== `password`
-
- * Value type is <>
- * Default value is `"guest"`
-
-RabbitMQ password.
-
-[id="{version}-plugins-{type}s-{plugin}-persistent"]
-===== `persistent`
-
- * Value type is <>
- * Default value is `true`
-
-Should RabbitMQ persist messages to disk?
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <>
- * Default value is `5672`
-
-RabbitMQ port to connect on.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl"]
-===== `ssl`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Enable or disable SSL.
-Note that by default remote certificate verification is off.
-Specify `ssl_certificate_path` and `ssl_certificate_password` if you need
-certificate verification.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"]
-===== `ssl_certificate_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Password for the encrypted PKCS12 (.p12) certificate file specified in `ssl_certificate_path`.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"]
-===== `ssl_certificate_path`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_version"]
-===== `ssl_version`
-
- * Value type is <>
- * Default value is `"TLSv1.2"`
-
-Version of the SSL protocol to use.
-
-[id="{version}-plugins-{type}s-{plugin}-user"]
-===== `user`
-
- * Value type is <>
- * Default value is `"guest"`
-
-RabbitMQ username.
-
-[id="{version}-plugins-{type}s-{plugin}-vhost"]
-===== `vhost`
-
- * Value type is <>
- * Default value is `"/"`
-
-The vhost (virtual host) to use. If you don't know what this
-is, leave the default. With the exception of the default
-vhost ("/"), names of vhosts should not begin with a forward
-slash.
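-
-Putting the required settings together, a minimal output configuration
-might look like the following sketch (the exchange name, type, and host
-below are placeholder values, not defaults):
-
-[source,ruby]
-    output {
-      rabbitmq {
-        host => "localhost"            # placeholder broker address
-        exchange => "my_exchange"      # placeholder exchange name
-        exchange_type => "topic"
-        key => "logstash"              # routing key; ignored on fanout exchanges
-      }
-    }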
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/rabbitmq-v5.0.1.asciidoc b/docs/versioned-plugins/outputs/rabbitmq-v5.0.1.asciidoc deleted file mode 100644 index 708861ed7..000000000 --- a/docs/versioned-plugins/outputs/rabbitmq-v5.0.1.asciidoc +++ /dev/null @@ -1,266 +0,0 @@ -:plugin: rabbitmq -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v5.0.1 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-rabbitmq/blob/v5.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Rabbitmq output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Push events to a RabbitMQ exchange. Requires RabbitMQ 2.x -or later version (3.x is recommended). - -Relevant links: - -* http://www.rabbitmq.com/[RabbitMQ] -* http://rubymarchhare.info[March Hare] - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Rabbitmq Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>, one of `["fanout", "direct", "topic", "x-consistent-hash", "x-modulus-hash"]`|Yes -| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message_properties>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-persistent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-arguments"] -===== `arguments` - - * Value type is <> - * Default value is `{}` - -Extra queue arguments as an array. 
-To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}`
-
-[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"]
-===== `automatic_recovery`
-
- * Value type is <>
- * Default value is `true`
-
-Set this to automatically recover from a broken connection. You almost certainly don't want to override this.
-
-[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"]
-===== `connect_retry_interval`
-
- * Value type is <>
- * Default value is `1`
-
-Time in seconds to wait before retrying a connection.
-
-[id="{version}-plugins-{type}s-{plugin}-connection_timeout"]
-===== `connection_timeout`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The default connection timeout in milliseconds. If not specified, the timeout is infinite.
-
-[id="{version}-plugins-{type}s-{plugin}-durable"]
-===== `durable`
-
- * Value type is <>
- * Default value is `true`
-
-Is this exchange durable? (That is, should it survive a broker restart?)
-
-[id="{version}-plugins-{type}s-{plugin}-exchange"]
-===== `exchange`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The name of the exchange.
-
-[id="{version}-plugins-{type}s-{plugin}-exchange_type"]
-===== `exchange_type`
-
- * This is a required setting.
- * Value can be any of: `fanout`, `direct`, `topic`, `x-consistent-hash`, `x-modulus-hash`
- * There is no default value for this setting.
-
-The exchange type (`fanout`, `direct`, `topic`, `x-consistent-hash`, or `x-modulus-hash`)
-
-[id="{version}-plugins-{type}s-{plugin}-heartbeat"]
-===== `heartbeat`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Heartbeat delay in seconds. If unspecified, no heartbeats are sent.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-RabbitMQ server address(es). `host` can either be a single host or a list
-of hosts, for example:
- host => "localhost"
-or
- host => ["host01", "host02"]
-
-If multiple hosts are provided, one host is chosen at random on the initial
-connection and on any subsequent recovery attempt, and is then connected to.
-Note that only one host connection is active at a time.
-
-[id="{version}-plugins-{type}s-{plugin}-key"]
-===== `key`
-
- * Value type is <>
- * Default value is `"logstash"`
-
-Key to route to by default. Defaults to `"logstash"`.
-The default codec for this plugin is JSON; you can override it to suit your particular needs, however.
-
-* Routing keys are ignored on fanout exchanges.
-
-[id="{version}-plugins-{type}s-{plugin}-message_properties"]
-===== `message_properties`
-
- * Value type is <>
- * Default value is `{}`
-
-Add properties to be set per message here, such as 'Content-Type' or 'Priority'.
-
-Example:
-[source,ruby]
-    message_properties => { "priority" => "1" }
-
-
-[id="{version}-plugins-{type}s-{plugin}-passive"]
-===== `passive`
-
- * Value type is <>
- * Default value is `false`
-
-Passive queue creation? Useful for checking queue existence without modifying server state.
-
-[id="{version}-plugins-{type}s-{plugin}-password"]
-===== `password`
-
- * Value type is <>
- * Default value is `"guest"`
-
-RabbitMQ password.
-
-[id="{version}-plugins-{type}s-{plugin}-persistent"]
-===== `persistent`
-
- * Value type is <>
- * Default value is `true`
-
-Should RabbitMQ persist messages to disk?
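-
-As a sketch only: to favor delivery guarantees over raw throughput, you
-might combine a durable exchange with persistent messages (both are the
-defaults; the exchange name and host below are placeholders):
-
-[source,ruby]
-    output {
-      rabbitmq {
-        host => "localhost"          # placeholder broker address
-        exchange => "logs"           # placeholder exchange name
-        exchange_type => "direct"
-        durable => true              # exchange survives a broker restart
-        persistent => true           # messages are persisted to disk
-      }
-    }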
- -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `5672` - -RabbitMQ port to connect on - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` - - * Value type is <> - * There is no default value for this setting. - -Enable or disable SSL. -Note that by default remote certificate verification is off. -Specify ssl_certificate_path and ssl_certificate_password if you need -certificate verification - -[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"] -===== `ssl_certificate_password` - - * Value type is <> - * There is no default value for this setting. - -Password for the encrypted PKCS12 (.p12) certificate file specified in ssl_certificate_path - -[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"] -===== `ssl_certificate_path` - - * Value type is <> - * There is no default value for this setting. - -Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host - -[id="{version}-plugins-{type}s-{plugin}-ssl_version"] -===== `ssl_version` - - * Value type is <> - * Default value is `"TLSv1.2"` - -Version of the SSL protocol to use. - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * Default value is `"guest"` - -RabbitMQ username - -[id="{version}-plugins-{type}s-{plugin}-vhost"] -===== `vhost` - - * Value type is <> - * Default value is `"/"` - -The vhost (virtual host) to use. If you don't know what this -is, leave the default. With the exception of the default -vhost ("/"), names of vhosts should not begin with a forward -slash. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/rabbitmq-v5.0.2.asciidoc b/docs/versioned-plugins/outputs/rabbitmq-v5.0.2.asciidoc deleted file mode 100644 index f2f5a5adf..000000000 --- a/docs/versioned-plugins/outputs/rabbitmq-v5.0.2.asciidoc +++ /dev/null @@ -1,266 +0,0 @@ -:plugin: rabbitmq -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v5.0.2 -:release_date: 2017-09-20 -:changelog_url: https://github.com/logstash-plugins/logstash-output-rabbitmq/blob/v5.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Rabbitmq output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Push events to a RabbitMQ exchange. Requires RabbitMQ 2.x -or later version (3.x is recommended). - -Relevant links: - -* http://www.rabbitmq.com/[RabbitMQ] -* http://rubymarchhare.info[March Hare] - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Rabbitmq Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>, one of `["fanout", "direct", "topic", "x-consistent-hash", "x-modulus-hash"]`|Yes -| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message_properties>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-persistent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-arguments"] -===== `arguments` - - * Value type is <> - * Default value is `{}` - -Extra queue arguments as an array. -To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}` - -[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"] -===== `automatic_recovery` - - * Value type is <> - * Default value is `true` - -Set this to automatically recover from a broken connection. You almost certainly don't want to override this!!! - -[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"] -===== `connect_retry_interval` - - * Value type is <> - * Default value is `1` - -Time in seconds to wait before retrying a connection - -[id="{version}-plugins-{type}s-{plugin}-connection_timeout"] -===== `connection_timeout` - - * Value type is <> - * There is no default value for this setting. - -The default connection timeout in milliseconds. If not specified the timeout is infinite. - -[id="{version}-plugins-{type}s-{plugin}-durable"] -===== `durable` - - * Value type is <> - * Default value is `true` - -Is this exchange durable? (aka; Should it survive a broker restart?) - -[id="{version}-plugins-{type}s-{plugin}-exchange"] -===== `exchange` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The name of the exchange - -[id="{version}-plugins-{type}s-{plugin}-exchange_type"] -===== `exchange_type` - - * This is a required setting. - * Value can be any of: `fanout`, `direct`, `topic`, `x-consistent-hash`, `x-modulus-hash` - * There is no default value for this setting. - -The exchange type (fanout, topic, direct) - -[id="{version}-plugins-{type}s-{plugin}-heartbeat"] -===== `heartbeat` - - * Value type is <> - * There is no default value for this setting. - -Heartbeat delay in seconds. 
If unspecified, no heartbeats are sent.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-RabbitMQ server address(es). `host` can either be a single host or a list
-of hosts, for example:
- host => "localhost"
-or
- host => ["host01", "host02"]
-
-If multiple hosts are provided, one host is chosen at random on the initial
-connection and on any subsequent recovery attempt, and is then connected to.
-Note that only one host connection is active at a time.
-
-[id="{version}-plugins-{type}s-{plugin}-key"]
-===== `key`
-
- * Value type is <>
- * Default value is `"logstash"`
-
-Key to route to by default. Defaults to `"logstash"`.
-The default codec for this plugin is JSON; you can override it to suit your particular needs, however.
-
-* Routing keys are ignored on fanout exchanges.
-
-[id="{version}-plugins-{type}s-{plugin}-message_properties"]
-===== `message_properties`
-
- * Value type is <>
- * Default value is `{}`
-
-Add properties to be set per message here, such as 'Content-Type' or 'Priority'.
-
-Example:
-[source,ruby]
-    message_properties => { "priority" => "1" }
-
-
-[id="{version}-plugins-{type}s-{plugin}-passive"]
-===== `passive`
-
- * Value type is <>
- * Default value is `false`
-
-Passive queue creation? Useful for checking queue existence without modifying server state.
-
-[id="{version}-plugins-{type}s-{plugin}-password"]
-===== `password`
-
- * Value type is <>
- * Default value is `"guest"`
-
-RabbitMQ password.
-
-[id="{version}-plugins-{type}s-{plugin}-persistent"]
-===== `persistent`
-
- * Value type is <>
- * Default value is `true`
-
-Should RabbitMQ persist messages to disk?
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <>
- * Default value is `5672`
-
-RabbitMQ port to connect on.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl"]
-===== `ssl`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Enable or disable SSL.
-Note that by default remote certificate verification is off.
-Specify `ssl_certificate_path` and `ssl_certificate_password` if you need
-certificate verification.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"]
-===== `ssl_certificate_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Password for the encrypted PKCS12 (.p12) certificate file specified in `ssl_certificate_path`.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"]
-===== `ssl_certificate_path`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_version"]
-===== `ssl_version`
-
- * Value type is <>
- * Default value is `"TLSv1.2"`
-
-Version of the SSL protocol to use.
-
-[id="{version}-plugins-{type}s-{plugin}-user"]
-===== `user`
-
- * Value type is <>
- * Default value is `"guest"`
-
-RabbitMQ username.
-
-[id="{version}-plugins-{type}s-{plugin}-vhost"]
-===== `vhost`
-
- * Value type is <>
- * Default value is `"/"`
-
-The vhost (virtual host) to use. If you don't know what this
-is, leave the default. With the exception of the default
-vhost ("/"), names of vhosts should not begin with a forward
-slash.
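-
-For TLS, a sketch of a verified connection might look like this (the
-host, certificate path, and password are placeholders, and 5671 is
-assumed here as the conventional AMQPS port, so confirm it for your
-broker):
-
-[source,ruby]
-    output {
-      rabbitmq {
-        host => "rabbitmq.example.com"                       # placeholder
-        port => 5671                                         # assumed AMQPS port
-        exchange => "logs"                                   # placeholder
-        exchange_type => "fanout"
-        ssl => true
-        ssl_certificate_path => "/etc/logstash/client.p12"   # placeholder path
-        ssl_certificate_password => "changeme"               # placeholder password
-      }
-    }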
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/rabbitmq-v5.0.3.asciidoc b/docs/versioned-plugins/outputs/rabbitmq-v5.0.3.asciidoc deleted file mode 100644 index 780baefdb..000000000 --- a/docs/versioned-plugins/outputs/rabbitmq-v5.0.3.asciidoc +++ /dev/null @@ -1,266 +0,0 @@ -:plugin: rabbitmq -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v5.0.3 -:release_date: 2017-11-13 -:changelog_url: https://github.com/logstash-plugins/logstash-output-rabbitmq/blob/v5.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Rabbitmq output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Push events to a RabbitMQ exchange. Requires RabbitMQ 2.x -or later version (3.x is recommended). - -Relevant links: - -* http://www.rabbitmq.com/[RabbitMQ] -* http://rubymarchhare.info[March Hare] - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Rabbitmq Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>, one of `["fanout", "direct", "topic", "x-consistent-hash", "x-modulus-hash"]`|Yes -| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message_properties>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-persistent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-arguments"] -===== `arguments` - - * Value type is <> - * Default value is `{}` - -Extra queue arguments as an array. 
-To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}`
-
-[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"]
-===== `automatic_recovery`
-
- * Value type is <>
- * Default value is `true`
-
-Set this to automatically recover from a broken connection. You almost certainly don't want to override this.
-
-[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"]
-===== `connect_retry_interval`
-
- * Value type is <>
- * Default value is `1`
-
-Time in seconds to wait before retrying a connection.
-
-[id="{version}-plugins-{type}s-{plugin}-connection_timeout"]
-===== `connection_timeout`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The default connection timeout in milliseconds. If not specified, the timeout is infinite.
-
-[id="{version}-plugins-{type}s-{plugin}-durable"]
-===== `durable`
-
- * Value type is <>
- * Default value is `true`
-
-Is this exchange durable? (That is, should it survive a broker restart?)
-
-[id="{version}-plugins-{type}s-{plugin}-exchange"]
-===== `exchange`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-The name of the exchange.
-
-[id="{version}-plugins-{type}s-{plugin}-exchange_type"]
-===== `exchange_type`
-
- * This is a required setting.
- * Value can be any of: `fanout`, `direct`, `topic`, `x-consistent-hash`, `x-modulus-hash`
- * There is no default value for this setting.
-
-The exchange type (`fanout`, `direct`, `topic`, `x-consistent-hash`, or `x-modulus-hash`)
-
-[id="{version}-plugins-{type}s-{plugin}-heartbeat"]
-===== `heartbeat`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Heartbeat delay in seconds. If unspecified, no heartbeats are sent.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-RabbitMQ server address(es). `host` can either be a single host or a list
-of hosts, for example:
- host => "localhost"
-or
- host => ["host01", "host02"]
-
-If multiple hosts are provided, one host is chosen at random on the initial
-connection and on any subsequent recovery attempt, and is then connected to.
-Note that only one host connection is active at a time.
-
-[id="{version}-plugins-{type}s-{plugin}-key"]
-===== `key`
-
- * Value type is <>
- * Default value is `"logstash"`
-
-Key to route to by default. Defaults to `"logstash"`.
-The default codec for this plugin is JSON; you can override it to suit your particular needs, however.
-
-* Routing keys are ignored on fanout exchanges.
-
-[id="{version}-plugins-{type}s-{plugin}-message_properties"]
-===== `message_properties`
-
- * Value type is <>
- * Default value is `{}`
-
-Add properties to be set per message here, such as 'Content-Type' or 'Priority'.
-
-Example:
-[source,ruby]
-    message_properties => { "priority" => "1" }
-
-
-[id="{version}-plugins-{type}s-{plugin}-passive"]
-===== `passive`
-
- * Value type is <>
- * Default value is `false`
-
-Passive queue creation? Useful for checking queue existence without modifying server state.
-
-[id="{version}-plugins-{type}s-{plugin}-password"]
-===== `password`
-
- * Value type is <>
- * Default value is `"guest"`
-
-RabbitMQ password.
-
-[id="{version}-plugins-{type}s-{plugin}-persistent"]
-===== `persistent`
-
- * Value type is <>
- * Default value is `true`
-
-Should RabbitMQ persist messages to disk?
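-
-As a sketch of the multi-host failover described under `host` (the
-hostnames and exchange name below are placeholders):
-
-[source,ruby]
-    output {
-      rabbitmq {
-        host => ["rabbit01", "rabbit02"]   # one host is chosen at random
-        exchange => "logs"                 # placeholder exchange name
-        exchange_type => "topic"
-        key => "logstash"
-      }
-    }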
- -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `5672` - -RabbitMQ port to connect on - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` - - * Value type is <> - * There is no default value for this setting. - -Enable or disable SSL. -Note that by default remote certificate verification is off. -Specify ssl_certificate_path and ssl_certificate_password if you need -certificate verification - -[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"] -===== `ssl_certificate_password` - - * Value type is <> - * There is no default value for this setting. - -Password for the encrypted PKCS12 (.p12) certificate file specified in ssl_certificate_path - -[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"] -===== `ssl_certificate_path` - - * Value type is <> - * There is no default value for this setting. - -Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host - -[id="{version}-plugins-{type}s-{plugin}-ssl_version"] -===== `ssl_version` - - * Value type is <> - * Default value is `"TLSv1.2"` - -Version of the SSL protocol to use. - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * Default value is `"guest"` - -RabbitMQ username - -[id="{version}-plugins-{type}s-{plugin}-vhost"] -===== `vhost` - - * Value type is <> - * Default value is `"/"` - -The vhost (virtual host) to use. If you don't know what this -is, leave the default. With the exception of the default -vhost ("/"), names of vhosts should not begin with a forward -slash. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/rabbitmq-v5.1.0.asciidoc b/docs/versioned-plugins/outputs/rabbitmq-v5.1.0.asciidoc deleted file mode 100644 index 96681e495..000000000 --- a/docs/versioned-plugins/outputs/rabbitmq-v5.1.0.asciidoc +++ /dev/null @@ -1,266 +0,0 @@ -:plugin: rabbitmq -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v5.1.0 -:release_date: 2018-01-09 -:changelog_url: https://github.com/logstash-plugins/logstash-output-rabbitmq/blob/v5.1.0/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Rabbitmq output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Push events to a RabbitMQ exchange. Requires RabbitMQ 2.x -or later version (3.x is recommended). - -Relevant links: - -* http://www.rabbitmq.com/[RabbitMQ] -* http://rubymarchhare.info[March Hare] - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Rabbitmq Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-arguments>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-automatic_recovery>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connect_retry_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-connection_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-durable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exchange>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-exchange_type>> |<>, one of `["fanout", "direct", "topic", "x-consistent-hash", "x-modulus-hash"]`|Yes -| <<{version}-plugins-{type}s-{plugin}-heartbeat>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message_properties>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-passive>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-persistent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_path>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-arguments"] -===== `arguments` - - * Value type is <> - * Default value is `{}` - -Extra queue arguments as an array. -To make a RabbitMQ queue mirrored, use: `{"x-ha-policy" => "all"}` - -[id="{version}-plugins-{type}s-{plugin}-automatic_recovery"] -===== `automatic_recovery` - - * Value type is <> - * Default value is `true` - -Set this to automatically recover from a broken connection. You almost certainly don't want to override this!!! - -[id="{version}-plugins-{type}s-{plugin}-connect_retry_interval"] -===== `connect_retry_interval` - - * Value type is <> - * Default value is `1` - -Time in seconds to wait before retrying a connection - -[id="{version}-plugins-{type}s-{plugin}-connection_timeout"] -===== `connection_timeout` - - * Value type is <> - * There is no default value for this setting. - -The default connection timeout in milliseconds. If not specified the timeout is infinite. - -[id="{version}-plugins-{type}s-{plugin}-durable"] -===== `durable` - - * Value type is <> - * Default value is `true` - -Is this exchange durable? (aka; Should it survive a broker restart?) - -[id="{version}-plugins-{type}s-{plugin}-exchange"] -===== `exchange` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The name of the exchange - -[id="{version}-plugins-{type}s-{plugin}-exchange_type"] -===== `exchange_type` - - * This is a required setting. - * Value can be any of: `fanout`, `direct`, `topic`, `x-consistent-hash`, `x-modulus-hash` - * There is no default value for this setting. - -The exchange type (fanout, topic, direct) - -[id="{version}-plugins-{type}s-{plugin}-heartbeat"] -===== `heartbeat` - - * Value type is <> - * There is no default value for this setting. - -Heartbeat delay in seconds. 
If unspecified, no heartbeats are sent.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-RabbitMQ server address(es). `host` can either be a single host or a list
-of hosts, for example:
- host => "localhost"
-or
- host => ["host01", "host02"]
-
-If multiple hosts are provided, one host is chosen at random on the initial
-connection and on any subsequent recovery attempt, and is then connected to.
-Note that only one host connection is active at a time.
-
-[id="{version}-plugins-{type}s-{plugin}-key"]
-===== `key`
-
- * Value type is <>
- * Default value is `"logstash"`
-
-Key to route to by default. Defaults to `"logstash"`.
-The default codec for this plugin is JSON; you can override it to suit your particular needs, however.
-
-* Routing keys are ignored on fanout exchanges.
-
-[id="{version}-plugins-{type}s-{plugin}-message_properties"]
-===== `message_properties`
-
- * Value type is <>
- * Default value is `{}`
-
-Add properties to be set per message here, such as 'Content-Type' or 'Priority'.
-
-Example:
-[source,ruby]
-    message_properties => { "priority" => "1" }
-
-
-[id="{version}-plugins-{type}s-{plugin}-passive"]
-===== `passive`
-
- * Value type is <>
- * Default value is `false`
-
-Passive queue creation? Useful for checking queue existence without modifying server state.
-
-[id="{version}-plugins-{type}s-{plugin}-password"]
-===== `password`
-
- * Value type is <>
- * Default value is `"guest"`
-
-RabbitMQ password.
-
-[id="{version}-plugins-{type}s-{plugin}-persistent"]
-===== `persistent`
-
- * Value type is <>
- * Default value is `true`
-
-Should RabbitMQ persist messages to disk?
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * Value type is <>
- * Default value is `5672`
-
-RabbitMQ port to connect on.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl"]
-===== `ssl`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Enable or disable SSL.
-Note that by default remote certificate verification is off.
-Specify `ssl_certificate_path` and `ssl_certificate_password` if you need
-certificate verification.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_password"]
-===== `ssl_certificate_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Password for the encrypted PKCS12 (.p12) certificate file specified in `ssl_certificate_path`.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_path"]
-===== `ssl_certificate_path`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Path to an SSL certificate in PKCS12 (.p12) format used for verifying the remote host.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_version"]
-===== `ssl_version`
-
- * Value type is <>
- * Default value is `"TLSv1.2"`
-
-Version of the SSL protocol to use.
-
-[id="{version}-plugins-{type}s-{plugin}-user"]
-===== `user`
-
- * Value type is <>
- * Default value is `"guest"`
-
-RabbitMQ username.
-
-[id="{version}-plugins-{type}s-{plugin}-vhost"]
-===== `vhost`
-
- * Value type is <>
- * Default value is `"/"`
-
-The vhost (virtual host) to use. If you don't know what this
-is, leave the default. With the exception of the default
-vhost ("/"), names of vhosts should not begin with a forward
-slash.
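-
-Pulling the connection settings together, a sketch using a dedicated
-vhost and non-default credentials (all values below are placeholders):
-
-[source,ruby]
-    output {
-      rabbitmq {
-        host => "localhost"
-        vhost => "logging"         # placeholder vhost; no leading "/"
-        user => "logstash"         # placeholder credentials
-        password => "s3cr3t"
-        exchange => "logs"
-        exchange_type => "direct"
-        key => "app.events"        # placeholder routing key
-      }
-    }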
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/rackspace-index.asciidoc b/docs/versioned-plugins/outputs/rackspace-index.asciidoc deleted file mode 100644 index 025a83289..000000000 --- a/docs/versioned-plugins/outputs/rackspace-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: rackspace -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::rackspace-v2.0.7.asciidoc[] -include::rackspace-v2.0.5.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/rackspace-v2.0.5.asciidoc b/docs/versioned-plugins/outputs/rackspace-v2.0.5.asciidoc deleted file mode 100644 index ab1500c42..000000000 --- a/docs/versioned-plugins/outputs/rackspace-v2.0.5.asciidoc +++ /dev/null @@ -1,91 +0,0 @@ -:plugin: rackspace -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.0.5 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-rackspace/blob/v2.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Rackspace output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Rackspace Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-api_key>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-region>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ttl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-username>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-api_key"] -===== `api_key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Rackspace Cloud API Key - -[id="{version}-plugins-{type}s-{plugin}-queue"] -===== `queue` - - * Value type is <> - * Default value is `"logstash"` - -Rackspace Queue Name - -[id="{version}-plugins-{type}s-{plugin}-region"] -===== `region` - - * Value type is <> - * Default value is `"dfw"` - -Rackspace region -ord, dfw, lon, syd, etc - -[id="{version}-plugins-{type}s-{plugin}-ttl"] -===== `ttl` - - * Value type is <> - * Default value is `360` - -time for item to live in queue - -[id="{version}-plugins-{type}s-{plugin}-username"] -===== `username` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. 
- -Rackspace Cloud Username - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/rackspace-v2.0.7.asciidoc b/docs/versioned-plugins/outputs/rackspace-v2.0.7.asciidoc deleted file mode 100644 index 923b9d7ee..000000000 --- a/docs/versioned-plugins/outputs/rackspace-v2.0.7.asciidoc +++ /dev/null @@ -1,91 +0,0 @@ -:plugin: rackspace -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v2.0.7 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-rackspace/blob/v2.0.7/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Rackspace output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Sends events to a Rackspace Cloud Queue service. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Rackspace Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-api_key>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-region>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ttl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-username>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-api_key"] -===== `api_key` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -Rackspace Cloud API Key - -[id="{version}-plugins-{type}s-{plugin}-queue"] -===== `queue` - - * Value type is <> - * Default value is `"logstash"` - -Rackspace Queue Name - -[id="{version}-plugins-{type}s-{plugin}-region"] -===== `region` - - * Value type is <> - * Default value is `"dfw"` - -Rackspace region -ord, dfw, lon, syd, etc - -[id="{version}-plugins-{type}s-{plugin}-ttl"] -===== `ttl` - - * Value type is <> - * Default value is `360` - -time for item to live in queue - -[id="{version}-plugins-{type}s-{plugin}-username"] -===== `username` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. 
- -Rackspace Cloud Username - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/rados-index.asciidoc b/docs/versioned-plugins/outputs/rados-index.asciidoc deleted file mode 100644 index 5bef240c6..000000000 --- a/docs/versioned-plugins/outputs/rados-index.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -:plugin: rados -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -|======================================================================= - - diff --git a/docs/versioned-plugins/outputs/redis-index.asciidoc b/docs/versioned-plugins/outputs/redis-index.asciidoc deleted file mode 100644 index 57e4d7c7a..000000000 --- a/docs/versioned-plugins/outputs/redis-index.asciidoc +++ /dev/null @@ -1,22 +0,0 @@ -:plugin: redis -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-13 -| <> | 2017-09-12 -| <> | 2017-08-16 -| <> | 2017-07-27 -| <> | 2017-08-18 -| <> | 2017-06-23 -|======================================================================= - -include::redis-v4.0.3.asciidoc[] -include::redis-v4.0.2.asciidoc[] -include::redis-v4.0.1.asciidoc[] -include::redis-v4.0.0.asciidoc[] -include::redis-v3.0.5.asciidoc[] -include::redis-v3.0.4.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/redis-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/redis-v3.0.4.asciidoc deleted file mode 100644 index 2f61c8777..000000000 --- a/docs/versioned-plugins/outputs/redis-v3.0.4.asciidoc +++ /dev/null @@ -1,221 +0,0 @@ -:plugin: redis -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-redis/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Redis output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output will send events to a Redis queue using RPUSH. -The RPUSH command is supported in Redis v0.0.7+. Using -PUBLISH to a channel requires at least v1.3.8+. -While you may be able to make these Redis versions work, -the best performance and stability will be found in more -recent stable versions. Versions 2.6.0+ are recommended. - -For more information, see http://redis.io/[the Redis homepage] - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Redis Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-batch>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-batch_events>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-batch_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-congestion_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-congestion_threshold>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-data_type>> |<>, one of `["list", "channel"]`|No -| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-shuffle_hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-batch"] -===== `batch` - - * Value type is <> - * Default value is `false` - -Set to true if you want Redis to batch up values and send 1 RPUSH command -instead of one command per value to push on the list. Note that this only -works with `data_type="list"` mode right now. - -If true, we send an RPUSH every "batch_events" events or -"batch_timeout" seconds (whichever comes first). -Only supported for `data_type` is "list". - -[id="{version}-plugins-{type}s-{plugin}-batch_events"] -===== `batch_events` - - * Value type is <> - * Default value is `50` - -If batch is set to true, the number of events we queue up for an RPUSH. - -[id="{version}-plugins-{type}s-{plugin}-batch_timeout"] -===== `batch_timeout` - - * Value type is <> - * Default value is `5` - -If batch is set to true, the maximum amount of time between RPUSH commands -when there are pending events to flush. - -[id="{version}-plugins-{type}s-{plugin}-congestion_interval"] -===== `congestion_interval` - - * Value type is <> - * Default value is `1` - -How often to check for congestion. Default is one second. -Zero means to check on every event. - -[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"] -===== `congestion_threshold` - - * Value type is <> - * Default value is `0` - -In case Redis `data_type` is `list` and has more than `@congestion_threshold` items, -block until someone consumes them and reduces congestion, otherwise if there are -no consumers Redis will run out of memory, unless it was configured with OOM protection. -But even with OOM protection, a single Redis list can block all other users of Redis, -until Redis CPU consumption reaches the max allowed RAM size. -A default value of 0 means that this limit is disabled. -Only supported for `list` Redis `data_type`. - -[id="{version}-plugins-{type}s-{plugin}-data_type"] -===== `data_type` - - * Value can be any of: `list`, `channel` - * There is no default value for this setting. - -Either list or channel. If `redis_type` is list, then we will set -RPUSH to key. If `redis_type` is channel, then we will PUBLISH to `key`. - -[id="{version}-plugins-{type}s-{plugin}-db"] -===== `db` - - * Value type is <> - * Default value is `0` - -The Redis database number. 
- -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `["127.0.0.1"]` - -The hostname(s) of your Redis server(s). Ports may be specified on any -hostname, which will override the global port config. -If the hosts list is an array, Logstash will pick one random host to connect to, -if that host is disconnected it will then pick another. - -For example: -[source,ruby] - "127.0.0.1" - ["127.0.0.1", "127.0.0.2"] - ["127.0.0.1:6380", "127.0.0.1"] - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * Value type is <> - * There is no default value for this setting. - -The name of a Redis list or channel. Dynamic names are -valid here, for example `logstash-%{type}`. - -[id="{version}-plugins-{type}s-{plugin}-name"] -===== `name` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * Default value is `"default"` - -Name is used for logging in case there are multiple instances. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password to authenticate with. There is no authentication by default. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `6379` - -The default port to connect on. Can be overridden on any hostname. - -[id="{version}-plugins-{type}s-{plugin}-queue"] -===== `queue` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * There is no default value for this setting. - -The name of the Redis queue (we'll use RPUSH on this). Dynamic names are -valid here, for example `logstash-%{type}` - -[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] -===== `reconnect_interval` - - * Value type is <> - * Default value is `1` - -Interval for reconnecting to failed Redis connections - -[id="{version}-plugins-{type}s-{plugin}-shuffle_hosts"] -===== `shuffle_hosts` - - * Value type is <> - * Default value is `true` - -Shuffle the host list during Logstash startup. - -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * Default value is `5` - -Redis initial connection timeout in seconds. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/redis-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/redis-v3.0.5.asciidoc deleted file mode 100644 index 8f3b1939a..000000000 --- a/docs/versioned-plugins/outputs/redis-v3.0.5.asciidoc +++ /dev/null @@ -1,221 +0,0 @@ -:plugin: redis -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-08-18 -:changelog_url: https://github.com/logstash-plugins/logstash-output-redis/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Redis output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output will send events to a Redis queue using RPUSH. -The RPUSH command is supported in Redis v0.0.7+. 
Using -PUBLISH to a channel requires at least v1.3.8+. -While you may be able to make these Redis versions work, -the best performance and stability will be found in more -recent stable versions. Versions 2.6.0+ are recommended. - -For more information, see http://redis.io/[the Redis homepage] - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Redis Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-batch>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-batch_events>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-batch_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-congestion_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-congestion_threshold>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-data_type>> |<>, one of `["list", "channel"]`|No -| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-shuffle_hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-batch"] -===== `batch` - - * Value type is <> - * Default value is `false` - -Set to true if you want Redis to batch up values and send 1 RPUSH command -instead of one command per value to push on the list. Note that this only -works with `data_type="list"` mode right now. - -If true, we send an RPUSH every "batch_events" events or -"batch_timeout" seconds (whichever comes first). -Only supported for `data_type` is "list". - -[id="{version}-plugins-{type}s-{plugin}-batch_events"] -===== `batch_events` - - * Value type is <> - * Default value is `50` - -If batch is set to true, the number of events we queue up for an RPUSH. - -[id="{version}-plugins-{type}s-{plugin}-batch_timeout"] -===== `batch_timeout` - - * Value type is <> - * Default value is `5` - -If batch is set to true, the maximum amount of time between RPUSH commands -when there are pending events to flush. - -[id="{version}-plugins-{type}s-{plugin}-congestion_interval"] -===== `congestion_interval` - - * Value type is <> - * Default value is `1` - -How often to check for congestion. Default is one second. -Zero means to check on every event. - -[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"] -===== `congestion_threshold` - - * Value type is <> - * Default value is `0` - -In case Redis `data_type` is `list` and has more than `@congestion_threshold` items, -block until someone consumes them and reduces congestion, otherwise if there are -no consumers Redis will run out of memory, unless it was configured with OOM protection. -But even with OOM protection, a single Redis list can block all other users of Redis, -until Redis CPU consumption reaches the max allowed RAM size. -A default value of 0 means that this limit is disabled. 
-Only supported for `list` Redis `data_type`. - -[id="{version}-plugins-{type}s-{plugin}-data_type"] -===== `data_type` - - * Value can be any of: `list`, `channel` - * There is no default value for this setting. - -Either list or channel. If `redis_type` is list, then we will set -RPUSH to key. If `redis_type` is channel, then we will PUBLISH to `key`. - -[id="{version}-plugins-{type}s-{plugin}-db"] -===== `db` - - * Value type is <> - * Default value is `0` - -The Redis database number. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `["127.0.0.1"]` - -The hostname(s) of your Redis server(s). Ports may be specified on any -hostname, which will override the global port config. -If the hosts list is an array, Logstash will pick one random host to connect to, -if that host is disconnected it will then pick another. - -For example: -[source,ruby] - "127.0.0.1" - ["127.0.0.1", "127.0.0.2"] - ["127.0.0.1:6380", "127.0.0.1"] - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * Value type is <> - * There is no default value for this setting. - -The name of a Redis list or channel. Dynamic names are -valid here, for example `logstash-%{type}`. - -[id="{version}-plugins-{type}s-{plugin}-name"] -===== `name` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * Default value is `"default"` - -Name is used for logging in case there are multiple instances. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password to authenticate with. There is no authentication by default. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `6379` - -The default port to connect on. Can be overridden on any hostname. - -[id="{version}-plugins-{type}s-{plugin}-queue"] -===== `queue` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * There is no default value for this setting. - -The name of the Redis queue (we'll use RPUSH on this). Dynamic names are -valid here, for example `logstash-%{type}` - -[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] -===== `reconnect_interval` - - * Value type is <> - * Default value is `1` - -Interval for reconnecting to failed Redis connections - -[id="{version}-plugins-{type}s-{plugin}-shuffle_hosts"] -===== `shuffle_hosts` - - * Value type is <> - * Default value is `true` - -Shuffle the host list during Logstash startup. - -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * Default value is `5` - -Redis initial connection timeout in seconds. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/redis-v4.0.0.asciidoc b/docs/versioned-plugins/outputs/redis-v4.0.0.asciidoc deleted file mode 100644 index 494445566..000000000 --- a/docs/versioned-plugins/outputs/redis-v4.0.0.asciidoc +++ /dev/null @@ -1,202 +0,0 @@ -:plugin: redis -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v4.0.0 -:release_date: 2017-07-27 -:changelog_url: https://github.com/logstash-plugins/logstash-output-redis/blob/v4.0.0/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Redis output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output will send events to a Redis queue using RPUSH. -The RPUSH command is supported in Redis v0.0.7+. Using -PUBLISH to a channel requires at least v1.3.8+. -While you may be able to make these Redis versions work, -the best performance and stability will be found in more -recent stable versions. Versions 2.6.0+ are recommended. - -For more information, see http://redis.io/[the Redis homepage] - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Redis Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-batch>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-batch_events>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-batch_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-congestion_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-congestion_threshold>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-data_type>> |<>, one of `["list", "channel"]`|No -| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-shuffle_hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-batch"] -===== `batch` - - * Value type is <> - * Default value is `false` - -Set to true if you want Redis to batch up values and send 1 RPUSH command -instead of one command per value to push on the list. Note that this only -works with `data_type="list"` mode right now. - -If true, we send an RPUSH every "batch_events" events or -"batch_timeout" seconds (whichever comes first). -Only supported for `data_type` is "list". - -[id="{version}-plugins-{type}s-{plugin}-batch_events"] -===== `batch_events` - - * Value type is <> - * Default value is `50` - -If batch is set to true, the number of events we queue up for an RPUSH. - -[id="{version}-plugins-{type}s-{plugin}-batch_timeout"] -===== `batch_timeout` - - * Value type is <> - * Default value is `5` - -If batch is set to true, the maximum amount of time between RPUSH commands -when there are pending events to flush. - -[id="{version}-plugins-{type}s-{plugin}-congestion_interval"] -===== `congestion_interval` - - * Value type is <> - * Default value is `1` - -How often to check for congestion. Default is one second. -Zero means to check on every event. 
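-
-As a sketch of the PUBLISH path mentioned in the description above (the
-channel name is hypothetical), events can be published to a Redis channel
-instead of pushed to a list:
-[source,ruby]
-    output {
-      redis {
-        host => ["127.0.0.1"]
-        data_type => "channel"       # PUBLISH instead of RPUSH
-        key => "logstash-%{type}"    # dynamic channel name via sprintf
-      }
-    }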
- -[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"] -===== `congestion_threshold` - - * Value type is <> - * Default value is `0` - -In case Redis `data_type` is `list` and has more than `@congestion_threshold` items, -block until someone consumes them and reduces congestion, otherwise if there are -no consumers Redis will run out of memory, unless it was configured with OOM protection. -But even with OOM protection, a single Redis list can block all other users of Redis, -until Redis CPU consumption reaches the max allowed RAM size. -A default value of 0 means that this limit is disabled. -Only supported for `list` Redis `data_type`. - -[id="{version}-plugins-{type}s-{plugin}-data_type"] -===== `data_type` - - * Value can be any of: `list`, `channel` - * There is no default value for this setting. - -Either list or channel. If `data_type` is list, then we will set -RPUSH to key. If `data_type` is channel, then we will PUBLISH to `key`. - -[id="{version}-plugins-{type}s-{plugin}-db"] -===== `db` - - * Value type is <> - * Default value is `0` - -The Redis database number. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `["127.0.0.1"]` - -The hostname(s) of your Redis server(s). Ports may be specified on any -hostname, which will override the global port config. -If the hosts list is an array, Logstash will pick one random host to connect to, -if that host is disconnected it will then pick another. - -For example: -[source,ruby] - "127.0.0.1" - ["127.0.0.1", "127.0.0.2"] - ["127.0.0.1:6380", "127.0.0.1"] - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * Value type is <> - * There is no default value for this setting. - -The name of a Redis list or channel. Dynamic names are -valid here, for example `logstash-%{type}`. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password to authenticate with. There is no authentication by default. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `6379` - -The default port to connect on. Can be overridden on any hostname. - -[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] -===== `reconnect_interval` - - * Value type is <> - * Default value is `1` - -Interval for reconnecting to failed Redis connections - -[id="{version}-plugins-{type}s-{plugin}-shuffle_hosts"] -===== `shuffle_hosts` - - * Value type is <> - * Default value is `true` - -Shuffle the host list during Logstash startup. - -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * Default value is `5` - -Redis initial connection timeout in seconds. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/redis-v4.0.1.asciidoc b/docs/versioned-plugins/outputs/redis-v4.0.1.asciidoc deleted file mode 100644 index e89cebc75..000000000 --- a/docs/versioned-plugins/outputs/redis-v4.0.1.asciidoc +++ /dev/null @@ -1,202 +0,0 @@ -:plugin: redis -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v4.0.1 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-redis/blob/v4.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Redis output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output will send events to a Redis queue using RPUSH. -The RPUSH command is supported in Redis v0.0.7+. Using -PUBLISH to a channel requires at least v1.3.8+. -While you may be able to make these Redis versions work, -the best performance and stability will be found in more -recent stable versions. Versions 2.6.0+ are recommended. - -For more information, see http://redis.io/[the Redis homepage] - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Redis Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-batch>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-batch_events>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-batch_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-congestion_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-congestion_threshold>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-data_type>> |<>, one of `["list", "channel"]`|No -| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-shuffle_hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-batch"] -===== `batch` - - * Value type is <> - * Default value is `false` - -Set to true if you want Redis to batch up values and send 1 RPUSH command -instead of one command per value to push on the list. Note that this only -works with `data_type="list"` mode right now. - -If true, we send an RPUSH every "batch_events" events or -"batch_timeout" seconds (whichever comes first). -Only supported for `data_type` is "list". - -[id="{version}-plugins-{type}s-{plugin}-batch_events"] -===== `batch_events` - - * Value type is <> - * Default value is `50` - -If batch is set to true, the number of events we queue up for an RPUSH. - -[id="{version}-plugins-{type}s-{plugin}-batch_timeout"] -===== `batch_timeout` - - * Value type is <> - * Default value is `5` - -If batch is set to true, the maximum amount of time between RPUSH commands -when there are pending events to flush. - -[id="{version}-plugins-{type}s-{plugin}-congestion_interval"] -===== `congestion_interval` - - * Value type is <> - * Default value is `1` - -How often to check for congestion. Default is one second. -Zero means to check on every event. 
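-
-For instance, combining `congestion_interval` above with the
-`congestion_threshold` setting described next, a hedged sketch that blocks
-once a list backs up past 100000 items (an arbitrary illustrative value):
-[source,ruby]
-    output {
-      redis {
-        data_type => "list"
-        key => "logstash"
-        congestion_threshold => 100000   # block while the list exceeds this size
-        congestion_interval => 1         # re-check congestion every second
-      }
-    }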
- -[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"] -===== `congestion_threshold` - - * Value type is <> - * Default value is `0` - -In case Redis `data_type` is `list` and has more than `@congestion_threshold` items, -block until someone consumes them and reduces congestion, otherwise if there are -no consumers Redis will run out of memory, unless it was configured with OOM protection. -But even with OOM protection, a single Redis list can block all other users of Redis, -until Redis CPU consumption reaches the max allowed RAM size. -A default value of 0 means that this limit is disabled. -Only supported for `list` Redis `data_type`. - -[id="{version}-plugins-{type}s-{plugin}-data_type"] -===== `data_type` - - * Value can be any of: `list`, `channel` - * There is no default value for this setting. - -Either list or channel. If `data_type` is list, then we will set -RPUSH to key. If `data_type` is channel, then we will PUBLISH to `key`. - -[id="{version}-plugins-{type}s-{plugin}-db"] -===== `db` - - * Value type is <> - * Default value is `0` - -The Redis database number. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `["127.0.0.1"]` - -The hostname(s) of your Redis server(s). Ports may be specified on any -hostname, which will override the global port config. -If the hosts list is an array, Logstash will pick one random host to connect to, -if that host is disconnected it will then pick another. - -For example: -[source,ruby] - "127.0.0.1" - ["127.0.0.1", "127.0.0.2"] - ["127.0.0.1:6380", "127.0.0.1"] - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * Value type is <> - * There is no default value for this setting. - -The name of a Redis list or channel. Dynamic names are -valid here, for example `logstash-%{type}`. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password to authenticate with. There is no authentication by default. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `6379` - -The default port to connect on. Can be overridden on any hostname. - -[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] -===== `reconnect_interval` - - * Value type is <> - * Default value is `1` - -Interval for reconnecting to failed Redis connections - -[id="{version}-plugins-{type}s-{plugin}-shuffle_hosts"] -===== `shuffle_hosts` - - * Value type is <> - * Default value is `true` - -Shuffle the host list during Logstash startup. - -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * Default value is `5` - -Redis initial connection timeout in seconds. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/redis-v4.0.2.asciidoc b/docs/versioned-plugins/outputs/redis-v4.0.2.asciidoc deleted file mode 100644 index 9c65d3678..000000000 --- a/docs/versioned-plugins/outputs/redis-v4.0.2.asciidoc +++ /dev/null @@ -1,202 +0,0 @@ -:plugin: redis -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v4.0.2 -:release_date: 2017-09-12 -:changelog_url: https://github.com/logstash-plugins/logstash-output-redis/blob/v4.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Redis output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output will send events to a Redis queue using RPUSH. -The RPUSH command is supported in Redis v0.0.7+. Using -PUBLISH to a channel requires at least v1.3.8+. -While you may be able to make these Redis versions work, -the best performance and stability will be found in more -recent stable versions. Versions 2.6.0+ are recommended. - -For more information, see http://redis.io/[the Redis homepage] - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Redis Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-batch>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-batch_events>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-batch_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-congestion_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-congestion_threshold>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-data_type>> |<>, one of `["list", "channel"]`|No -| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-shuffle_hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-batch"] -===== `batch` - - * Value type is <> - * Default value is `false` - -Set to true if you want Redis to batch up values and send 1 RPUSH command -instead of one command per value to push on the list. Note that this only -works with `data_type="list"` mode right now. - -If true, we send an RPUSH every "batch_events" events or -"batch_timeout" seconds (whichever comes first). -Only supported for `data_type` is "list". - -[id="{version}-plugins-{type}s-{plugin}-batch_events"] -===== `batch_events` - - * Value type is <> - * Default value is `50` - -If batch is set to true, the number of events we queue up for an RPUSH. - -[id="{version}-plugins-{type}s-{plugin}-batch_timeout"] -===== `batch_timeout` - - * Value type is <> - * Default value is `5` - -If batch is set to true, the maximum amount of time between RPUSH commands -when there are pending events to flush. - -[id="{version}-plugins-{type}s-{plugin}-congestion_interval"] -===== `congestion_interval` - - * Value type is <> - * Default value is `1` - -How often to check for congestion. Default is one second. -Zero means to check on every event. 
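-
-As another illustrative sketch (all values hypothetical), connecting to a
-password-protected Redis on a non-default port and database, using the
-`password`, `port`, and `db` settings documented later in this section:
-[source,ruby]
-    output {
-      redis {
-        host => ["10.0.0.5"]     # hypothetical server address
-        port => 6380             # non-default port
-        db => 1                  # non-default database number
-        password => "s3cr3t"     # hypothetical credential
-        data_type => "list"
-        key => "logstash"
-      }
-    }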
- -[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"] -===== `congestion_threshold` - - * Value type is <> - * Default value is `0` - -In case Redis `data_type` is `list` and has more than `@congestion_threshold` items, -block until someone consumes them and reduces congestion, otherwise if there are -no consumers Redis will run out of memory, unless it was configured with OOM protection. -But even with OOM protection, a single Redis list can block all other users of Redis, -until Redis CPU consumption reaches the max allowed RAM size. -A default value of 0 means that this limit is disabled. -Only supported for `list` Redis `data_type`. - -[id="{version}-plugins-{type}s-{plugin}-data_type"] -===== `data_type` - - * Value can be any of: `list`, `channel` - * There is no default value for this setting. - -Either list or channel. If `data_type` is list, then we will set -RPUSH to key. If `data_type` is channel, then we will PUBLISH to `key`. - -[id="{version}-plugins-{type}s-{plugin}-db"] -===== `db` - - * Value type is <> - * Default value is `0` - -The Redis database number. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `["127.0.0.1"]` - -The hostname(s) of your Redis server(s). Ports may be specified on any -hostname, which will override the global port config. -If the hosts list is an array, Logstash will pick one random host to connect to, -if that host is disconnected it will then pick another. - -For example: -[source,ruby] - "127.0.0.1" - ["127.0.0.1", "127.0.0.2"] - ["127.0.0.1:6380", "127.0.0.1"] - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * Value type is <> - * There is no default value for this setting. - -The name of a Redis list or channel. Dynamic names are -valid here, for example `logstash-%{type}`. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password to authenticate with. There is no authentication by default. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `6379` - -The default port to connect on. Can be overridden on any hostname. - -[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] -===== `reconnect_interval` - - * Value type is <> - * Default value is `1` - -Interval for reconnecting to failed Redis connections - -[id="{version}-plugins-{type}s-{plugin}-shuffle_hosts"] -===== `shuffle_hosts` - - * Value type is <> - * Default value is `true` - -Shuffle the host list during Logstash startup. - -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * Default value is `5` - -Redis initial connection timeout in seconds. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/redis-v4.0.3.asciidoc b/docs/versioned-plugins/outputs/redis-v4.0.3.asciidoc deleted file mode 100644 index acf8fbbb3..000000000 --- a/docs/versioned-plugins/outputs/redis-v4.0.3.asciidoc +++ /dev/null @@ -1,202 +0,0 @@ -:plugin: redis -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v4.0.3 -:release_date: 2017-11-13 -:changelog_url: https://github.com/logstash-plugins/logstash-output-redis/blob/v4.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Redis output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output will send events to a Redis queue using RPUSH. -The RPUSH command is supported in Redis v0.0.7+. Using -PUBLISH to a channel requires at least v1.3.8+. -While you may be able to make these Redis versions work, -the best performance and stability will be found in more -recent stable versions. Versions 2.6.0+ are recommended. - -For more information, see http://redis.io/[the Redis homepage] - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Redis Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-batch>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-batch_events>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-batch_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-congestion_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-congestion_threshold>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-data_type>> |<>, one of `["list", "channel"]`|No -| <<{version}-plugins-{type}s-{plugin}-db>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-shuffle_hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-batch"] -===== `batch` - - * Value type is <> - * Default value is `false` - -Set to true if you want Redis to batch up values and send 1 RPUSH command -instead of one command per value to push on the list. Note that this only -works with `data_type="list"` mode right now. - -If true, we send an RPUSH every "batch_events" events or -"batch_timeout" seconds (whichever comes first). -Only supported for `data_type` is "list". - -[id="{version}-plugins-{type}s-{plugin}-batch_events"] -===== `batch_events` - - * Value type is <> - * Default value is `50` - -If batch is set to true, the number of events we queue up for an RPUSH. - -[id="{version}-plugins-{type}s-{plugin}-batch_timeout"] -===== `batch_timeout` - - * Value type is <> - * Default value is `5` - -If batch is set to true, the maximum amount of time between RPUSH commands -when there are pending events to flush. - -[id="{version}-plugins-{type}s-{plugin}-congestion_interval"] -===== `congestion_interval` - - * Value type is <> - * Default value is `1` - -How often to check for congestion. Default is one second. -Zero means to check on every event. 
- -[id="{version}-plugins-{type}s-{plugin}-congestion_threshold"] -===== `congestion_threshold` - - * Value type is <> - * Default value is `0` - -In case Redis `data_type` is `list` and has more than `@congestion_threshold` items, -block until someone consumes them and reduces congestion, otherwise if there are -no consumers Redis will run out of memory, unless it was configured with OOM protection. -But even with OOM protection, a single Redis list can block all other users of Redis, -until Redis CPU consumption reaches the max allowed RAM size. -A default value of 0 means that this limit is disabled. -Only supported for `list` Redis `data_type`. - -[id="{version}-plugins-{type}s-{plugin}-data_type"] -===== `data_type` - - * Value can be any of: `list`, `channel` - * There is no default value for this setting. - -Either list or channel. If `data_type` is list, then we will set -RPUSH to key. If `data_type` is channel, then we will PUBLISH to `key`. - -[id="{version}-plugins-{type}s-{plugin}-db"] -===== `db` - - * Value type is <> - * Default value is `0` - -The Redis database number. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `["127.0.0.1"]` - -The hostname(s) of your Redis server(s). Ports may be specified on any -hostname, which will override the global port config. -If the hosts list is an array, Logstash will pick one random host to connect to, -if that host is disconnected it will then pick another. - -For example: -[source,ruby] - "127.0.0.1" - ["127.0.0.1", "127.0.0.2"] - ["127.0.0.1:6380", "127.0.0.1"] - -[id="{version}-plugins-{type}s-{plugin}-key"] -===== `key` - - * Value type is <> - * There is no default value for this setting. - -The name of a Redis list or channel. Dynamic names are -valid here, for example `logstash-%{type}`. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * There is no default value for this setting. - -Password to authenticate with. There is no authentication by default. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `6379` - -The default port to connect on. Can be overridden on any hostname. - -[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] -===== `reconnect_interval` - - * Value type is <> - * Default value is `1` - -Interval for reconnecting to failed Redis connections - -[id="{version}-plugins-{type}s-{plugin}-shuffle_hosts"] -===== `shuffle_hosts` - - * Value type is <> - * Default value is `true` - -Shuffle the host list during Logstash startup. - -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * Default value is `5` - -Redis initial connection timeout in seconds. 
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/redmine-index.asciidoc b/docs/versioned-plugins/outputs/redmine-index.asciidoc
deleted file mode 100644
index e97f2498a..000000000
--- a/docs/versioned-plugins/outputs/redmine-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: redmine
-:type: output
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-13
-| <> | 2017-08-16
-| <> | 2017-06-23
-|=======================================================================
-
-include::redmine-v3.0.3.asciidoc[]
-include::redmine-v3.0.2.asciidoc[]
-include::redmine-v3.0.1.asciidoc[]
-
diff --git a/docs/versioned-plugins/outputs/redmine-v3.0.1.asciidoc b/docs/versioned-plugins/outputs/redmine-v3.0.1.asciidoc
deleted file mode 100644
index 63c054c02..000000000
--- a/docs/versioned-plugins/outputs/redmine-v3.0.1.asciidoc
+++ /dev/null
@@ -1,192 +0,0 @@
-:plugin: redmine
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.1
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-output-redmine/blob/v3.0.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="plugins-{type}-{plugin}"]
-
-=== Redmine output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-The redmine output is used to create a ticket in Redmine via its REST API.
-
-It sends a POST request in JSON format and uses token authentication.
-
-
--- Example of use --
-[source,ruby]
-  output {
-    redmine {
-      url => "http://redmineserver.tld"
-      token => 'token'
-      project_id => 200
-      tracker_id => 1
-      status_id => 3
-      priority_id => 2
-      subject => "Error ... detected"
-    }
-  }
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Redmine Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-assigned_to_id>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-categorie_id>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-description>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-fixed_version_id>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-parent_issue_id>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-priority_id>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-project_id>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-status_id>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-subject>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-token>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-tracker_id>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
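-
-Because every matching event creates a ticket, you will usually want to
-guard this output with a conditional. A hedged sketch, reusing the values
-from the example above (the `loglevel` field name is hypothetical):
-[source,ruby]
-    output {
-      if [loglevel] == "ERROR" {
-        redmine {
-          url => "http://redmineserver.tld"
-          token => 'token'
-          project_id => 200
-          tracker_id => 1
-          status_id => 3
-          priority_id => 2
-          subject => "Error on %{host}"   # sprintf fields are valid here
-        }
-      }
-    }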
- -  - -[id="{version}-plugins-{type}s-{plugin}-assigned_to_id"] -===== `assigned_to_id` - - * Value type is <> - * Default value is `nil` - -redmine issue assigned_to -not required for post_issue - -[id="{version}-plugins-{type}s-{plugin}-categorie_id"] -===== `categorie_id` - - * Value type is <> - * Default value is `nil` - -not required for post_issue - -[id="{version}-plugins-{type}s-{plugin}-description"] -===== `description` - - * Value type is <> - * Default value is `"%{message}"` - -redmine issue description -required - -[id="{version}-plugins-{type}s-{plugin}-fixed_version_id"] -===== `fixed_version_id` - - * Value type is <> - * Default value is `nil` - -redmine issue fixed_version_id - -[id="{version}-plugins-{type}s-{plugin}-parent_issue_id"] -===== `parent_issue_id` - - * Value type is <> - * Default value is `nil` - -redmine issue parent_issue_id -not required for post_issue - -[id="{version}-plugins-{type}s-{plugin}-priority_id"] -===== `priority_id` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -redmine issue priority_id -required - -[id="{version}-plugins-{type}s-{plugin}-project_id"] -===== `project_id` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -redmine issue projet_id -required - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` - - * Value type is <> - * Default value is `false` - - - -[id="{version}-plugins-{type}s-{plugin}-status_id"] -===== `status_id` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -redmine issue status_id -required - -[id="{version}-plugins-{type}s-{plugin}-subject"] -===== `subject` - - * Value type is <> - * Default value is `"%{host}"` - -redmine issue subject -required - -[id="{version}-plugins-{type}s-{plugin}-token"] -===== `token` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -redmine token user used for authentication - -[id="{version}-plugins-{type}s-{plugin}-tracker_id"] -===== `tracker_id` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -redmine issue tracker_id -required - -[id="{version}-plugins-{type}s-{plugin}-url"] -===== `url` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -host of redmine app -value format : 'http://urlofredmine.tld' - Not add '/issues' at end - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/redmine-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/redmine-v3.0.2.asciidoc deleted file mode 100644 index 029c4dbfb..000000000 --- a/docs/versioned-plugins/outputs/redmine-v3.0.2.asciidoc +++ /dev/null @@ -1,192 +0,0 @@ -:plugin: redmine -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.2 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-redmine/blob/v3.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Redmine output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The redmine output is used to create a ticket via the API redmine. - -It send a POST request in a JSON format and use TOKEN authentication - - --- Exemple of use -- -[source,ruby] - output { - redmine { - url => "http://redmineserver.tld" - token => 'token' - project_id => 200 - tracker_id => 1 - status_id => 3 - priority_id => 2 - subject => "Error ... detected" - } - } - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Redmine Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-assigned_to_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-categorie_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-description>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fixed_version_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parent_issue_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-priority_id>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-project_id>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-status_id>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-subject>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-token>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-tracker_id>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-assigned_to_id"] -===== `assigned_to_id` - - * Value type is <> - * Default value is `nil` - -redmine issue assigned_to -not required for post_issue - -[id="{version}-plugins-{type}s-{plugin}-categorie_id"] -===== `categorie_id` - - * Value type is <> - * Default value is `nil` - -not required for post_issue - -[id="{version}-plugins-{type}s-{plugin}-description"] -===== `description` - - * Value type is <> - * Default value is `"%{message}"` - -redmine issue description -required - -[id="{version}-plugins-{type}s-{plugin}-fixed_version_id"] -===== `fixed_version_id` - - * Value type is <> - * Default value is `nil` - -redmine issue fixed_version_id - -[id="{version}-plugins-{type}s-{plugin}-parent_issue_id"] -===== `parent_issue_id` - - * Value type is <> - * Default value is `nil` - -redmine issue parent_issue_id -not required for post_issue - -[id="{version}-plugins-{type}s-{plugin}-priority_id"] -===== `priority_id` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -redmine issue priority_id -required - -[id="{version}-plugins-{type}s-{plugin}-project_id"] -===== `project_id` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -redmine issue projet_id -required - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` - - * Value type is <> - * Default value is `false` - - - -[id="{version}-plugins-{type}s-{plugin}-status_id"] -===== `status_id` - - * This is a required setting. 
- * Value type is <> - * There is no default value for this setting. - -redmine issue status_id -required - -[id="{version}-plugins-{type}s-{plugin}-subject"] -===== `subject` - - * Value type is <> - * Default value is `"%{host}"` - -redmine issue subject -required - -[id="{version}-plugins-{type}s-{plugin}-token"] -===== `token` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -redmine token user used for authentication - -[id="{version}-plugins-{type}s-{plugin}-tracker_id"] -===== `tracker_id` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -redmine issue tracker_id -required - -[id="{version}-plugins-{type}s-{plugin}-url"] -===== `url` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -host of redmine app -value format : 'http://urlofredmine.tld' - Not add '/issues' at end - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/redmine-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/redmine-v3.0.3.asciidoc deleted file mode 100644 index 474b2b8c8..000000000 --- a/docs/versioned-plugins/outputs/redmine-v3.0.3.asciidoc +++ /dev/null @@ -1,192 +0,0 @@ -:plugin: redmine -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-11-13 -:changelog_url: https://github.com/logstash-plugins/logstash-output-redmine/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Redmine output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The redmine output is used to create a ticket via the API redmine. - -It send a POST request in a JSON format and use TOKEN authentication - - --- Exemple of use -- -[source,ruby] - output { - redmine { - url => "http://redmineserver.tld" - token => 'token' - project_id => 200 - tracker_id => 1 - status_id => 3 - priority_id => 2 - subject => "Error ... detected" - } - } - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Redmine Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-assigned_to_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-categorie_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-description>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fixed_version_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parent_issue_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-priority_id>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-project_id>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-status_id>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-subject>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-token>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-tracker_id>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-url>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-assigned_to_id"] -===== `assigned_to_id` - - * Value type is <> - * Default value is `nil` - -redmine issue assigned_to -not required for post_issue - -[id="{version}-plugins-{type}s-{plugin}-categorie_id"] -===== `categorie_id` - - * Value type is <> - * Default value is `nil` - -not required for post_issue - -[id="{version}-plugins-{type}s-{plugin}-description"] -===== `description` - - * Value type is <> - * Default value is `"%{message}"` - -redmine issue description -required - -[id="{version}-plugins-{type}s-{plugin}-fixed_version_id"] -===== `fixed_version_id` - - * Value type is <> - * Default value is `nil` - -redmine issue fixed_version_id - -[id="{version}-plugins-{type}s-{plugin}-parent_issue_id"] -===== `parent_issue_id` - - * Value type is <> - * Default value is `nil` - -redmine issue parent_issue_id -not required for post_issue - -[id="{version}-plugins-{type}s-{plugin}-priority_id"] -===== `priority_id` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -redmine issue priority_id -required - -[id="{version}-plugins-{type}s-{plugin}-project_id"] -===== `project_id` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -redmine issue projet_id -required - -[id="{version}-plugins-{type}s-{plugin}-ssl"] -===== `ssl` - - * Value type is <> - * Default value is `false` - - - -[id="{version}-plugins-{type}s-{plugin}-status_id"] -===== `status_id` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -redmine issue status_id -required - -[id="{version}-plugins-{type}s-{plugin}-subject"] -===== `subject` - - * Value type is <> - * Default value is `"%{host}"` - -redmine issue subject -required - -[id="{version}-plugins-{type}s-{plugin}-token"] -===== `token` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -redmine token user used for authentication - -[id="{version}-plugins-{type}s-{plugin}-tracker_id"] -===== `tracker_id` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -redmine issue tracker_id -required - -[id="{version}-plugins-{type}s-{plugin}-url"] -===== `url` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. 
- -host of redmine app -value format : 'http://urlofredmine.tld' - Not add '/issues' at end - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/riak-index.asciidoc b/docs/versioned-plugins/outputs/riak-index.asciidoc deleted file mode 100644 index e0af7b4c2..000000000 --- a/docs/versioned-plugins/outputs/riak-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: riak -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-13 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::riak-v3.0.3.asciidoc[] -include::riak-v3.0.2.asciidoc[] -include::riak-v3.0.1.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/riak-v3.0.1.asciidoc b/docs/versioned-plugins/outputs/riak-v3.0.1.asciidoc deleted file mode 100644 index c1c739f52..000000000 --- a/docs/versioned-plugins/outputs/riak-v3.0.1.asciidoc +++ /dev/null @@ -1,177 +0,0 @@ -:plugin: riak -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-riak/blob/v3.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Riak output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Riak is a distributed k/v store from Basho. -It's based on the Dynamo model. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Riak Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-bucket>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bucket_props>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-enable_search>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-enable_ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-indices>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-key_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-nodes>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proto>> |<>, one of `["http", "pb"]`|No -| <<{version}-plugins-{type}s-{plugin}-ssl_opts>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-bucket"] -===== `bucket` - - * Value type is <> - * Default value is `["logstash-%{+YYYY.MM.dd}"]` - -The bucket name to write events to -Expansion is supported here as values are -passed through event.sprintf -Multiple buckets can be specified here -but any bucket-specific settings defined -apply to ALL the buckets. - -[id="{version}-plugins-{type}s-{plugin}-bucket_props"] -===== `bucket_props` - - * Value type is <> - * There is no default value for this setting. 
-
-Bucket properties (NYI).
-A Logstash hash of properties for the bucket, for example:
-[source,ruby]
-    bucket_props => {
-        "r" => "one"
-        "w" => "one"
-        "dw" => "one"
-    }
-or
-[source,ruby]
-    bucket_props => { "n_val" => "3" }
-Properties will be passed as-is.
-
-[id="{version}-plugins-{type}s-{plugin}-enable_search"]
-===== `enable_search`
-
- * Value type is <>
- * Default value is `false`
-
-Enable search on the bucket defined above.
-
-[id="{version}-plugins-{type}s-{plugin}-enable_ssl"]
-===== `enable_ssl`
-
- * Value type is <>
- * Default value is `false`
-
-Enable SSL.
-
-[id="{version}-plugins-{type}s-{plugin}-indices"]
-===== `indices`
-
- * Value type is <>
- * There is no default value for this setting.
-
-An array of fields to add secondary indexes (2i) on, for example:
-[source,ruby]
-    indices => ["source_host", "type"]
-Off by default, as not everyone runs eleveldb.
-
-[id="{version}-plugins-{type}s-{plugin}-key_name"]
-===== `key_name`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The event key name. Variables are valid here.
-
-Choose this carefully. It is usually best to let Riak decide.
-
-[id="{version}-plugins-{type}s-{plugin}-nodes"]
-===== `nodes`
-
- * Value type is <>
- * Default value is `{"localhost"=>"8098"}`
-
-The nodes of your Riak cluster.
-This can be a single host or
-a Logstash hash of node/port pairs, e.g.:
-[source,ruby]
-    {
-        "node1" => "8098"
-        "node2" => "8098"
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-proto"]
-===== `proto`
-
- * Value can be any of: `http`, `pb`
- * Default value is `"http"`
-
-The protocol to use: HTTP or ProtoBuf.
-This applies to ALL the nodes listed above;
-mixing protocols is not supported.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_opts"]
-===== `ssl_opts`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Options for SSL connections.
-Only applied if SSL is enabled.
-This is a Logstash hash that maps to the riak-client options
-described here: https://github.com/basho/riak-ruby-client/wiki/Connecting-to-Riak
-You'll likely want something like this:
-
-[source,ruby]
-    ssl_opts => {
-        "pem" => "/etc/riak.pem"
-        "ca_path" => "/usr/share/certificates"
-    }
-
-Per the riak-client docs, the sample options above
-will turn on SSL `VERIFY_PEER`.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/riak-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/riak-v3.0.2.asciidoc
deleted file mode 100644
index 6ad882972..000000000
--- a/docs/versioned-plugins/outputs/riak-v3.0.2.asciidoc
+++ /dev/null
@@ -1,177 +0,0 @@
-:plugin: riak
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.2
-:release_date: 2017-08-16
-:changelog_url: https://github.com/logstash-plugins/logstash-output-riak/blob/v3.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Riak output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Riak is a distributed k/v store from Basho.
-It's based on the Dynamo model.
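-
-A minimal sketch of this output (the node address and bucket name are
-hypothetical; both mirror the documented defaults) might look like:
-[source,ruby]
-    output {
-      riak {
-        nodes => { "localhost" => "8098" }        # node/port pairs of the cluster
-        bucket => ["logstash-%{+YYYY.MM.dd}"]     # sprintf expansion is supported
-        proto => "http"                           # or "pb" for Protocol Buffers
-      }
-    }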
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Riak Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-bucket>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-bucket_props>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-enable_search>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-enable_ssl>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-indices>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-key_name>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-nodes>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-proto>> |<<string,string>>, one of `["http", "pb"]`|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_opts>> |<<hash,hash>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-bucket"]
-===== `bucket`
-
-  * Value type is <<array,array>>
-  * Default value is `["logstash-%{+YYYY.MM.dd}"]`
-
-The bucket name to write events to.
-Expansion is supported here, as values are passed through `event.sprintf`.
-Multiple buckets can be specified,
-but any bucket-specific settings defined apply to ALL the buckets.
-
-[id="{version}-plugins-{type}s-{plugin}-bucket_props"]
-===== `bucket_props`
-
-  * Value type is <<hash,hash>>
-  * There is no default value for this setting.
-
-Bucket properties (NYI).
-A Logstash hash of properties for the bucket, e.g.
-[source,ruby]
-    bucket_props => {
-      "r" => "one"
-      "w" => "one"
-      "dw" => "one"
-    }
-or
-[source,ruby]
-    bucket_props => { "n_val" => "3" }
-Properties will be passed as-is.
-
-[id="{version}-plugins-{type}s-{plugin}-enable_search"]
-===== `enable_search`
-
-  * Value type is <<boolean,boolean>>
-  * Default value is `false`
-
-Search:
-enable search on the bucket defined above.
-
-[id="{version}-plugins-{type}s-{plugin}-enable_ssl"]
-===== `enable_ssl`
-
-  * Value type is <<boolean,boolean>>
-  * Default value is `false`
-
-SSL:
-enable SSL.
-
-[id="{version}-plugins-{type}s-{plugin}-indices"]
-===== `indices`
-
-  * Value type is <<array,array>>
-  * There is no default value for this setting.
-
-Indices:
-an array of fields to add secondary indexes (2i) on, e.g.
-[source,ruby]
-    indices => ["source_host", "type"]
-Off by default, as not everyone runs eleveldb.
-
-[id="{version}-plugins-{type}s-{plugin}-key_name"]
-===== `key_name`
-
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-The event key name.
-Variables are valid here.
-
-Choose this carefully; it's usually best to let Riak decide.
-
-[id="{version}-plugins-{type}s-{plugin}-nodes"]
-===== `nodes`
-
-  * Value type is <<hash,hash>>
-  * Default value is `{"localhost"=>"8098"}`
-
-The nodes of your Riak cluster.
-This can be a single host or a Logstash hash of node/port pairs, e.g.
-[source,ruby]
-    {
-      "node1" => "8098"
-      "node2" => "8098"
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-proto"]
-===== `proto`
-
-  * Value can be any of: `http`, `pb`
-  * Default value is `"http"`
-
-The protocol to use:
-HTTP (`http`) or Protocol Buffers (`pb`).
-Applies to ALL backends listed above;
-no mixing and matching.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_opts"]
-===== `ssl_opts`
-
-  * Value type is <<hash,hash>>
-  * There is no default value for this setting.
-
-SSL options:
-options for SSL connections, only applied if SSL is enabled.
-A Logstash hash that maps to the riak-client options
-listed here: https://github.com/basho/riak-ruby-client/wiki/Connecting-to-Riak
-You'll likely want something like this:
-
-[source,ruby]
-    ssl_opts => {
-      "pem" => "/etc/riak.pem"
-      "ca_path" => "/usr/share/certificates"
-    }
-
-Per the riak client docs, the above sample options
-will turn on SSL `VERIFY_PEER`.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/riak-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/riak-v3.0.3.asciidoc
deleted file mode 100644
index 08762aeed..000000000
--- a/docs/versioned-plugins/outputs/riak-v3.0.3.asciidoc
+++ /dev/null
@@ -1,177 +0,0 @@
-:plugin: riak
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.3
-:release_date: 2017-11-13
-:changelog_url: https://github.com/logstash-plugins/logstash-output-riak/blob/v3.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Riak output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Riak is a distributed key/value store from Basho.
-It's based on the Dynamo model.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Riak Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-bucket>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-bucket_props>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-enable_search>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-enable_ssl>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-indices>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-key_name>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-nodes>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-proto>> |<<string,string>>, one of `["http", "pb"]`|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_opts>> |<<hash,hash>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-bucket"]
-===== `bucket`
-
-  * Value type is <<array,array>>
-  * Default value is `["logstash-%{+YYYY.MM.dd}"]`
-
-The bucket name to write events to.
-Expansion is supported here, as values are passed through `event.sprintf`.
-Multiple buckets can be specified,
-but any bucket-specific settings defined apply to ALL the buckets.
-
-[id="{version}-plugins-{type}s-{plugin}-bucket_props"]
-===== `bucket_props`
-
-  * Value type is <<hash,hash>>
-  * There is no default value for this setting.
-
-Bucket properties (NYI).
-A Logstash hash of properties for the bucket, e.g.
-[source,ruby]
-    bucket_props => {
-      "r" => "one"
-      "w" => "one"
-      "dw" => "one"
-    }
-or
-[source,ruby]
-    bucket_props => { "n_val" => "3" }
-Properties will be passed as-is.
-
-[id="{version}-plugins-{type}s-{plugin}-enable_search"]
-===== `enable_search`
-
-  * Value type is <<boolean,boolean>>
-  * Default value is `false`
-
-Search:
-enable search on the bucket defined above.
-
-[id="{version}-plugins-{type}s-{plugin}-enable_ssl"]
-===== `enable_ssl`
-
-  * Value type is <<boolean,boolean>>
-  * Default value is `false`
-
-SSL:
-enable SSL.
-
-[id="{version}-plugins-{type}s-{plugin}-indices"]
-===== `indices`
-
-  * Value type is <<array,array>>
-  * There is no default value for this setting.
-
-Indices:
-an array of fields to add secondary indexes (2i) on, e.g.
-[source,ruby]
-    indices => ["source_host", "type"]
-Off by default, as not everyone runs eleveldb.
-
-[id="{version}-plugins-{type}s-{plugin}-key_name"]
-===== `key_name`
-
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-The event key name.
-Variables are valid here.
-
-Choose this carefully; it's usually best to let Riak decide.
-
-[id="{version}-plugins-{type}s-{plugin}-nodes"]
-===== `nodes`
-
-  * Value type is <<hash,hash>>
-  * Default value is `{"localhost"=>"8098"}`
-
-The nodes of your Riak cluster.
-This can be a single host or a Logstash hash of node/port pairs, e.g.
-[source,ruby]
-    {
-      "node1" => "8098"
-      "node2" => "8098"
-    }
-
-[id="{version}-plugins-{type}s-{plugin}-proto"]
-===== `proto`
-
-  * Value can be any of: `http`, `pb`
-  * Default value is `"http"`
-
-The protocol to use:
-HTTP (`http`) or Protocol Buffers (`pb`).
-Applies to ALL backends listed above;
-no mixing and matching.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_opts"]
-===== `ssl_opts`
-
-  * Value type is <<hash,hash>>
-  * There is no default value for this setting.
-
-SSL options:
-options for SSL connections, only applied if SSL is enabled.
-A Logstash hash that maps to the riak-client options
-listed here: https://github.com/basho/riak-ruby-client/wiki/Connecting-to-Riak
-You'll likely want something like this:
-
-[source,ruby]
-    ssl_opts => {
-      "pem" => "/etc/riak.pem"
-      "ca_path" => "/usr/share/certificates"
-    }
-
-Per the riak client docs, the above sample options
-will turn on SSL `VERIFY_PEER`.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/riemann-index.asciidoc b/docs/versioned-plugins/outputs/riemann-index.asciidoc
deleted file mode 100644
index 059555469..000000000
--- a/docs/versioned-plugins/outputs/riemann-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: riemann
-:type: output
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-13
-| <> | 2017-08-16
-| <> | 2017-06-23
-|=======================================================================
-
-include::riemann-v3.0.3.asciidoc[]
-include::riemann-v3.0.2.asciidoc[]
-include::riemann-v3.0.1.asciidoc[]
-
diff --git a/docs/versioned-plugins/outputs/riemann-v3.0.1.asciidoc b/docs/versioned-plugins/outputs/riemann-v3.0.1.asciidoc
deleted file mode 100644
index 511a4763f..000000000
--- a/docs/versioned-plugins/outputs/riemann-v3.0.1.asciidoc
+++ /dev/null
@@ -1,178 +0,0 @@
-:plugin: riemann
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.1
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-output-riemann/blob/v3.0.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Riemann output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Riemann is a network event stream processing system.
-
-While Riemann is conceptually very similar to Logstash, it goes
-much further toward being a full monitoring system replacement.
-
-Riemann is used from Logstash much like statsd or other metric-related
-outputs.
-
-You can learn about Riemann here:
-
-* http://riemann.io/
-
-You can see the author talk about it here:
-
-* http://vimeo.com/38377415
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Riemann Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-debug>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-map_fields>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-protocol>> |<<string,string>>, one of `["tcp", "udp"]`|No
-| <<{version}-plugins-{type}s-{plugin}-riemann_event>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-sender>> |<<string,string>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-debug"]
-===== `debug`
-
-  * Value type is <<boolean,boolean>>
-  * Default value is `false`
-
-Enable debugging output?
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
-  * Value type is <<string,string>>
-  * Default value is `"localhost"`
-
-The address of the Riemann server.
-
-[id="{version}-plugins-{type}s-{plugin}-map_fields"]
-===== `map_fields`
-
-  * Value type is <<boolean,boolean>>
-  * Default value is `false`
-
-If set to true, all fields defined on the Logstash event are automatically mapped to Riemann event fields.
-Nested Logstash fields are mapped to Riemann fields named with all parent keys
-joined by dots, holding the deepest value.
-
-As an example, the Logstash event:
-[source,ruby]
-    {
-      "@timestamp":"2013-12-10T14:36:26.151+0000",
-      "@version": 1,
-      "message":"log message",
-      "host": "host.domain.com",
-      "nested_field": {
-        "key": "value"
-      }
-    }
-is mapped to this Riemann event:
-[source,ruby]
-    {
-      :time 1386686186,
-      :host host.domain.com,
-      :message log message,
-      :nested_field.key value
-    }
-
-It can be used in conjunction with, or independently of, the riemann_event option.
-When used together with riemann_event, any duplicate keys receive their value from
-riemann_event instead of the Logstash event itself.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
-  * Value type is <<number,number>>
-  * Default value is `5555`
-
-The port to connect to on your Riemann server.
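-
-Taken together with `host` above and `protocol` (described next), a minimal connection sketch might look like this; the server address is illustrative:
-
-[source,ruby]
-    output {
-      riemann {
-        host => "riemann.example.com"  # illustrative; defaults to "localhost"
-        port => 5555                   # default
-        protocol => "tcp"              # default; "udp" is non-blocking
-      }
-    }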
-
-[id="{version}-plugins-{type}s-{plugin}-protocol"]
-===== `protocol`
-
-  * Value can be any of: `tcp`, `udp`
-  * Default value is `"tcp"`
-
-The protocol to use:
-UDP is non-blocking,
-TCP is blocking.
-
-Logstash's default output behaviour
-is to never lose events.
-As such, we use tcp as the default here.
-
-[id="{version}-plugins-{type}s-{plugin}-riemann_event"]
-===== `riemann_event`
-
-  * Value type is <<hash,hash>>
-  * There is no default value for this setting.
-
-A Hash to set Riemann event fields
-(http://riemann.io/concepts.html).
-
-The following event fields are supported:
-`description`, `state`, `metric`, `ttl`, `service`
-
-Tags found on the Logstash event will automatically be added to the
-Riemann event.
-
-Any other field set here will be passed to Riemann as an event attribute.
-
-Example:
-[source,ruby]
-    riemann {
-      riemann_event => {
-        "metric" => "%{metric}"
-        "service" => "%{service}"
-      }
-    }
-
-`metric` and `ttl` values will be coerced to a floating point value.
-Values which cannot be coerced become zero (0.0).
-
-`description`, by default, will be set to the event message
-but can be overridden here.
-
-[id="{version}-plugins-{type}s-{plugin}-sender"]
-===== `sender`
-
-  * Value type is <<string,string>>
-  * Default value is `"%{host}"`
-
-The name of the sender.
-This sets the `host` value
-in the Riemann event.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/riemann-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/riemann-v3.0.2.asciidoc
deleted file mode 100644
index 05127d6f2..000000000
--- a/docs/versioned-plugins/outputs/riemann-v3.0.2.asciidoc
+++ /dev/null
@@ -1,178 +0,0 @@
-:plugin: riemann
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.2
-:release_date: 2017-08-16
-:changelog_url: https://github.com/logstash-plugins/logstash-output-riemann/blob/v3.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Riemann output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Riemann is a network event stream processing system.
-
-While Riemann is conceptually very similar to Logstash, it goes
-much further toward being a full monitoring system replacement.
-
-Riemann is used from Logstash much like statsd or other metric-related
-outputs.
-
-You can learn about Riemann here:
-
-* http://riemann.io/
-
-You can see the author talk about it here:
-
-* http://vimeo.com/38377415
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Riemann Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-debug>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-map_fields>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-protocol>> |<<string,string>>, one of `["tcp", "udp"]`|No
-| <<{version}-plugins-{type}s-{plugin}-riemann_event>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-sender>> |<<string,string>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-debug"]
-===== `debug`
-
-  * Value type is <<boolean,boolean>>
-  * Default value is `false`
-
-Enable debugging output?
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
-  * Value type is <<string,string>>
-  * Default value is `"localhost"`
-
-The address of the Riemann server.
-
-[id="{version}-plugins-{type}s-{plugin}-map_fields"]
-===== `map_fields`
-
-  * Value type is <<boolean,boolean>>
-  * Default value is `false`
-
-If set to true, all fields defined on the Logstash event are automatically mapped to Riemann event fields.
-Nested Logstash fields are mapped to Riemann fields named with all parent keys
-joined by dots, holding the deepest value.
-
-As an example, the Logstash event:
-[source,ruby]
-    {
-      "@timestamp":"2013-12-10T14:36:26.151+0000",
-      "@version": 1,
-      "message":"log message",
-      "host": "host.domain.com",
-      "nested_field": {
-        "key": "value"
-      }
-    }
-is mapped to this Riemann event:
-[source,ruby]
-    {
-      :time 1386686186,
-      :host host.domain.com,
-      :message log message,
-      :nested_field.key value
-    }
-
-It can be used in conjunction with, or independently of, the riemann_event option.
-When used together with riemann_event, any duplicate keys receive their value from
-riemann_event instead of the Logstash event itself.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
-  * Value type is <<number,number>>
-  * Default value is `5555`
-
-The port to connect to on your Riemann server.
-
-[id="{version}-plugins-{type}s-{plugin}-protocol"]
-===== `protocol`
-
-  * Value can be any of: `tcp`, `udp`
-  * Default value is `"tcp"`
-
-The protocol to use:
-UDP is non-blocking,
-TCP is blocking.
-
-Logstash's default output behaviour
-is to never lose events.
-As such, we use tcp as the default here.
-
-[id="{version}-plugins-{type}s-{plugin}-riemann_event"]
-===== `riemann_event`
-
-  * Value type is <<hash,hash>>
-  * There is no default value for this setting.
-
-A Hash to set Riemann event fields
-(http://riemann.io/concepts.html).
-
-The following event fields are supported:
-`description`, `state`, `metric`, `ttl`, `service`
-
-Tags found on the Logstash event will automatically be added to the
-Riemann event.
-
-Any other field set here will be passed to Riemann as an event attribute.
-
-Example:
-[source,ruby]
-    riemann {
-      riemann_event => {
-        "metric" => "%{metric}"
-        "service" => "%{service}"
-      }
-    }
-
-`metric` and `ttl` values will be coerced to a floating point value.
-Values which cannot be coerced become zero (0.0).
-
-`description`, by default, will be set to the event message
-but can be overridden here.
-
-[id="{version}-plugins-{type}s-{plugin}-sender"]
-===== `sender`
-
-  * Value type is <<string,string>>
-  * Default value is `"%{host}"`
-
-The name of the sender.
-This sets the `host` value
-in the Riemann event.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/riemann-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/riemann-v3.0.3.asciidoc
deleted file mode 100644
index 68538cf4f..000000000
--- a/docs/versioned-plugins/outputs/riemann-v3.0.3.asciidoc
+++ /dev/null
@@ -1,178 +0,0 @@
-:plugin: riemann
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.3
-:release_date: 2017-11-13
-:changelog_url: https://github.com/logstash-plugins/logstash-output-riemann/blob/v3.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Riemann output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Riemann is a network event stream processing system.
-
-While Riemann is conceptually very similar to Logstash, it goes
-much further toward being a full monitoring system replacement.
-
-Riemann is used from Logstash much like statsd or other metric-related
-outputs.
-
-You can learn about Riemann here:
-
-* http://riemann.io/
-
-You can see the author talk about it here:
-
-* http://vimeo.com/38377415
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Riemann Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-debug>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-map_fields>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-protocol>> |<<string,string>>, one of `["tcp", "udp"]`|No
-| <<{version}-plugins-{type}s-{plugin}-riemann_event>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-sender>> |<<string,string>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-debug"]
-===== `debug`
-
-  * Value type is <<boolean,boolean>>
-  * Default value is `false`
-
-Enable debugging output?
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
-  * Value type is <<string,string>>
-  * Default value is `"localhost"`
-
-The address of the Riemann server.
-
-[id="{version}-plugins-{type}s-{plugin}-map_fields"]
-===== `map_fields`
-
-  * Value type is <<boolean,boolean>>
-  * Default value is `false`
-
-If set to true, all fields defined on the Logstash event are automatically mapped to Riemann event fields.
-Nested Logstash fields are mapped to Riemann fields named with all parent keys
-joined by dots, holding the deepest value.
-
-As an example, the Logstash event:
-[source,ruby]
-    {
-      "@timestamp":"2013-12-10T14:36:26.151+0000",
-      "@version": 1,
-      "message":"log message",
-      "host": "host.domain.com",
-      "nested_field": {
-        "key": "value"
-      }
-    }
-is mapped to this Riemann event:
-[source,ruby]
-    {
-      :time 1386686186,
-      :host host.domain.com,
-      :message log message,
-      :nested_field.key value
-    }
-
-It can be used in conjunction with, or independently of, the riemann_event option.
-When used together with riemann_event, any duplicate keys receive their value from
-riemann_event instead of the Logstash event itself.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
-  * Value type is <<number,number>>
-  * Default value is `5555`
-
-The port to connect to on your Riemann server.
-
-[id="{version}-plugins-{type}s-{plugin}-protocol"]
-===== `protocol`
-
-  * Value can be any of: `tcp`, `udp`
-  * Default value is `"tcp"`
-
-The protocol to use:
-UDP is non-blocking,
-TCP is blocking.
-
-Logstash's default output behaviour
-is to never lose events.
-As such, we use tcp as the default here.
-
-[id="{version}-plugins-{type}s-{plugin}-riemann_event"]
-===== `riemann_event`
-
-  * Value type is <<hash,hash>>
-  * There is no default value for this setting.
-
-A Hash to set Riemann event fields
-(http://riemann.io/concepts.html).
-
-The following event fields are supported:
-`description`, `state`, `metric`, `ttl`, `service`
-
-Tags found on the Logstash event will automatically be added to the
-Riemann event.
-
-Any other field set here will be passed to Riemann as an event attribute.
-
-Example:
-[source,ruby]
-    riemann {
-      riemann_event => {
-        "metric" => "%{metric}"
-        "service" => "%{service}"
-      }
-    }
-
-`metric` and `ttl` values will be coerced to a floating point value.
-Values which cannot be coerced become zero (0.0).
-
-`description`, by default, will be set to the event message
-but can be overridden here.
-
-[id="{version}-plugins-{type}s-{plugin}-sender"]
-===== `sender`
-
-  * Value type is <<string,string>>
-  * Default value is `"%{host}"`
-
-The name of the sender.
-This sets the `host` value
-in the Riemann event.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/s3-index.asciidoc b/docs/versioned-plugins/outputs/s3-index.asciidoc
deleted file mode 100644
index d4b4e8220..000000000
--- a/docs/versioned-plugins/outputs/s3-index.asciidoc
+++ /dev/null
@@ -1,14 +0,0 @@
-:plugin: s3
-:type: output
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-07-19
-| <> | 2017-06-23
-|=======================================================================
-
-include::s3-v4.0.9.asciidoc[]
-include::s3-v4.0.8.asciidoc[]
-
diff --git a/docs/versioned-plugins/outputs/s3-v4.0.8.asciidoc b/docs/versioned-plugins/outputs/s3-v4.0.8.asciidoc
deleted file mode 100644
index 81c3425ce..000000000
--- a/docs/versioned-plugins/outputs/s3-v4.0.8.asciidoc
+++ /dev/null
@@ -1,327 +0,0 @@
-:plugin: s3
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.0.8
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-output-s3/blob/v4.0.8/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== S3 output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This plugin batches and uploads Logstash events to Amazon Simple Storage Service (Amazon S3).
-
-Requirements:
-
-* An Amazon S3 bucket and S3 access permissions (typically access_key_id and secret_access_key)
-* S3 PutObject permission
-
-S3 outputs create temporary files in the OS temporary directory; you can specify where to save them using the `temporary_directory` option.
-
-S3 output files have the following format:
-
-ls.s3.312bc026-2f5d-49bc-ae9f-5940cf4ad9a6.2013-04-18T10.00.tag_hello.part0.txt
-
-
-|=======
-| ls.s3 | indicates the Logstash S3 plugin |
-| 312bc026-2f5d-49bc-ae9f-5940cf4ad9a6 | a new, random UUID per file |
-| 2013-04-18T10.00 | the timestamp, with the granularity you specify via time_file |
-| tag_hello | the event's tag |
-| part0 | if you set size_file, more parts are generated whenever the file size exceeds size_file. When a file is full it is pushed to the bucket and then deleted from the temporary directory. If a file is empty, it is simply deleted; empty files are never pushed |
-|=======
-
-Crash recovery:
-
-* This plugin will recover and upload temporary log files after a crash or abnormal termination when `restore` is set to true.
-
-==== Usage
-This is an example of a Logstash config:
-[source,ruby]
-output {
-  s3 {
-    access_key_id => "crazy_key"             (required)
-    secret_access_key => "monkey_access_key" (required)
-    region => "eu-west-1"                    (optional, default = "us-east-1")
-    bucket => "your_bucket"                  (required)
-    size_file => 2048                        (optional) - Bytes
-    time_file => 5                           (optional) - Minutes
-    codec => "plain"                         (optional)
-    canned_acl => "private"                  (optional. Options are "private", "public-read", "public-read-write", "authenticated-read". Defaults to "private" )
-  }
-}
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== S3 Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-bucket>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-canned_acl>> |<<string,string>>, one of `["private", "public-read", "public-read-write", "authenticated-read"]`|No
-| <<{version}-plugins-{type}s-{plugin}-encoding>> |<<string,string>>, one of `["none", "gzip"]`|No
-| <<{version}-plugins-{type}s-{plugin}-prefix>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-region>> |<<string,string>>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No
-| <<{version}-plugins-{type}s-{plugin}-restore>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-rotation_strategy>> |<<string,string>>, one of `["size_and_time", "size", "time"]`|No
-| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-server_side_encryption>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-server_side_encryption_algorithm>> |<<string,string>>, one of `["AES256", "aws:kms"]`|No
-| <<{version}-plugins-{type}s-{plugin}-session_token>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-signature_version>> |<<string,string>>, one of `["v2", "v4"]`|No
-| <<{version}-plugins-{type}s-{plugin}-size_file>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-ssekms_key_id>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-storage_class>> |<<string,string>>, one of `["STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA"]`|No
-| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-time_file>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-upload_queue_size>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-upload_workers_count>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-validate_credentials_on_root_bucket>> |<<boolean,boolean>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-access_key_id"]
-===== `access_key_id`
-
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
-
-1. Static configuration, using `access_key_id` and `secret_access_key` params in the Logstash plugin config
-2. External credentials file specified by `aws_credentials_file`
-3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
-4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
-5. IAM Instance Profile (available when running inside EC2)
-
-[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"]
-===== `aws_credentials_file`
-
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-Path to a YAML file containing a hash of AWS credentials.
-This file will only be loaded if `access_key_id` and
-`secret_access_key` aren't set. The contents of the
-file should look like this:
-
-[source,ruby]
----------------------------------------
-    :access_key_id: "12345"
-    :secret_access_key: "54321"
----------------------------------------
-
-
-[id="{version}-plugins-{type}s-{plugin}-bucket"]
-===== `bucket`
-
-  * This is a required setting.
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-S3 bucket.
-
-[id="{version}-plugins-{type}s-{plugin}-canned_acl"]
-===== `canned_acl`
-
-  * Value can be any of: `private`, `public-read`, `public-read-write`, `authenticated-read`
-  * Default value is `"private"`
-
-The S3 canned ACL to use when putting the file. Defaults to "private".
-
-[id="{version}-plugins-{type}s-{plugin}-encoding"]
-===== `encoding`
-
-  * Value can be any of: `none`, `gzip`
-  * Default value is `"none"`
-
-Specify the content encoding. Supports "gzip". Defaults to "none".
-
-[id="{version}-plugins-{type}s-{plugin}-prefix"]
-===== `prefix`
-
-  * Value type is <<string,string>>
-  * Default value is `""`
-
-Specify a prefix for the uploaded filename; this can simulate directories on S3. The prefix does not require a leading slash.
-This option supports string interpolation; be warned, this can create a lot of temporary local files.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy_uri"]
-===== `proxy_uri`
-
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-URI of the proxy server, if required.
-
-[id="{version}-plugins-{type}s-{plugin}-region"]
-===== `region`
-
-  * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
-  * Default value is `"us-east-1"`
-
-The AWS Region.
-
-[id="{version}-plugins-{type}s-{plugin}-restore"]
-===== `restore`
-
-  * Value type is <<boolean,boolean>>
-  * Default value is `true`
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-rotation_strategy"]
-===== `rotation_strategy`
-
-  * Value can be any of: `size_and_time`, `size`, `time`
-  * Default value is `"size_and_time"`
-
-Defines the strategy used to decide when to rotate the file and push it to S3.
-The default strategy checks both size and time; the first to match triggers rotation.
-
-[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
-===== `secret_access_key`
-
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-The AWS Secret Access Key.
-
-[id="{version}-plugins-{type}s-{plugin}-server_side_encryption"]
-===== `server_side_encryption`
-
-  * Value type is <<boolean,boolean>>
-  * Default value is `false`
-
-Specifies whether or not to use S3's server-side encryption. Defaults to no encryption.
-
-[id="{version}-plugins-{type}s-{plugin}-server_side_encryption_algorithm"]
-===== `server_side_encryption_algorithm`
-
-  * Value can be any of: `AES256`, `aws:kms`
-  * Default value is `"AES256"`
-
-Specifies what type of encryption to use when SSE is enabled.
-
-[id="{version}-plugins-{type}s-{plugin}-session_token"]
-===== `session_token`
-
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-The AWS Session token for temporary credentials.
-
-[id="{version}-plugins-{type}s-{plugin}-signature_version"]
-===== `signature_version`
-
-  * Value can be any of: `v2`, `v4`
-  * There is no default value for this setting.
-
-The version of the S3 signature hash to use. Normally the internal client default is used; it can be explicitly
-specified here.
-
-[id="{version}-plugins-{type}s-{plugin}-size_file"]
-===== `size_file`
-
-  * Value type is <<number,number>>
-  * Default value is `5242880`
-
-Set the file size in bytes. When the accumulated data grows beyond this size, it is rotated, so the data ends up stored in two or more files on the bucket.
-If you use tags, a separate file of this size is generated for every tag.
-
-[id="{version}-plugins-{type}s-{plugin}-ssekms_key_id"]
-===== `ssekms_key_id`
-
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-The key to use when specified along with server_side_encryption => aws:kms.
-If server_side_encryption => aws:kms is set but this is not, the default KMS key is used.
-http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html
-
-[id="{version}-plugins-{type}s-{plugin}-storage_class"]
-===== `storage_class`
-
-  * Value can be any of: `STANDARD`, `REDUCED_REDUNDANCY`, `STANDARD_IA`
-  * Default value is `"STANDARD"`
-
-Specifies what S3 storage class to use when uploading the file.
-More information about the different storage classes can be found at:
-http://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
-Defaults to STANDARD.
-
-[id="{version}-plugins-{type}s-{plugin}-temporary_directory"]
-===== `temporary_directory`
-
-  * Value type is <<string,string>>
-  * Default value is `"/tmp/logstash"`
-
-Set the directory where Logstash will store temporary files before sending them to S3.
-Defaults to a "logstash" directory under the OS temporary directory, e.g. /tmp/logstash on Linux.
-
-[id="{version}-plugins-{type}s-{plugin}-time_file"]
-===== `time_file`
-
-  * Value type is <<number,number>>
-  * Default value is `15`
-
-Set the time, in MINUTES, after which the current time section of the bucket is closed.
-If you also define size_file, you get a number of files per time section and tag.
-Setting this to 0 keeps the file open indefinitely; beware that with both time_file 0 and size_file 0 the file is never pushed to the bucket.
-For now, the only thing this plugin can do in that case is push the file when Logstash restarts.
-
-[id="{version}-plugins-{type}s-{plugin}-upload_queue_size"]
-===== `upload_queue_size`
-
-  * Value type is <<number,number>>
-  * Default value is `4`
-
-Number of items we can keep in the local queue before uploading them.
-
-[id="{version}-plugins-{type}s-{plugin}-upload_workers_count"]
-===== `upload_workers_count`
-
-  * Value type is <<number,number>>
-  * Default value is `4`
-
-Specify how many workers to use to upload the files to S3.
-
-[id="{version}-plugins-{type}s-{plugin}-validate_credentials_on_root_bucket"]
-===== `validate_credentials_on_root_bucket`
-
-  * Value type is <<boolean,boolean>>
-  * Default value is `true`
-
-The common use case is to define permissions on the root bucket and give Logstash full access to write its logs.
-In some circumstances you need finer-grained permissions on subfolders; this option allows you to disable the check at startup.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/s3-v4.0.9.asciidoc b/docs/versioned-plugins/outputs/s3-v4.0.9.asciidoc
deleted file mode 100644
index 7ef2b23cc..000000000
--- a/docs/versioned-plugins/outputs/s3-v4.0.9.asciidoc
+++ /dev/null
@@ -1,327 +0,0 @@
-:plugin: s3
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.0.9
-:release_date: 2017-07-19
-:changelog_url: https://github.com/logstash-plugins/logstash-output-s3/blob/v4.0.9/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== S3 output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This plugin batches and uploads Logstash events to Amazon Simple Storage Service (Amazon S3).
-
-Requirements:
-
-* An Amazon S3 bucket and S3 access permissions (typically access_key_id and secret_access_key)
-* S3 PutObject permission
-
-S3 outputs create temporary files in the OS temporary directory; you can specify where to save them using the `temporary_directory` option.
-
-S3 output files have the following format:
-
-ls.s3.312bc026-2f5d-49bc-ae9f-5940cf4ad9a6.2013-04-18T10.00.tag_hello.part0.txt
-
-
-|=======
-| ls.s3 | indicates the Logstash S3 plugin |
-| 312bc026-2f5d-49bc-ae9f-5940cf4ad9a6 | a new, random UUID per file |
-| 2013-04-18T10.00 | the timestamp, with the granularity you specify via time_file |
-| tag_hello | the event's tag |
-| part0 | if you set size_file, more parts are generated whenever the file size exceeds size_file. When a file is full it is pushed to the bucket and then deleted from the temporary directory. If a file is empty, it is simply deleted; empty files are never pushed |
-|=======
-
-Crash recovery:
-
-* This plugin will recover and upload temporary log files after a crash or abnormal termination when `restore` is set to true.
-
-==== Usage
-This is an example of a Logstash config:
-[source,ruby]
-output {
-  s3 {
-    access_key_id => "crazy_key"             (required)
-    secret_access_key => "monkey_access_key" (required)
-    region => "eu-west-1"                    (optional, default = "us-east-1")
-    bucket => "your_bucket"                  (required)
-    size_file => 2048                        (optional) - Bytes
-    time_file => 5                           (optional) - Minutes
-    codec => "plain"                         (optional)
-    canned_acl => "private"                  (optional. Options are "private", "public-read", "public-read-write", "authenticated-read". Defaults to "private" )
-  }
-}
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== S3 Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-bucket>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-canned_acl>> |<<string,string>>, one of `["private", "public-read", "public-read-write", "authenticated-read"]`|No
-| <<{version}-plugins-{type}s-{plugin}-encoding>> |<<string,string>>, one of `["none", "gzip"]`|No
-| <<{version}-plugins-{type}s-{plugin}-prefix>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-region>> |<<string,string>>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No
-| <<{version}-plugins-{type}s-{plugin}-restore>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-rotation_strategy>> |<<string,string>>, one of `["size_and_time", "size", "time"]`|No
-| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-server_side_encryption>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-server_side_encryption_algorithm>> |<<string,string>>, one of `["AES256", "aws:kms"]`|No
-| <<{version}-plugins-{type}s-{plugin}-session_token>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-signature_version>> |<<string,string>>, one of `["v2", "v4"]`|No
-| <<{version}-plugins-{type}s-{plugin}-size_file>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-ssekms_key_id>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-storage_class>> |<<string,string>>, one of `["STANDARD", "REDUCED_REDUNDANCY", "STANDARD_IA"]`|No
-| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-time_file>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-upload_queue_size>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-upload_workers_count>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-validate_credentials_on_root_bucket>> |<<boolean,boolean>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-access_key_id"]
-===== `access_key_id`
-
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
-
-1. Static configuration, using `access_key_id` and `secret_access_key` params in the Logstash plugin config
-2. External credentials file specified by `aws_credentials_file`
-3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`
-4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY`
-5. IAM Instance Profile (available when running inside EC2)
-
-[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"]
-===== `aws_credentials_file`
-
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-Path to a YAML file containing a hash of AWS credentials.
-This file will only be loaded if `access_key_id` and
-`secret_access_key` aren't set. The contents of the
-file should look like this:
-
-[source,ruby]
----------------------------------------
-    :access_key_id: "12345"
-    :secret_access_key: "54321"
----------------------------------------
-
-
-[id="{version}-plugins-{type}s-{plugin}-bucket"]
-===== `bucket`
-
-  * This is a required setting.
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-S3 bucket.
-
-[id="{version}-plugins-{type}s-{plugin}-canned_acl"]
-===== `canned_acl`
-
-  * Value can be any of: `private`, `public-read`, `public-read-write`, `authenticated-read`
-  * Default value is `"private"`
-
-The S3 canned ACL to use when putting the file. Defaults to "private".
-
-[id="{version}-plugins-{type}s-{plugin}-encoding"]
-===== `encoding`
-
-  * Value can be any of: `none`, `gzip`
-  * Default value is `"none"`
-
-Specify the content encoding. Supports "gzip". Defaults to "none".
-
-[id="{version}-plugins-{type}s-{plugin}-prefix"]
-===== `prefix`
-
-  * Value type is <<string,string>>
-  * Default value is `""`
-
-Specify a prefix for the uploaded filename; this can simulate directories on S3. The prefix does not require a leading slash.
-This option supports string interpolation; be warned, this can create a lot of temporary local files.
-
-[id="{version}-plugins-{type}s-{plugin}-proxy_uri"]
-===== `proxy_uri`
-
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-URI of the proxy server, if required.
-
-[id="{version}-plugins-{type}s-{plugin}-region"]
-===== `region`
-
-  * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
-  * Default value is `"us-east-1"`
-
-The AWS Region.
-
-[id="{version}-plugins-{type}s-{plugin}-restore"]
-===== `restore`
-
-  * Value type is <<boolean,boolean>>
-  * Default value is `true`
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-rotation_strategy"]
-===== `rotation_strategy`
-
-  * Value can be any of: `size_and_time`, `size`, `time`
-  * Default value is `"size_and_time"`
-
-Defines the strategy used to decide when to rotate the file and push it to S3.
-The default strategy checks both size and time; the first to match triggers rotation.
-
-[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
-===== `secret_access_key`
-
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-The AWS Secret Access Key.
-
-[id="{version}-plugins-{type}s-{plugin}-server_side_encryption"]
-===== `server_side_encryption`
-
-  * Value type is <<boolean,boolean>>
-  * Default value is `false`
-
-Specifies whether or not to use S3's server-side encryption. Defaults to no encryption.
-
-[id="{version}-plugins-{type}s-{plugin}-server_side_encryption_algorithm"]
-===== `server_side_encryption_algorithm`
-
-  * Value can be any of: `AES256`, `aws:kms`
-  * Default value is `"AES256"`
-
-Specifies what type of encryption to use when SSE is enabled.
-
-[id="{version}-plugins-{type}s-{plugin}-session_token"]
-===== `session_token`
-
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-The AWS Session token for temporary credentials.
-
-[id="{version}-plugins-{type}s-{plugin}-signature_version"]
-===== `signature_version`
-
-  * Value can be any of: `v2`, `v4`
-  * There is no default value for this setting.
-
-The version of the S3 signature hash to use. Normally the internal client default is used; it can be explicitly
-specified here.
-
-[id="{version}-plugins-{type}s-{plugin}-size_file"]
-===== `size_file`
-
-  * Value type is <<number,number>>
-  * Default value is `5242880`
-
-Set the file size in bytes. When the accumulated data grows beyond this size, it is rotated, so the data ends up stored in two or more files on the bucket.
-If you use tags, a separate file of this size is generated for every tag.
-
-[id="{version}-plugins-{type}s-{plugin}-ssekms_key_id"]
-===== `ssekms_key_id`
-
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-The key to use when specified along with server_side_encryption => aws:kms.
-If server_side_encryption => aws:kms is set but this is not, the default KMS key is used.
-http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html
-
-[id="{version}-plugins-{type}s-{plugin}-storage_class"]
-===== `storage_class`
-
-  * Value can be any of: `STANDARD`, `REDUCED_REDUNDANCY`, `STANDARD_IA`
-  * Default value is `"STANDARD"`
-
-Specifies what S3 storage class to use when uploading the file.
-More information about the different storage classes can be found at:
-http://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
-Defaults to STANDARD.
-
-[id="{version}-plugins-{type}s-{plugin}-temporary_directory"]
-===== `temporary_directory`
-
-  * Value type is <<string,string>>
-  * Default value is `"/tmp/logstash"`
-
-Set the directory where Logstash will store temporary files before sending them to S3.
-Defaults to a "logstash" directory under the OS temporary directory, e.g. /tmp/logstash on Linux.
-
-[id="{version}-plugins-{type}s-{plugin}-time_file"]
-===== `time_file`
-
-  * Value type is <<number,number>>
-  * Default value is `15`
-
-Set the time, in MINUTES, after which the current time section of the bucket is closed.
-If you also define size_file, you get a number of files per time section and tag.
-Setting this to 0 keeps the file open indefinitely; beware that with both time_file 0 and size_file 0 the file is never pushed to the bucket.
-For now, the only thing this plugin can do in that case is push the file when Logstash restarts.
-
-[id="{version}-plugins-{type}s-{plugin}-upload_queue_size"]
-===== `upload_queue_size`
-
-  * Value type is <<number,number>>
-  * Default value is `4`
-
-Number of items we can keep in the local queue before uploading them.
-
-[id="{version}-plugins-{type}s-{plugin}-upload_workers_count"]
-===== `upload_workers_count`
-
-  * Value type is <<number,number>>
-  * Default value is `4`
-
-Specify how many workers to use to upload the files to S3.
-
-[id="{version}-plugins-{type}s-{plugin}-validate_credentials_on_root_bucket"]
-===== `validate_credentials_on_root_bucket`
-
-  * Value type is <<boolean,boolean>>
-  * Default value is `true`
-
-The common use case is to define permissions on the root bucket and give Logstash full access to write its logs.
-In some circumstances you need finer-grained permissions on subfolders; this option allows you to disable the check at startup.
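-
-To tie several of the options above together, here is a configuration sketch focused on rotation and crash recovery. All values are illustrative (the bucket name is hypothetical), and credentials are assumed to come from the environment or an IAM instance profile:
-
-[source,ruby]
-    output {
-      s3 {
-        bucket => "my-logstash-bucket"          # illustrative, required
-        region => "eu-west-1"
-        rotation_strategy => "size_and_time"    # default
-        size_file => 5242880                    # rotate once the file reaches 5 MiB...
-        time_file => 15                         # ...or after 15 minutes, whichever comes first
-        temporary_directory => "/tmp/logstash"  # default
-        restore => true                         # re-upload leftover temporary files after a crash
-      }
-    }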
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/slack-index.asciidoc b/docs/versioned-plugins/outputs/slack-index.asciidoc
deleted file mode 100644
index c0a342b76..000000000
--- a/docs/versioned-plugins/outputs/slack-index.asciidoc
+++ /dev/null
@@ -1,14 +0,0 @@
-:plugin: slack
-:type: output
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-12-22
-| <> | 2017-06-23
-|=======================================================================
-
-include::slack-v2.1.0.asciidoc[]
-include::slack-v2.0.3.asciidoc[]
-
diff --git a/docs/versioned-plugins/outputs/slack-v2.0.3.asciidoc b/docs/versioned-plugins/outputs/slack-v2.0.3.asciidoc
deleted file mode 100644
index 9417316ed..000000000
--- a/docs/versioned-plugins/outputs/slack-v2.0.3.asciidoc
+++ /dev/null
@@ -1,107 +0,0 @@
-:plugin: slack
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v2.0.3
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-output-slack/blob/v2.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Slack output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Writes events to Slack via an incoming webhook.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Slack Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-attachments>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-channel>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-format>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-icon_emoji>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-icon_url>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-url>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-username>> |<<string,string>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-attachments"]
-===== `attachments`
-
-  * Value type is <<array,array>>
-  * There is no default value for this setting.
-
-Attachments array, as described at https://api.slack.com/docs/attachments
-
-[id="{version}-plugins-{type}s-{plugin}-channel"]
-===== `channel`
-
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-The channel to post to.
-
-[id="{version}-plugins-{type}s-{plugin}-format"]
-===== `format`
-
-  * Value type is <<string,string>>
-  * Default value is `"%{message}"`
-
-The text to post in Slack.
-
-[id="{version}-plugins-{type}s-{plugin}-icon_emoji"]
-===== `icon_emoji`
-
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-Emoji icon to use.
-
-[id="{version}-plugins-{type}s-{plugin}-icon_url"]
-===== `icon_url`
-
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-Icon URL to use.
-
-[id="{version}-plugins-{type}s-{plugin}-url"]
-===== `url`
-
-  * This is a required setting.
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-The incoming webhook URI needed to post a message.
-
-[id="{version}-plugins-{type}s-{plugin}-username"]
-===== `username`
-
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-The username to use for posting.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/slack-v2.1.0.asciidoc b/docs/versioned-plugins/outputs/slack-v2.1.0.asciidoc
deleted file mode 100644
index c012cd548..000000000
--- a/docs/versioned-plugins/outputs/slack-v2.1.0.asciidoc
+++ /dev/null
@@ -1,107 +0,0 @@
-:plugin: slack
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v2.1.0
-:release_date: 2017-12-22
-:changelog_url: https://github.com/logstash-plugins/logstash-output-slack/blob/v2.1.0/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Slack output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Writes events to Slack via an incoming webhook.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Slack Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-attachments>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-channel>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-format>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-icon_emoji>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-icon_url>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-url>> |<<string,string>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-username>> |<<string,string>>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
-&nbsp;
-
-[id="{version}-plugins-{type}s-{plugin}-attachments"]
-===== `attachments`
-
-  * Value type is <<array,array>>
-  * There is no default value for this setting.
-
-Attachments array, as described at https://api.slack.com/docs/attachments
-
-[id="{version}-plugins-{type}s-{plugin}-channel"]
-===== `channel`
-
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-The channel to post to.
-
-[id="{version}-plugins-{type}s-{plugin}-format"]
-===== `format`
-
-  * Value type is <<string,string>>
-  * Default value is `"%{message}"`
-
-The text to post in Slack.
-
-[id="{version}-plugins-{type}s-{plugin}-icon_emoji"]
-===== `icon_emoji`
-
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-Emoji icon to use.
-
-[id="{version}-plugins-{type}s-{plugin}-icon_url"]
-===== `icon_url`
-
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
-
-Icon URL to use.
-
-[id="{version}-plugins-{type}s-{plugin}-url"]
-===== `url`
-
-  * This is a required setting.
-  * Value type is <<string,string>>
-  * There is no default value for this setting.
- -The incoming webhook URI needed to post a message - -[id="{version}-plugins-{type}s-{plugin}-username"] -===== `username` - - * Value type is <> - * There is no default value for this setting. - -The username to use for posting - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/sns-index.asciidoc b/docs/versioned-plugins/outputs/sns-index.asciidoc deleted file mode 100644 index a0ad6b3e3..000000000 --- a/docs/versioned-plugins/outputs/sns-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: sns -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-13 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::sns-v4.0.6.asciidoc[] -include::sns-v4.0.5.asciidoc[] -include::sns-v4.0.4.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/sns-v4.0.4.asciidoc b/docs/versioned-plugins/outputs/sns-v4.0.4.asciidoc deleted file mode 100644 index 54b021ffe..000000000 --- a/docs/versioned-plugins/outputs/sns-v4.0.4.asciidoc +++ /dev/null @@ -1,162 +0,0 @@ -:plugin: sns -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.0.4 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-sns/blob/v4.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Sns output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -SNS output. - -Send events to Amazon's Simple Notification Service, a hosted pub/sub -framework. It supports various subscription types, including email, HTTP/S, SMS, and SQS. - -For further documentation about the service see: - - http://docs.amazonwebservices.com/sns/latest/api/ - -This plugin looks for the following fields on events it receives: - - * `sns` - If no ARN is found in the configuration file, this will be used as - the ARN to publish. - * `sns_subject` - The subject line that should be used. - Optional. The "%{host}" will be used if `sns_subject` is not present. The subject - will be truncated to 100 characters. If `sns_subject` is set to a non-string value a JSON version of that value will be saved. - * `sns_message` - Optional string of message to be sent. If this is set to a non-string value it will be encoded with the specified `codec`. If this is not set the entire event will be encoded with the codec. - with the @message truncated so that the length of the JSON fits in - `32768` bytes. - -==== Upgrading to 2.0.0 - -This plugin used to have a `format` option for controlling the encoding of messages prior to being sent to SNS. -This plugin now uses the logstash standard <> option for encoding instead. -If you want the same 'plain' format as the v0/1 codec (`format => "plain"`) use `codec => "s3_plain"`. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Sns Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
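To ground the options that follow, here is a minimal sketch of this output in a pipeline. The topic ARN is a placeholder, and setting `region` explicitly is illustrative (it simply restates the documented default):

[source,ruby]
----------------------------------
output {
  sns {
    arn    => "arn:aws:sns:us-east-1:123456789012:my-topic"  # placeholder ARN
    region => "us-east-1"                                    # documented default, shown for clarity
  }
}
----------------------------------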
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-arn>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-publish_boot_message_arn>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No -| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-access_key_id"] -===== `access_key_id` - - * Value type is <> - * There is no default value for this setting. - -This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: - -1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config -2. External credentials file specified by `aws_credentials_file` -3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` -4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` -5. IAM Instance Profile (available when running inside EC2) - -[id="{version}-plugins-{type}s-{plugin}-arn"] -===== `arn` - - * Value type is <> - * There is no default value for this setting. - -Optional ARN to send messages to. If you do not set this you must -include the `sns` field in your events to set the ARN on a per-message basis! - -[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] -===== `aws_credentials_file` - - * Value type is <> - * There is no default value for this setting. - -Path to YAML file containing a hash of AWS credentials. -This file will only be loaded if `access_key_id` and -`secret_access_key` aren't set. The contents of the -file should look like this: - -[source,ruby] ----------------------------------- - :access_key_id: "12345" - :secret_access_key: "54321" ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] -===== `proxy_uri` - - * Value type is <> - * There is no default value for this setting. - -URI to proxy server if required - -[id="{version}-plugins-{type}s-{plugin}-publish_boot_message_arn"] -===== `publish_boot_message_arn` - - * Value type is <> - * There is no default value for this setting. - -When an ARN for an SNS topic is specified here, the message -"Logstash successfully booted" will be sent to it when this plugin -is registered. 
- -Example: arn:aws:sns:us-east-1:770975001275:logstash-testing - - -[id="{version}-plugins-{type}s-{plugin}-region"] -===== `region` - - * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1` - * Default value is `"us-east-1"` - -The AWS Region - -[id="{version}-plugins-{type}s-{plugin}-secret_access_key"] -===== `secret_access_key` - - * Value type is <> - * There is no default value for this setting. - -The AWS Secret Access Key - -[id="{version}-plugins-{type}s-{plugin}-session_token"] -===== `session_token` - - * Value type is <> - * There is no default value for this setting. - -The AWS Session token for temporary credential - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/sns-v4.0.5.asciidoc b/docs/versioned-plugins/outputs/sns-v4.0.5.asciidoc deleted file mode 100644 index ebc9fd852..000000000 --- a/docs/versioned-plugins/outputs/sns-v4.0.5.asciidoc +++ /dev/null @@ -1,162 +0,0 @@ -:plugin: sns -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.0.5 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-sns/blob/v4.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Sns output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -SNS output. - -Send events to Amazon's Simple Notification Service, a hosted pub/sub -framework. It supports various subscription types, including email, HTTP/S, SMS, and SQS. - -For further documentation about the service see: - - http://docs.amazonwebservices.com/sns/latest/api/ - -This plugin looks for the following fields on events it receives: - - * `sns` - If no ARN is found in the configuration file, this will be used as - the ARN to publish. - * `sns_subject` - The subject line that should be used. - Optional. The "%{host}" will be used if `sns_subject` is not present. The subject - will be truncated to 100 characters. If `sns_subject` is set to a non-string value a JSON version of that value will be saved. - * `sns_message` - Optional string of message to be sent. If this is set to a non-string value it will be encoded with the specified `codec`. If this is not set the entire event will be encoded with the codec. - with the @message truncated so that the length of the JSON fits in - `32768` bytes. - -==== Upgrading to 2.0.0 - -This plugin used to have a `format` option for controlling the encoding of messages prior to being sent to SNS. -This plugin now uses the logstash standard <> option for encoding instead. -If you want the same 'plain' format as the v0/1 codec (`format => "plain"`) use `codec => "s3_plain"`. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Sns Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
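Because the plugin reads the `sns`, `sns_subject`, and `sns_message` event fields described above, per-event topic routing can be sketched without setting `arn` on the output at all. This is a hypothetical example; the ARN and field values are placeholders, not working resources:

[source,ruby]
----------------------------------
filter {
  mutate {
    add_field => {
      "sns"         => "arn:aws:sns:us-east-1:123456789012:my-topic"  # placeholder per-event ARN
      "sns_subject" => "%{host} event"                                # subjects are truncated to 100 characters
    }
  }
}

output {
  sns { }  # no `arn` here, so the per-event `sns` field supplies it
}
----------------------------------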
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-arn>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-publish_boot_message_arn>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No -| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-access_key_id"] -===== `access_key_id` - - * Value type is <> - * There is no default value for this setting. - -This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: - -1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config -2. External credentials file specified by `aws_credentials_file` -3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` -4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` -5. IAM Instance Profile (available when running inside EC2) - -[id="{version}-plugins-{type}s-{plugin}-arn"] -===== `arn` - - * Value type is <> - * There is no default value for this setting. - -Optional ARN to send messages to. If you do not set this you must -include the `sns` field in your events to set the ARN on a per-message basis! - -[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] -===== `aws_credentials_file` - - * Value type is <> - * There is no default value for this setting. - -Path to YAML file containing a hash of AWS credentials. -This file will only be loaded if `access_key_id` and -`secret_access_key` aren't set. The contents of the -file should look like this: - -[source,ruby] ----------------------------------- - :access_key_id: "12345" - :secret_access_key: "54321" ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] -===== `proxy_uri` - - * Value type is <> - * There is no default value for this setting. - -URI to proxy server if required - -[id="{version}-plugins-{type}s-{plugin}-publish_boot_message_arn"] -===== `publish_boot_message_arn` - - * Value type is <> - * There is no default value for this setting. - -When an ARN for an SNS topic is specified here, the message -"Logstash successfully booted" will be sent to it when this plugin -is registered. 
- -Example: arn:aws:sns:us-east-1:770975001275:logstash-testing - - -[id="{version}-plugins-{type}s-{plugin}-region"] -===== `region` - - * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1` - * Default value is `"us-east-1"` - -The AWS Region - -[id="{version}-plugins-{type}s-{plugin}-secret_access_key"] -===== `secret_access_key` - - * Value type is <> - * There is no default value for this setting. - -The AWS Secret Access Key - -[id="{version}-plugins-{type}s-{plugin}-session_token"] -===== `session_token` - - * Value type is <> - * There is no default value for this setting. - -The AWS Session token for temporary credential - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/sns-v4.0.6.asciidoc b/docs/versioned-plugins/outputs/sns-v4.0.6.asciidoc deleted file mode 100644 index c65ed531f..000000000 --- a/docs/versioned-plugins/outputs/sns-v4.0.6.asciidoc +++ /dev/null @@ -1,162 +0,0 @@ -:plugin: sns -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.0.6 -:release_date: 2017-11-13 -:changelog_url: https://github.com/logstash-plugins/logstash-output-sns/blob/v4.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Sns output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -SNS output. - -Send events to Amazon's Simple Notification Service, a hosted pub/sub -framework. It supports various subscription types, including email, HTTP/S, SMS, and SQS. - -For further documentation about the service see: - - http://docs.amazonwebservices.com/sns/latest/api/ - -This plugin looks for the following fields on events it receives: - - * `sns` - If no ARN is found in the configuration file, this will be used as - the ARN to publish. - * `sns_subject` - The subject line that should be used. - Optional. The "%{host}" will be used if `sns_subject` is not present. The subject - will be truncated to 100 characters. If `sns_subject` is set to a non-string value a JSON version of that value will be saved. - * `sns_message` - Optional string of message to be sent. If this is set to a non-string value it will be encoded with the specified `codec`. If this is not set the entire event will be encoded with the codec. - with the @message truncated so that the length of the JSON fits in - `32768` bytes. - -==== Upgrading to 2.0.0 - -This plugin used to have a `format` option for controlling the encoding of messages prior to being sent to SNS. -This plugin now uses the logstash standard <> option for encoding instead. -If you want the same 'plain' format as the v0/1 codec (`format => "plain"`) use `codec => "s3_plain"`. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Sns Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
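If you prefer file-based credentials over static keys or environment variables, a sketch along these lines should work. The path is hypothetical, and the expected file format is shown under `aws_credentials_file` below:

[source,ruby]
----------------------------------
output {
  sns {
    arn                  => "arn:aws:sns:us-east-1:123456789012:my-topic"  # placeholder ARN
    aws_credentials_file => "/etc/logstash/aws_credentials.yml"            # hypothetical path
  }
}
----------------------------------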
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-arn>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-publish_boot_message_arn>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No -| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-access_key_id"] -===== `access_key_id` - - * Value type is <> - * There is no default value for this setting. - -This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: - -1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config -2. External credentials file specified by `aws_credentials_file` -3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` -4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` -5. IAM Instance Profile (available when running inside EC2) - -[id="{version}-plugins-{type}s-{plugin}-arn"] -===== `arn` - - * Value type is <> - * There is no default value for this setting. - -Optional ARN to send messages to. If you do not set this you must -include the `sns` field in your events to set the ARN on a per-message basis! - -[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] -===== `aws_credentials_file` - - * Value type is <> - * There is no default value for this setting. - -Path to YAML file containing a hash of AWS credentials. -This file will only be loaded if `access_key_id` and -`secret_access_key` aren't set. The contents of the -file should look like this: - -[source,ruby] ----------------------------------- - :access_key_id: "12345" - :secret_access_key: "54321" ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] -===== `proxy_uri` - - * Value type is <> - * There is no default value for this setting. - -URI to proxy server if required - -[id="{version}-plugins-{type}s-{plugin}-publish_boot_message_arn"] -===== `publish_boot_message_arn` - - * Value type is <> - * There is no default value for this setting. - -When an ARN for an SNS topic is specified here, the message -"Logstash successfully booted" will be sent to it when this plugin -is registered. 
-
-Example: arn:aws:sns:us-east-1:770975001275:logstash-testing
-
-
-[id="{version}-plugins-{type}s-{plugin}-region"]
-===== `region`
-
- * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1`
- * Default value is `"us-east-1"`
-
-The AWS Region
-
-[id="{version}-plugins-{type}s-{plugin}-secret_access_key"]
-===== `secret_access_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The AWS Secret Access Key
-
-[id="{version}-plugins-{type}s-{plugin}-session_token"]
-===== `session_token`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The AWS Session token for temporary credentials
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/solr_http-index.asciidoc b/docs/versioned-plugins/outputs/solr_http-index.asciidoc
deleted file mode 100644
index db7f717f1..000000000
--- a/docs/versioned-plugins/outputs/solr_http-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: solr_http
-:type: output
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-13
-| <> | 2017-08-16
-| <> | 2017-06-23
-|=======================================================================
-
-include::solr_http-v3.0.4.asciidoc[]
-include::solr_http-v3.0.3.asciidoc[]
-include::solr_http-v3.0.2.asciidoc[]
-
diff --git a/docs/versioned-plugins/outputs/solr_http-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/solr_http-v3.0.2.asciidoc
deleted file mode 100644
index 5320f43c9..000000000
--- a/docs/versioned-plugins/outputs/solr_http-v3.0.2.asciidoc
+++ /dev/null
@@ -1,92 +0,0 @@
-:plugin: solr_http
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.2
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-output-solr_http/blob/v3.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Solr_http output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-This output lets you index and store your logs in Solr. If you want to get
-started quickly you should use version 4.4 or above in schemaless mode,
-which will try to guess your fields automatically. To turn that on,
-you can use the example included in the Solr archive:
-[source,shell]
- tar zxf solr-4.4.0.tgz
- cd example
- mv solr solr_ #back up the existing sample conf
- cp -r example-schemaless/solr/ . #put the schemaless conf in place
- java -jar start.jar #start Solr
-
-You can learn more at https://lucene.apache.org/solr/[the Solr home page]
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Solr_http Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
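Before the option-by-option reference, the smallest useful configuration is just the plugin with its connection URL. This sketch simply spells out the documented default:

[source,ruby]
----------------------------------
output {
  solr_http {
    solr_url => "http://localhost:8983/solr"  # documented default, shown explicitly
  }
}
----------------------------------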
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-flush_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-idle_flush_time>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-solr_url>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-document_id"] -===== `document_id` - - * Value type is <> - * Default value is `nil` - -Solr document ID for events. You'd typically have a variable here, like -'%{foo}' so you can assign your own IDs - -[id="{version}-plugins-{type}s-{plugin}-flush_size"] -===== `flush_size` - - * Value type is <> - * Default value is `100` - -Number of events to queue up before writing to Solr - -[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"] -===== `idle_flush_time` - - * Value type is <> - * Default value is `1` - -Amount of time since the last flush before a flush is done even if -the number of buffered events is smaller than flush_size - -[id="{version}-plugins-{type}s-{plugin}-solr_url"] -===== `solr_url` - - * Value type is <> - * Default value is `"http://localhost:8983/solr"` - -URL used to connect to Solr - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/solr_http-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/solr_http-v3.0.3.asciidoc deleted file mode 100644 index f72395de1..000000000 --- a/docs/versioned-plugins/outputs/solr_http-v3.0.3.asciidoc +++ /dev/null @@ -1,92 +0,0 @@ -:plugin: solr_http -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-solr_http/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Solr_http output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output lets you index&store your logs in Solr. If you want to get -started quickly you should use version 4.4 or above in schemaless mode, -which will try and guess your fields automatically. To turn that on, -you can use the example included in the Solr archive: -[source,shell] - tar zxf solr-4.4.0.tgz - cd example - mv solr solr_ #back up the existing sample conf - cp -r example-schemaless/solr/ . #put the schemaless conf in place - java -jar start.jar #start Solr - -You can learn more at https://lucene.apache.org/solr/[the Solr home page] - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Solr_http Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
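To make indexing idempotent you would typically set `document_id` from an event field, as the option description below suggests. In this sketch, `%{fingerprint}` is a hypothetical field assumed to be populated by an upstream filter:

[source,ruby]
----------------------------------
output {
  solr_http {
    solr_url    => "http://localhost:8983/solr"
    document_id => "%{fingerprint}"  # hypothetical field set by an earlier filter
  }
}
----------------------------------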
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-flush_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-idle_flush_time>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-solr_url>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-document_id"] -===== `document_id` - - * Value type is <> - * Default value is `nil` - -Solr document ID for events. You'd typically have a variable here, like -'%{foo}' so you can assign your own IDs - -[id="{version}-plugins-{type}s-{plugin}-flush_size"] -===== `flush_size` - - * Value type is <> - * Default value is `100` - -Number of events to queue up before writing to Solr - -[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"] -===== `idle_flush_time` - - * Value type is <> - * Default value is `1` - -Amount of time since the last flush before a flush is done even if -the number of buffered events is smaller than flush_size - -[id="{version}-plugins-{type}s-{plugin}-solr_url"] -===== `solr_url` - - * Value type is <> - * Default value is `"http://localhost:8983/solr"` - -URL used to connect to Solr - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/solr_http-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/solr_http-v3.0.4.asciidoc deleted file mode 100644 index fed11012c..000000000 --- a/docs/versioned-plugins/outputs/solr_http-v3.0.4.asciidoc +++ /dev/null @@ -1,92 +0,0 @@ -:plugin: solr_http -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-11-13 -:changelog_url: https://github.com/logstash-plugins/logstash-output-solr_http/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Solr_http output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output lets you index&store your logs in Solr. If you want to get -started quickly you should use version 4.4 or above in schemaless mode, -which will try and guess your fields automatically. To turn that on, -you can use the example included in the Solr archive: -[source,shell] - tar zxf solr-4.4.0.tgz - cd example - mv solr solr_ #back up the existing sample conf - cp -r example-schemaless/solr/ . #put the schemaless conf in place - java -jar start.jar #start Solr - -You can learn more at https://lucene.apache.org/solr/[the Solr home page] - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Solr_http Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
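The `flush_size` and `idle_flush_time` options below trade throughput against latency: events buffer until either threshold is reached. The values in this sketch are illustrative, not recommendations:

[source,ruby]
----------------------------------
output {
  solr_http {
    solr_url        => "http://localhost:8983/solr"
    flush_size      => 500  # buffer up to 500 events per write (illustrative)
    idle_flush_time => 5    # ...but flush at least every 5 seconds
  }
}
----------------------------------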
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-flush_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-idle_flush_time>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-solr_url>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-document_id"] -===== `document_id` - - * Value type is <> - * Default value is `nil` - -Solr document ID for events. You'd typically have a variable here, like -'%{foo}' so you can assign your own IDs - -[id="{version}-plugins-{type}s-{plugin}-flush_size"] -===== `flush_size` - - * Value type is <> - * Default value is `100` - -Number of events to queue up before writing to Solr - -[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"] -===== `idle_flush_time` - - * Value type is <> - * Default value is `1` - -Amount of time since the last flush before a flush is done even if -the number of buffered events is smaller than flush_size - -[id="{version}-plugins-{type}s-{plugin}-solr_url"] -===== `solr_url` - - * Value type is <> - * Default value is `"http://localhost:8983/solr"` - -URL used to connect to Solr - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/sqs-index.asciidoc b/docs/versioned-plugins/outputs/sqs-index.asciidoc deleted file mode 100644 index 2dc06d1de..000000000 --- a/docs/versioned-plugins/outputs/sqs-index.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -:plugin: sqs -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-13 -| <> | 2017-08-16 -| <> | 2017-08-01 -| <> | 2017-08-18 -| <> | 2017-06-23 -|======================================================================= - -include::sqs-v5.0.2.asciidoc[] -include::sqs-v5.0.1.asciidoc[] -include::sqs-v5.0.0.asciidoc[] -include::sqs-v4.0.3.asciidoc[] -include::sqs-v4.0.2.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/sqs-v4.0.2.asciidoc b/docs/versioned-plugins/outputs/sqs-v4.0.2.asciidoc deleted file mode 100644 index 206a23be9..000000000 --- a/docs/versioned-plugins/outputs/sqs-v4.0.2.asciidoc +++ /dev/null @@ -1,218 +0,0 @@ -:plugin: sqs -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.0.2 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-sqs/blob/v4.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Sqs output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Push events to an Amazon Web Services (AWS) Simple Queue Service (SQS) queue. - -SQS is a simple, scalable queue system that is part of the Amazon Web -Services suite of tools. 
Although SQS is similar to other queuing systems
-such as Advanced Message Queuing Protocol (AMQP), it uses a custom API and
-requires that you have an AWS account. See http://aws.amazon.com/sqs/ for
-more details on how SQS works, what the pricing schedule looks like, and how
-to set up a queue.
-
-The "consumer" identity must have the following permissions on the queue:
-
- * `sqs:GetQueueUrl`
- * `sqs:SendMessage`
- * `sqs:SendMessageBatch`
-
-Typically, you should set up an IAM policy, create a user, and apply the IAM
-policy to the user. See http://aws.amazon.com/iam/ for more details on
-setting up AWS identities. A sample policy is as follows:
-
-[source,json]
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Action": [
-        "sqs:GetQueueUrl",
-        "sqs:SendMessage",
-        "sqs:SendMessageBatch"
-      ],
-      "Resource": "arn:aws:sqs:us-east-1:123456789012:my-sqs-queue"
-    }
-  ]
-}
-
-==== Batch Publishing
-This output publishes messages to SQS in batches in order to optimize event
-throughput and increase performance. This is done using the
-http://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessageBatch.html[`SendMessageBatch`]
-API. When publishing messages to SQS in batches, the following service limits
-must be respected (see
-http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html[Limits in Amazon SQS]):
-
- * The maximum allowed individual message size is 256KiB.
- * The maximum total payload size (i.e. the sum of the sizes of all
-   individual messages within a batch) is also 256KiB.
-
-This plugin will dynamically adjust the size of the batch published to SQS in
-order to ensure that the total payload size does not exceed 256KiB.
-
-WARNING: This output cannot currently handle messages larger than 256KiB. Any
-single message exceeding this size will be dropped.
-
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Sqs Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-batch_events>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-message_max_size>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No
-| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-access_key_id"]
-===== `access_key_id`
-
- * Value type is <>
- * There is no default value for this setting.
-
-This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order:
-
-1.
Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config -2. External credentials file specified by `aws_credentials_file` -3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` -4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` -5. IAM Instance Profile (available when running inside EC2) - -[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] -===== `aws_credentials_file` - - * Value type is <> - * There is no default value for this setting. - -Path to YAML file containing a hash of AWS credentials. -This file will only be loaded if `access_key_id` and -`secret_access_key` aren't set. The contents of the -file should look like this: - -[source,ruby] ----------------------------------- - :access_key_id: "12345" - :secret_access_key: "54321" ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-batch"] -===== `batch` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * Default value is `true` - -Set to `true` to send messages to SQS in batches (with the -`SendMessageBatch` API) or `false` to send messages to SQS individually -(with the `SendMessage` API). The size of the batch is configurable via -`batch_events`. - -[id="{version}-plugins-{type}s-{plugin}-batch_events"] -===== `batch_events` - - * Value type is <> - * Default value is `10` - -The number of events to be sent in each batch. Set this to `1` to disable -the batch sending of messages. - -[id="{version}-plugins-{type}s-{plugin}-batch_timeout"] -===== `batch_timeout` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-message_max_size"] -===== `message_max_size` - - * Value type is <> - * Default value is `"256KiB"` - -The maximum number of bytes for any message sent to SQS. Messages exceeding -this size will be dropped. See -http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html. - -[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] -===== `proxy_uri` - - * Value type is <> - * There is no default value for this setting. - -URI to proxy server if required - -[id="{version}-plugins-{type}s-{plugin}-queue"] -===== `queue` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The name of the target SQS queue. Note that this is just the name of the -queue, not the URL or ARN. - -[id="{version}-plugins-{type}s-{plugin}-region"] -===== `region` - - * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1` - * Default value is `"us-east-1"` - -The AWS Region - -[id="{version}-plugins-{type}s-{plugin}-secret_access_key"] -===== `secret_access_key` - - * Value type is <> - * There is no default value for this setting. - -The AWS Secret Access Key - -[id="{version}-plugins-{type}s-{plugin}-session_token"] -===== `session_token` - - * Value type is <> - * There is no default value for this setting. 
- -The AWS Session token for temporary credential - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/sqs-v4.0.3.asciidoc b/docs/versioned-plugins/outputs/sqs-v4.0.3.asciidoc deleted file mode 100644 index 87fe0e069..000000000 --- a/docs/versioned-plugins/outputs/sqs-v4.0.3.asciidoc +++ /dev/null @@ -1,218 +0,0 @@ -:plugin: sqs -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v4.0.3 -:release_date: 2017-08-18 -:changelog_url: https://github.com/logstash-plugins/logstash-output-sqs/blob/v4.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Sqs output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Push events to an Amazon Web Services (AWS) Simple Queue Service (SQS) queue. - -SQS is a simple, scalable queue system that is part of the Amazon Web -Services suite of tools. Although SQS is similar to other queuing systems -such as Advanced Message Queuing Protocol (AMQP), it uses a custom API and -requires that you have an AWS account. See http://aws.amazon.com/sqs/ for -more details on how SQS works, what the pricing schedule looks like and how -to setup a queue. - -The "consumer" identity must have the following permissions on the queue: - - * `sqs:GetQueueUrl` - * `sqs:SendMessage` - * `sqs:SendMessageBatch` - -Typically, you should setup an IAM policy, create a user and apply the IAM -policy to the user. See http://aws.amazon.com/iam/ for more details on -setting up AWS identities. A sample policy is as follows: - -[source,json] -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "sqs:GetQueueUrl", - "sqs:SendMessage", - "sqs:SendMessageBatch" - ], - "Resource": "arn:aws:sqs:us-east-1:123456789012:my-sqs-queue" - } - ] -} - -==== Batch Publishing -This output publishes messages to SQS in batches in order to optimize event -throughput and increase performance. This is done using the -[`SendMessageBatch`](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessageBatch.html) -API. When publishing messages to SQS in batches, the following service limits -must be respected (see -[Limits in Amazon SQS](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html)): - - * The maximum allowed individual message size is 256KiB. - * The maximum total payload size (i.e. the sum of the sizes of all - individual messages within a batch) is also 256KiB. - -This plugin will dynamically adjust the size of the batch published to SQS in -order to ensure that the total payload size does not exceed 256KiB. - -WARNING: This output cannot currently handle messages larger than 256KiB. Any -single message exceeding this size will be dropped. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Sqs Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
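Before the option tables, here is a minimal sketch of the plugin in an `output` block. The queue name is a placeholder, and the `region` and `batch_events` values shown simply restate the documented defaults:

[source,ruby]
----------------------------------
output {
  sqs {
    queue        => "my-sqs-queue"  # queue name only, not the URL or ARN
    region       => "us-east-1"     # documented default
    batch_events => 10              # documented default; 1 sends messages individually
  }
}
----------------------------------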
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-batch_events>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message_max_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No -| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-access_key_id"] -===== `access_key_id` - - * Value type is <> - * There is no default value for this setting. - -This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: - -1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config -2. External credentials file specified by `aws_credentials_file` -3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` -4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` -5. IAM Instance Profile (available when running inside EC2) - -[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] -===== `aws_credentials_file` - - * Value type is <> - * There is no default value for this setting. - -Path to YAML file containing a hash of AWS credentials. -This file will only be loaded if `access_key_id` and -`secret_access_key` aren't set. The contents of the -file should look like this: - -[source,ruby] ----------------------------------- - :access_key_id: "12345" - :secret_access_key: "54321" ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-batch"] -===== `batch` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * Default value is `true` - -Set to `true` to send messages to SQS in batches (with the -`SendMessageBatch` API) or `false` to send messages to SQS individually -(with the `SendMessage` API). The size of the batch is configurable via -`batch_events`. - -[id="{version}-plugins-{type}s-{plugin}-batch_events"] -===== `batch_events` - - * Value type is <> - * Default value is `10` - -The number of events to be sent in each batch. Set this to `1` to disable -the batch sending of messages. - -[id="{version}-plugins-{type}s-{plugin}-batch_timeout"] -===== `batch_timeout` (DEPRECATED) - - * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> - * There is no default value for this setting. - - - -[id="{version}-plugins-{type}s-{plugin}-message_max_size"] -===== `message_max_size` - - * Value type is <> - * Default value is `"256KiB"` - -The maximum number of bytes for any message sent to SQS. Messages exceeding -this size will be dropped. 
See -http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html. - -[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] -===== `proxy_uri` - - * Value type is <> - * There is no default value for this setting. - -URI to proxy server if required - -[id="{version}-plugins-{type}s-{plugin}-queue"] -===== `queue` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The name of the target SQS queue. Note that this is just the name of the -queue, not the URL or ARN. - -[id="{version}-plugins-{type}s-{plugin}-region"] -===== `region` - - * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1` - * Default value is `"us-east-1"` - -The AWS Region - -[id="{version}-plugins-{type}s-{plugin}-secret_access_key"] -===== `secret_access_key` - - * Value type is <> - * There is no default value for this setting. - -The AWS Secret Access Key - -[id="{version}-plugins-{type}s-{plugin}-session_token"] -===== `session_token` - - * Value type is <> - * There is no default value for this setting. - -The AWS Session token for temporary credential - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/sqs-v5.0.0.asciidoc b/docs/versioned-plugins/outputs/sqs-v5.0.0.asciidoc deleted file mode 100644 index 9904473b7..000000000 --- a/docs/versioned-plugins/outputs/sqs-v5.0.0.asciidoc +++ /dev/null @@ -1,197 +0,0 @@ -:plugin: sqs -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v5.0.0 -:release_date: 2017-08-01 -:changelog_url: https://github.com/logstash-plugins/logstash-output-sqs/blob/v5.0.0/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Sqs output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Push events to an Amazon Web Services (AWS) Simple Queue Service (SQS) queue. - -SQS is a simple, scalable queue system that is part of the Amazon Web -Services suite of tools. Although SQS is similar to other queuing systems -such as Advanced Message Queuing Protocol (AMQP), it uses a custom API and -requires that you have an AWS account. See http://aws.amazon.com/sqs/ for -more details on how SQS works, what the pricing schedule looks like and how -to setup a queue. - -The "consumer" identity must have the following permissions on the queue: - - * `sqs:GetQueueUrl` - * `sqs:SendMessage` - * `sqs:SendMessageBatch` - -Typically, you should setup an IAM policy, create a user and apply the IAM -policy to the user. See http://aws.amazon.com/iam/ for more details on -setting up AWS identities. 
A sample policy is as follows: - -[source,json] -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "sqs:GetQueueUrl", - "sqs:SendMessage", - "sqs:SendMessageBatch" - ], - "Resource": "arn:aws:sqs:us-east-1:123456789012:my-sqs-queue" - } - ] -} - -==== Batch Publishing -This output publishes messages to SQS in batches in order to optimize event -throughput and increase performance. This is done using the -[`SendMessageBatch`](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessageBatch.html) -API. When publishing messages to SQS in batches, the following service limits -must be respected (see -[Limits in Amazon SQS](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html)): - - * The maximum allowed individual message size is 256KiB. - * The maximum total payload size (i.e. the sum of the sizes of all - individual messages within a batch) is also 256KiB. - -This plugin will dynamically adjust the size of the batch published to SQS in -order to ensure that the total payload size does not exceed 256KiB. - -WARNING: This output cannot currently handle messages larger than 256KiB. Any -single message exceeding this size will be dropped. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Sqs Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-batch_events>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message_max_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No -| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-access_key_id"] -===== `access_key_id` - - * Value type is <> - * There is no default value for this setting. - -This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: - -1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config -2. External credentials file specified by `aws_credentials_file` -3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` -4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` -5. IAM Instance Profile (available when running inside EC2) - -[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] -===== `aws_credentials_file` - - * Value type is <> - * There is no default value for this setting. - -Path to YAML file containing a hash of AWS credentials. 
-This file will only be loaded if `access_key_id` and -`secret_access_key` aren't set. The contents of the -file should look like this: - -[source,ruby] ----------------------------------- - :access_key_id: "12345" - :secret_access_key: "54321" ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-batch_events"] -===== `batch_events` - - * Value type is <> - * Default value is `10` - -The number of events to be sent in each batch. Set this to `1` to disable -the batch sending of messages. - -[id="{version}-plugins-{type}s-{plugin}-message_max_size"] -===== `message_max_size` - - * Value type is <> - * Default value is `"256KiB"` - -The maximum number of bytes for any message sent to SQS. Messages exceeding -this size will be dropped. See -http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html. - -[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] -===== `proxy_uri` - - * Value type is <> - * There is no default value for this setting. - -URI to proxy server if required - -[id="{version}-plugins-{type}s-{plugin}-queue"] -===== `queue` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The name of the target SQS queue. Note that this is just the name of the -queue, not the URL or ARN. - -[id="{version}-plugins-{type}s-{plugin}-region"] -===== `region` - - * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1` - * Default value is `"us-east-1"` - -The AWS Region - -[id="{version}-plugins-{type}s-{plugin}-secret_access_key"] -===== `secret_access_key` - - * Value type is <> - * There is no default value for this setting. - -The AWS Secret Access Key - -[id="{version}-plugins-{type}s-{plugin}-session_token"] -===== `session_token` - - * Value type is <> - * There is no default value for this setting. - -The AWS Session token for temporary credential - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/sqs-v5.0.1.asciidoc b/docs/versioned-plugins/outputs/sqs-v5.0.1.asciidoc deleted file mode 100644 index 0faef1ff6..000000000 --- a/docs/versioned-plugins/outputs/sqs-v5.0.1.asciidoc +++ /dev/null @@ -1,197 +0,0 @@ -:plugin: sqs -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v5.0.1 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-sqs/blob/v5.0.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Sqs output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Push events to an Amazon Web Services (AWS) Simple Queue Service (SQS) queue. - -SQS is a simple, scalable queue system that is part of the Amazon Web -Services suite of tools. Although SQS is similar to other queuing systems -such as Advanced Message Queuing Protocol (AMQP), it uses a custom API and -requires that you have an AWS account. 
See http://aws.amazon.com/sqs/ for -more details on how SQS works, what the pricing schedule looks like and how -to set up a queue. - -The "consumer" identity must have the following permissions on the queue: - - * `sqs:GetQueueUrl` - * `sqs:SendMessage` - * `sqs:SendMessageBatch` - -Typically, you should set up an IAM policy, create a user and apply the IAM -policy to the user. See http://aws.amazon.com/iam/ for more details on -setting up AWS identities. A sample policy is as follows: - -[source,json] -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "sqs:GetQueueUrl", - "sqs:SendMessage", - "sqs:SendMessageBatch" - ], - "Resource": "arn:aws:sqs:us-east-1:123456789012:my-sqs-queue" - } - ] -} - -==== Batch Publishing -This output publishes messages to SQS in batches in order to optimize event -throughput and increase performance. This is done using the -http://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessageBatch.html[`SendMessageBatch`] -API. When publishing messages to SQS in batches, the following service limits -must be respected (see -http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html[Limits in Amazon SQS]): - - * The maximum allowed individual message size is 256KiB. - * The maximum total payload size (i.e. the sum of the sizes of all - individual messages within a batch) is also 256KiB. - -This plugin will dynamically adjust the size of the batch published to SQS in -order to ensure that the total payload size does not exceed 256KiB. - -WARNING: This output cannot currently handle messages larger than 256KiB. Any -single message exceeding this size will be dropped. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Sqs Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-batch_events>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message_max_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No -| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-access_key_id"] -===== `access_key_id` - - * Value type is <> - * There is no default value for this setting. - -This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: - -1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config -2. External credentials file specified by `aws_credentials_file` -3.
Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` -4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` -5. IAM Instance Profile (available when running inside EC2) - -[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] -===== `aws_credentials_file` - - * Value type is <> - * There is no default value for this setting. - -Path to YAML file containing a hash of AWS credentials. -This file will only be loaded if `access_key_id` and -`secret_access_key` aren't set. The contents of the -file should look like this: - -[source,ruby] ----------------------------------- - :access_key_id: "12345" - :secret_access_key: "54321" ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-batch_events"] -===== `batch_events` - - * Value type is <> - * Default value is `10` - -The number of events to be sent in each batch. Set this to `1` to disable -the batch sending of messages. - -[id="{version}-plugins-{type}s-{plugin}-message_max_size"] -===== `message_max_size` - - * Value type is <> - * Default value is `"256KiB"` - -The maximum number of bytes for any message sent to SQS. Messages exceeding -this size will be dropped. See -http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html. - -[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] -===== `proxy_uri` - - * Value type is <> - * There is no default value for this setting. - -URI to proxy server if required - -[id="{version}-plugins-{type}s-{plugin}-queue"] -===== `queue` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The name of the target SQS queue. Note that this is just the name of the -queue, not the URL or ARN. - -[id="{version}-plugins-{type}s-{plugin}-region"] -===== `region` - - * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1` - * Default value is `"us-east-1"` - -The AWS Region - -[id="{version}-plugins-{type}s-{plugin}-secret_access_key"] -===== `secret_access_key` - - * Value type is <> - * There is no default value for this setting. - -The AWS Secret Access Key - -[id="{version}-plugins-{type}s-{plugin}-session_token"] -===== `session_token` - - * Value type is <> - * There is no default value for this setting. - -The AWS Session token for temporary credential - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/sqs-v5.0.2.asciidoc b/docs/versioned-plugins/outputs/sqs-v5.0.2.asciidoc deleted file mode 100644 index bbf41b62b..000000000 --- a/docs/versioned-plugins/outputs/sqs-v5.0.2.asciidoc +++ /dev/null @@ -1,197 +0,0 @@ -:plugin: sqs -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v5.0.2 -:release_date: 2017-11-13 -:changelog_url: https://github.com/logstash-plugins/logstash-output-sqs/blob/v5.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Sqs output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Push events to an Amazon Web Services (AWS) Simple Queue Service (SQS) queue. - -SQS is a simple, scalable queue system that is part of the Amazon Web -Services suite of tools. Although SQS is similar to other queuing systems -such as Advanced Message Queuing Protocol (AMQP), it uses a custom API and -requires that you have an AWS account. See http://aws.amazon.com/sqs/ for -more details on how SQS works, what the pricing schedule looks like and how -to set up a queue. - -The "consumer" identity must have the following permissions on the queue: - - * `sqs:GetQueueUrl` - * `sqs:SendMessage` - * `sqs:SendMessageBatch` - -Typically, you should set up an IAM policy, create a user and apply the IAM -policy to the user. See http://aws.amazon.com/iam/ for more details on -setting up AWS identities. A sample policy is as follows: - -[source,json] -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "sqs:GetQueueUrl", - "sqs:SendMessage", - "sqs:SendMessageBatch" - ], - "Resource": "arn:aws:sqs:us-east-1:123456789012:my-sqs-queue" - } - ] -} - -==== Batch Publishing -This output publishes messages to SQS in batches in order to optimize event -throughput and increase performance. This is done using the -http://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_SendMessageBatch.html[`SendMessageBatch`] -API. When publishing messages to SQS in batches, the following service limits -must be respected (see -http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html[Limits in Amazon SQS]): - - * The maximum allowed individual message size is 256KiB. - * The maximum total payload size (i.e. the sum of the sizes of all - individual messages within a batch) is also 256KiB. - -This plugin will dynamically adjust the size of the batch published to SQS in -order to ensure that the total payload size does not exceed 256KiB. - -WARNING: This output cannot currently handle messages larger than 256KiB. Any -single message exceeding this size will be dropped. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Sqs Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-batch_events>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message_max_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-queue>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No -| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-access_key_id"] -===== `access_key_id` - - * Value type is <> - * There is no default value for this setting. - -This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: - -1. Static configuration, using `access_key_id` and `secret_access_key` params in logstash plugin config -2. External credentials file specified by `aws_credentials_file` -3. Environment variables `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` -4. Environment variables `AMAZON_ACCESS_KEY_ID` and `AMAZON_SECRET_ACCESS_KEY` -5. IAM Instance Profile (available when running inside EC2) - -[id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] -===== `aws_credentials_file` - - * Value type is <> - * There is no default value for this setting. - -Path to YAML file containing a hash of AWS credentials. -This file will only be loaded if `access_key_id` and -`secret_access_key` aren't set. The contents of the -file should look like this: - -[source,ruby] ----------------------------------- - :access_key_id: "12345" - :secret_access_key: "54321" ----------------------------------- - - -[id="{version}-plugins-{type}s-{plugin}-batch_events"] -===== `batch_events` - - * Value type is <> - * Default value is `10` - -The number of events to be sent in each batch. Set this to `1` to disable -the batch sending of messages. - -[id="{version}-plugins-{type}s-{plugin}-message_max_size"] -===== `message_max_size` - - * Value type is <> - * Default value is `"256KiB"` - -The maximum number of bytes for any message sent to SQS. Messages exceeding -this size will be dropped. See -http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/limits-messages.html. - -[id="{version}-plugins-{type}s-{plugin}-proxy_uri"] -===== `proxy_uri` - - * Value type is <> - * There is no default value for this setting. - -URI to proxy server if required - -[id="{version}-plugins-{type}s-{plugin}-queue"] -===== `queue` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The name of the target SQS queue. Note that this is just the name of the -queue, not the URL or ARN. 
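-
-For reference, a minimal `sqs` output needs little more than this setting. The
-following sketch is illustrative only (the queue name and region are
-hypothetical placeholders):
-
-[source,ruby]
-output {
-  sqs {
-    queue  => "my-sqs-queue"   # queue name only, not its URL or ARN
-    region => "us-east-1"
-  }
-}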
- -[id="{version}-plugins-{type}s-{plugin}-region"] -===== `region` - - * Value can be any of: `us-east-1`, `us-east-2`, `us-west-1`, `us-west-2`, `eu-central-1`, `eu-west-1`, `eu-west-2`, `ap-southeast-1`, `ap-southeast-2`, `ap-northeast-1`, `ap-northeast-2`, `sa-east-1`, `us-gov-west-1`, `cn-north-1`, `ap-south-1`, `ca-central-1` - * Default value is `"us-east-1"` - -The AWS Region - -[id="{version}-plugins-{type}s-{plugin}-secret_access_key"] -===== `secret_access_key` - - * Value type is <> - * There is no default value for this setting. - -The AWS Secret Access Key - -[id="{version}-plugins-{type}s-{plugin}-session_token"] -===== `session_token` - - * Value type is <> - * There is no default value for this setting. - -The AWS Session token for temporary credential - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/statsd-index.asciidoc b/docs/versioned-plugins/outputs/statsd-index.asciidoc deleted file mode 100644 index 297baedf6..000000000 --- a/docs/versioned-plugins/outputs/statsd-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: statsd -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-13 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::statsd-v3.1.4.asciidoc[] -include::statsd-v3.1.3.asciidoc[] -include::statsd-v3.1.2.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/statsd-v3.1.2.asciidoc b/docs/versioned-plugins/outputs/statsd-v3.1.2.asciidoc deleted file mode 100644 index 25627c427..000000000 --- a/docs/versioned-plugins/outputs/statsd-v3.1.2.asciidoc +++ /dev/null @@ -1,193 +0,0 @@ -:plugin: statsd -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.2 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-statsd/blob/v3.1.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Statsd output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -statsd is a network daemon for aggregating statistics, such as counters and timers, -and shipping over UDP to backend services, such as Graphite or Datadog. The general -idea is that you send metrics to statsd and every few seconds it will emit the -aggregated values to the backend. Example aggregates are sums, average and maximum -values, their standard deviation, etc. This plugin makes it easy to send such -metrics based on data in Logstash events. - -You can learn about statsd here: - -* https://codeascraft.com/2011/02/15/measure-anything-measure-everything/[Etsy blog post announcing statsd] -* https://github.com/etsy/statsd[statsd on github] - -Typical examples of how this can be used with Logstash include counting HTTP hits -by response code, summing the total number of bytes of traffic served, and tracking -the 50th and 95th percentile of the processing time of requests. - -Each metric emitted to statsd has a dot-separated path, a type, and a value. 
The -metric path is built from the `namespace` and `sender` options together with the -metric name that's picked up depending on the type of metric. All in all, the -metric path will follow this pattern: - - namespace.sender.metric - -With regards to this plugin, the default namespace is "logstash", the default -sender is the `host` field, and the metric name depends on what is set as the -metric name in the `increment`, `decrement`, `timing`, `count`, `set` or `gauge` -options. In metric paths, colons (":"), pipes ("|") and at signs ("@") are reserved -and will be replaced by underscores ("_"). - -Example: -[source,ruby] -output { - statsd { - host => "statsd.example.org" - count => { - "http.bytes" => "%{bytes}" - } - } -} - -If run on a host named hal9000 the configuration above will send the following -metric to statsd if the current event has 123 in its `bytes` field: - - logstash.hal9000.http.bytes:123|c - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Statsd Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-decrement>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-gauge>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-increment>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-namespace>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sample_rate>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sender>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-set>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timing>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-count"] -===== `count` - - * Value type is <> - * Default value is `{}` - -A count metric. `metric_name => count` as hash. `%{fieldname}` substitutions are -allowed in the metric names. - -[id="{version}-plugins-{type}s-{plugin}-decrement"] -===== `decrement` - - * Value type is <> - * Default value is `[]` - -A decrement metric. Metric names as array. `%{fieldname}` substitutions are -allowed in the metric names. - -[id="{version}-plugins-{type}s-{plugin}-gauge"] -===== `gauge` - - * Value type is <> - * Default value is `{}` - -A gauge metric. `metric_name => gauge` as hash. `%{fieldname}` substitutions are -allowed in the metric names. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"localhost"` - -The hostname or IP address of the statsd server. - -[id="{version}-plugins-{type}s-{plugin}-increment"] -===== `increment` - - * Value type is <> - * Default value is `[]` - -An increment metric. Metric names as array. `%{fieldname}` substitutions are -allowed in the metric names. - -[id="{version}-plugins-{type}s-{plugin}-namespace"] -===== `namespace` - - * Value type is <> - * Default value is `"logstash"` - -The statsd namespace to use for this metric. `%{fieldname}` substitutions are -allowed. 
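-
-For example, the following sketch (the `app` field is hypothetical) derives
-the namespace from an event field, replacing the default `logstash` prefix in
-the emitted metric path:
-
-[source,ruby]
-output {
-  statsd {
-    namespace => "%{app}"        # yields e.g. "billing.hal9000.http.hits"
-    increment => ["http.hits"]
-  }
-}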
- -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `8125` - -The port to connect to on your statsd server. - -[id="{version}-plugins-{type}s-{plugin}-sample_rate"] -===== `sample_rate` - - * Value type is <> - * Default value is `1` - -The sample rate for the metric. - -[id="{version}-plugins-{type}s-{plugin}-sender"] -===== `sender` - - * Value type is <> - * Default value is `"%{host}"` - -The name of the sender. Dots will be replaced with underscores. `%{fieldname}` -substitutions are allowed. - -[id="{version}-plugins-{type}s-{plugin}-set"] -===== `set` - - * Value type is <> - * Default value is `{}` - -A set metric. `metric_name => "string"` to append as hash. `%{fieldname}` -substitutions are allowed in the metric names. - -[id="{version}-plugins-{type}s-{plugin}-timing"] -===== `timing` - - * Value type is <> - * Default value is `{}` - -A timing metric. `metric_name => duration` as hash. `%{fieldname}` substitutions -are allowed in the metric names. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/statsd-v3.1.3.asciidoc b/docs/versioned-plugins/outputs/statsd-v3.1.3.asciidoc deleted file mode 100644 index f9129b75a..000000000 --- a/docs/versioned-plugins/outputs/statsd-v3.1.3.asciidoc +++ /dev/null @@ -1,193 +0,0 @@ -:plugin: statsd -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.3 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-statsd/blob/v3.1.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Statsd output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -statsd is a network daemon for aggregating statistics, such as counters and timers, -and shipping over UDP to backend services, such as Graphite or Datadog. The general -idea is that you send metrics to statsd and every few seconds it will emit the -aggregated values to the backend. Example aggregates are sums, average and maximum -values, their standard deviation, etc. This plugin makes it easy to send such -metrics based on data in Logstash events. - -You can learn about statsd here: - -* https://codeascraft.com/2011/02/15/measure-anything-measure-everything/[Etsy blog post announcing statsd] -* https://github.com/etsy/statsd[statsd on github] - -Typical examples of how this can be used with Logstash include counting HTTP hits -by response code, summing the total number of bytes of traffic served, and tracking -the 50th and 95th percentile of the processing time of requests. - -Each metric emitted to statsd has a dot-separated path, a type, and a value. The -metric path is built from the `namespace` and `sender` options together with the -metric name that's picked up depending on the type of metric. All in all, the -metric path will follow this pattern: - - namespace.sender.metric - -With regards to this plugin, the default namespace is "logstash", the default -sender is the `host` field, and the metric name depends on what is set as the -metric name in the `increment`, `decrement`, `timing`, `count`, `set` or `gauge` -options. 
In metric paths, colons (":"), pipes ("|") and at signs ("@") are reserved -and will be replaced by underscores ("_"). - -Example: -[source,ruby] -output { - statsd { - host => "statsd.example.org" - count => { - "http.bytes" => "%{bytes}" - } - } -} - -If run on a host named hal9000 the configuration above will send the following -metric to statsd if the current event has 123 in its `bytes` field: - - logstash.hal9000.http.bytes:123|c - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Statsd Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-decrement>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-gauge>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-increment>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-namespace>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sample_rate>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sender>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-set>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timing>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-count"] -===== `count` - - * Value type is <> - * Default value is `{}` - -A count metric. `metric_name => count` as hash. `%{fieldname}` substitutions are -allowed in the metric names. - -[id="{version}-plugins-{type}s-{plugin}-decrement"] -===== `decrement` - - * Value type is <> - * Default value is `[]` - -A decrement metric. Metric names as array. `%{fieldname}` substitutions are -allowed in the metric names. - -[id="{version}-plugins-{type}s-{plugin}-gauge"] -===== `gauge` - - * Value type is <> - * Default value is `{}` - -A gauge metric. `metric_name => gauge` as hash. `%{fieldname}` substitutions are -allowed in the metric names. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"localhost"` - -The hostname or IP address of the statsd server. - -[id="{version}-plugins-{type}s-{plugin}-increment"] -===== `increment` - - * Value type is <> - * Default value is `[]` - -An increment metric. Metric names as array. `%{fieldname}` substitutions are -allowed in the metric names. - -[id="{version}-plugins-{type}s-{plugin}-namespace"] -===== `namespace` - - * Value type is <> - * Default value is `"logstash"` - -The statsd namespace to use for this metric. `%{fieldname}` substitutions are -allowed. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `8125` - -The port to connect to on your statsd server. - -[id="{version}-plugins-{type}s-{plugin}-sample_rate"] -===== `sample_rate` - - * Value type is <> - * Default value is `1` - -The sample rate for the metric. - -[id="{version}-plugins-{type}s-{plugin}-sender"] -===== `sender` - - * Value type is <> - * Default value is `"%{host}"` - -The name of the sender. Dots will be replaced with underscores. `%{fieldname}` -substitutions are allowed. 
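-
-As an illustrative sketch (the sender name is a hypothetical placeholder), a
-static sender groups metrics from every host under one node in the metric
-path instead of the default `%{host}` value:
-
-[source,ruby]
-output {
-  statsd {
-    sender    => "pipeline01"            # dots would become underscores
-    increment => ["events.processed"]
-  }
-}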
- -[id="{version}-plugins-{type}s-{plugin}-set"] -===== `set` - - * Value type is <> - * Default value is `{}` - -A set metric. `metric_name => "string"` to append as hash. `%{fieldname}` -substitutions are allowed in the metric names. - -[id="{version}-plugins-{type}s-{plugin}-timing"] -===== `timing` - - * Value type is <> - * Default value is `{}` - -A timing metric. `metric_name => duration` as hash. `%{fieldname}` substitutions -are allowed in the metric names. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/statsd-v3.1.4.asciidoc b/docs/versioned-plugins/outputs/statsd-v3.1.4.asciidoc deleted file mode 100644 index 5ec387217..000000000 --- a/docs/versioned-plugins/outputs/statsd-v3.1.4.asciidoc +++ /dev/null @@ -1,193 +0,0 @@ -:plugin: statsd -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.4 -:release_date: 2017-11-13 -:changelog_url: https://github.com/logstash-plugins/logstash-output-statsd/blob/v3.1.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Statsd output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -statsd is a network daemon for aggregating statistics, such as counters and timers, -and shipping over UDP to backend services, such as Graphite or Datadog. The general -idea is that you send metrics to statsd and every few seconds it will emit the -aggregated values to the backend. Example aggregates are sums, average and maximum -values, their standard deviation, etc. This plugin makes it easy to send such -metrics based on data in Logstash events. - -You can learn about statsd here: - -* https://codeascraft.com/2011/02/15/measure-anything-measure-everything/[Etsy blog post announcing statsd] -* https://github.com/etsy/statsd[statsd on github] - -Typical examples of how this can be used with Logstash include counting HTTP hits -by response code, summing the total number of bytes of traffic served, and tracking -the 50th and 95th percentile of the processing time of requests. - -Each metric emitted to statsd has a dot-separated path, a type, and a value. The -metric path is built from the `namespace` and `sender` options together with the -metric name that's picked up depending on the type of metric. All in all, the -metric path will follow this pattern: - - namespace.sender.metric - -With regards to this plugin, the default namespace is "logstash", the default -sender is the `host` field, and the metric name depends on what is set as the -metric name in the `increment`, `decrement`, `timing`, `count`, `set` or `gauge` -options. In metric paths, colons (":"), pipes ("|") and at signs ("@") are reserved -and will be replaced by underscores ("_"). 
- -Example: -[source,ruby] -output { - statsd { - host => "statsd.example.org" - count => { - "http.bytes" => "%{bytes}" - } - } -} - -If run on a host named hal9000 the configuration above will send the following -metric to statsd if the current event has 123 in its `bytes` field: - - logstash.hal9000.http.bytes:123|c - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Statsd Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-count>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-decrement>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-gauge>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-increment>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-namespace>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sample_rate>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sender>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-set>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timing>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-count"] -===== `count` - - * Value type is <> - * Default value is `{}` - -A count metric. `metric_name => count` as hash. `%{fieldname}` substitutions are -allowed in the metric names. - -[id="{version}-plugins-{type}s-{plugin}-decrement"] -===== `decrement` - - * Value type is <> - * Default value is `[]` - -A decrement metric. Metric names as array. `%{fieldname}` substitutions are -allowed in the metric names. - -[id="{version}-plugins-{type}s-{plugin}-gauge"] -===== `gauge` - - * Value type is <> - * Default value is `{}` - -A gauge metric. `metric_name => gauge` as hash. `%{fieldname}` substitutions are -allowed in the metric names. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"localhost"` - -The hostname or IP address of the statsd server. - -[id="{version}-plugins-{type}s-{plugin}-increment"] -===== `increment` - - * Value type is <> - * Default value is `[]` - -An increment metric. Metric names as array. `%{fieldname}` substitutions are -allowed in the metric names. - -[id="{version}-plugins-{type}s-{plugin}-namespace"] -===== `namespace` - - * Value type is <> - * Default value is `"logstash"` - -The statsd namespace to use for this metric. `%{fieldname}` substitutions are -allowed. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `8125` - -The port to connect to on your statsd server. - -[id="{version}-plugins-{type}s-{plugin}-sample_rate"] -===== `sample_rate` - - * Value type is <> - * Default value is `1` - -The sample rate for the metric. - -[id="{version}-plugins-{type}s-{plugin}-sender"] -===== `sender` - - * Value type is <> - * Default value is `"%{host}"` - -The name of the sender. Dots will be replaced with underscores. `%{fieldname}` -substitutions are allowed. - -[id="{version}-plugins-{type}s-{plugin}-set"] -===== `set` - - * Value type is <> - * Default value is `{}` - -A set metric. 
`metric_name => "string"` to append as hash. `%{fieldname}` -substitutions are allowed in the metric names. - -[id="{version}-plugins-{type}s-{plugin}-timing"] -===== `timing` - - * Value type is <> - * Default value is `{}` - -A timing metric. `metric_name => duration` as hash. `%{fieldname}` substitutions -are allowed in the metric names. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/stdout-index.asciidoc b/docs/versioned-plugins/outputs/stdout-index.asciidoc deleted file mode 100644 index 61f66c359..000000000 --- a/docs/versioned-plugins/outputs/stdout-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: stdout -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-13 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::stdout-v3.1.3.asciidoc[] -include::stdout-v3.1.2.asciidoc[] -include::stdout-v3.1.1.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/stdout-v3.1.1.asciidoc b/docs/versioned-plugins/outputs/stdout-v3.1.1.asciidoc deleted file mode 100644 index 6fb470bdb..000000000 --- a/docs/versioned-plugins/outputs/stdout-v3.1.1.asciidoc +++ /dev/null @@ -1,64 +0,0 @@ -:plugin: stdout -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-stdout/blob/v3.1.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Stdout output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -A simple output which prints to the STDOUT of the shell running -Logstash. This output can be quite convenient when debugging -plugin configurations, by allowing instant access to the event -data after it has passed through the inputs and filters. - -For example, the following output configuration, in conjunction with the -Logstash `-e` command-line flag, will allow you to see the results -of your event pipeline for quick iteration. -[source,ruby] - output { - stdout {} - } - -Useful codecs include: - -`rubydebug`: outputs event data using the ruby "awesome_print" -http://rubygems.org/gems/awesome_print[library] - -[source,ruby] - output { - stdout { codec => rubydebug } - } - -`json`: outputs event data in structured JSON format -[source,ruby] - output { - stdout { codec => json } - } - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Stdout Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -|======================================================================= - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/stdout-v3.1.2.asciidoc b/docs/versioned-plugins/outputs/stdout-v3.1.2.asciidoc deleted file mode 100644 index b0ca17e6b..000000000 --- a/docs/versioned-plugins/outputs/stdout-v3.1.2.asciidoc +++ /dev/null @@ -1,60 +0,0 @@ -:plugin: stdout -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.2 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-stdout/blob/v3.1.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Stdout output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -A simple output which prints to the STDOUT of the shell running -Logstash. This output can be quite convenient when debugging -plugin configurations, by allowing instant access to the event -data after it has passed through the inputs and filters. - -For example, the following output configuration, in conjunction with the -Logstash `-e` command-line flag, will allow you to see the results -of your event pipeline for quick iteration. -[source,ruby] - output { - stdout {} - } - -Useful codecs include: - -`rubydebug`: outputs event data using the ruby "awesome_print" -http://rubygems.org/gems/awesome_print[library] - -[source,ruby] - output { - stdout { codec => rubydebug } - } - -`json`: outputs event data in structured JSON format -[source,ruby] - output { - stdout { codec => json } - } - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Stdout Output Configuration Options - -There are no special configuration options for this plugin, -but it does support the <<{version}-plugins-{type}s-{plugin}-common-options>>. - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/stdout-v3.1.3.asciidoc b/docs/versioned-plugins/outputs/stdout-v3.1.3.asciidoc deleted file mode 100644 index c12bf9ca1..000000000 --- a/docs/versioned-plugins/outputs/stdout-v3.1.3.asciidoc +++ /dev/null @@ -1,60 +0,0 @@ -:plugin: stdout -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.3 -:release_date: 2017-11-13 -:changelog_url: https://github.com/logstash-plugins/logstash-output-stdout/blob/v3.1.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Stdout output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -A simple output which prints to the STDOUT of the shell running -Logstash. 
This output can be quite convenient when debugging -plugin configurations, by allowing instant access to the event -data after it has passed through the inputs and filters. - -For example, the following output configuration, in conjunction with the -Logstash `-e` command-line flag, will allow you to see the results -of your event pipeline for quick iteration. -[source,ruby] - output { - stdout {} - } - -Useful codecs include: - -`rubydebug`: outputs event data using the ruby "awesome_print" -http://rubygems.org/gems/awesome_print[library] - -[source,ruby] - output { - stdout { codec => rubydebug } - } - -`json`: outputs event data in structured JSON format -[source,ruby] - output { - stdout { codec => json } - } - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Stdout Output Configuration Options - -There are no special configuration options for this plugin, -but it does support the <<{version}-plugins-{type}s-{plugin}-common-options>>. - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/stomp-index.asciidoc b/docs/versioned-plugins/outputs/stomp-index.asciidoc deleted file mode 100644 index 500600f51..000000000 --- a/docs/versioned-plugins/outputs/stomp-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: stomp -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-13 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::stomp-v3.0.8.asciidoc[] -include::stomp-v3.0.7.asciidoc[] -include::stomp-v3.0.5.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/stomp-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/stomp-v3.0.5.asciidoc deleted file mode 100644 index 5e0a364ba..000000000 --- a/docs/versioned-plugins/outputs/stomp-v3.0.5.asciidoc +++ /dev/null @@ -1,123 +0,0 @@ -:plugin: stomp -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-stomp/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Stomp output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output writes events using the STOMP protocol. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Stomp Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-debug>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-headers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-debug"] -===== `debug` - - * Value type is <> - * Default value is `false` - -Enable debugging output? - -[id="{version}-plugins-{type}s-{plugin}-destination"] -===== `destination` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The destination to write events to. Supports string expansion, meaning -`%{foo}` values will expand to the field value. - -Example: "/topic/logstash" - -[id="{version}-plugins-{type}s-{plugin}-headers"] -===== `headers` - - * Value type is <> - * There is no default value for this setting. - -Custom headers to send with each message. Supports string expansion, meaning -`%{foo}` values will expand to the field value. - -Example: headers => ["amq-msg-type", "text", "host", "%{host}"] - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The address of the STOMP server. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * Default value is `""` - -The password to authenticate with. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `61613` - -The port to connect to on your STOMP server. - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * Default value is `""` - -The username to authenticate with. - -[id="{version}-plugins-{type}s-{plugin}-vhost"] -===== `vhost` - - * Value type is <> - * Default value is `nil` - -The vhost to use - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/stomp-v3.0.7.asciidoc b/docs/versioned-plugins/outputs/stomp-v3.0.7.asciidoc deleted file mode 100644 index 96be58564..000000000 --- a/docs/versioned-plugins/outputs/stomp-v3.0.7.asciidoc +++ /dev/null @@ -1,123 +0,0 @@ -:plugin: stomp -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.7 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-stomp/blob/v3.0.7/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Stomp output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output writes events using the STOMP protocol.
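-
-As a minimal sketch (the broker address is hypothetical), a working
-configuration needs only the two required settings, `host` and `destination`:
-
-[source,ruby]
-output {
-  stomp {
-    host        => "activemq.example.org"   # required
-    destination => "/topic/logstash"        # required, supports %{foo} expansion
-  }
-}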
- -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Stomp Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-debug>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-headers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-debug"] -===== `debug` - - * Value type is <> - * Default value is `false` - -Enable debugging output? - -[id="{version}-plugins-{type}s-{plugin}-destination"] -===== `destination` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The destination to write events to. Supports string expansion, meaning -`%{foo}` values will expand to the field value. - -Example: "/topic/logstash" - -[id="{version}-plugins-{type}s-{plugin}-headers"] -===== `headers` - - * Value type is <> - * There is no default value for this setting. - -Custom headers to send with each message. Supports string expansion, meaning -`%{foo}` values will expand to the field value. - -Example: headers => ["amq-msg-type", "text", "host", "%{host}"] - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The address of the STOMP server. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * Default value is `""` - -The password to authenticate with. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `61613` - -The port to connect to on your STOMP server. - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * Default value is `""` - -The username to authenticate with. - -[id="{version}-plugins-{type}s-{plugin}-vhost"] -===== `vhost` - - * Value type is <> - * Default value is `nil` - -The vhost to use - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/stomp-v3.0.8.asciidoc b/docs/versioned-plugins/outputs/stomp-v3.0.8.asciidoc deleted file mode 100644 index a883b2eb0..000000000 --- a/docs/versioned-plugins/outputs/stomp-v3.0.8.asciidoc +++ /dev/null @@ -1,123 +0,0 @@ -:plugin: stomp -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.8 -:release_date: 2017-11-13 -:changelog_url: https://github.com/logstash-plugins/logstash-output-stomp/blob/v3.0.8/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT!
-/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Stomp output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output writes events using the STOMP protocol. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Stomp Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-debug>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-destination>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-headers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-vhost>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-debug"] -===== `debug` - - * Value type is <> - * Default value is `false` - -Enable debugging output? - -[id="{version}-plugins-{type}s-{plugin}-destination"] -===== `destination` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The destination to write events to. Supports string expansion, meaning -`%{foo}` values will expand to the field value. - -Example: "/topic/logstash" - -[id="{version}-plugins-{type}s-{plugin}-headers"] -===== `headers` - - * Value type is <> - * There is no default value for this setting. - -Custom headers to send with each message. Supports string expansion, meaning -`%{foo}` values will expand to the field value. - -Example: headers => ["amq-msg-type", "text", "host", "%{host}"] - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The address of the STOMP server. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * Value type is <> - * Default value is `""` - -The password to authenticate with. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `61613` - -The port to connect to on your STOMP server. - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * Value type is <> - * Default value is `""` - -The username to authenticate with.
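-
-Putting the authentication settings together, a hedged sketch (all values are
-hypothetical placeholders) for a broker that requires credentials:
-
-[source,ruby]
-output {
-  stomp {
-    host        => "activemq.example.org"
-    destination => "/queue/logstash"
-    user        => "logstash"
-    password    => "s3cr3t"
-  }
-}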
- -[id="{version}-plugins-{type}s-{plugin}-vhost"] -===== `vhost` - - * Value type is <> - * Default value is `nil` - -The vhost to use - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/syslog-index.asciidoc b/docs/versioned-plugins/outputs/syslog-index.asciidoc deleted file mode 100644 index 27a75c337..000000000 --- a/docs/versioned-plugins/outputs/syslog-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: syslog -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-13 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::syslog-v3.0.4.asciidoc[] -include::syslog-v3.0.3.asciidoc[] -include::syslog-v3.0.2.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/syslog-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/syslog-v3.0.2.asciidoc deleted file mode 100644 index 1bbb6a4e2..000000000 --- a/docs/versioned-plugins/outputs/syslog-v3.0.2.asciidoc +++ /dev/null @@ -1,239 +0,0 @@ -:plugin: syslog -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.2 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-syslog/blob/v3.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Syslog output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Send events to a syslog server. - -You can send messages compliant with RFC3164 or RFC5424 -using either UDP or TCP as the transport protocol. - -By default the contents of the `message` field will be shipped as -the free-form message text part of the emitted syslog message. If -your messages don't have a `message` field or if you for some other -reason want to change the emitted message, modify the `message` -configuration option. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Syslog Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-appname>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-facility>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-msgid>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-priority>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-procid>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-protocol>> |<>, one of `["tcp", "udp", "ssl-tcp"]`|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-rfc>> |<>, one of `["rfc3164", "rfc5424"]`|No -| <<{version}-plugins-{type}s-{plugin}-severity>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sourcehost>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-use_labels>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-appname"] -===== `appname` - - * Value type is <> - * Default value is `"LOGSTASH"` - -application name for syslog message. The new value can include `%{foo}` strings -to help you build a new value from other parts of the event. - -[id="{version}-plugins-{type}s-{plugin}-facility"] -===== `facility` - - * Value type is <> - * Default value is `"user-level"` - -facility label for syslog message -default fallback to user-level as in rfc3164 -The new value can include `%{foo}` strings -to help you build a new value from other parts of the event. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -syslog server address to connect to - -[id="{version}-plugins-{type}s-{plugin}-message"] -===== `message` - - * Value type is <> - * Default value is `"%{message}"` - -message text to log. The new value can include `%{foo}` strings -to help you build a new value from other parts of the event. - -[id="{version}-plugins-{type}s-{plugin}-msgid"] -===== `msgid` - - * Value type is <> - * Default value is `"-"` - -message id for syslog message. The new value can include `%{foo}` strings -to help you build a new value from other parts of the event. - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -syslog server port to connect to - -[id="{version}-plugins-{type}s-{plugin}-priority"] -===== `priority` - - * Value type is <> - * Default value is `"%{syslog_pri}"` - -syslog priority -The new value can include `%{foo}` strings -to help you build a new value from other parts of the event. - -[id="{version}-plugins-{type}s-{plugin}-procid"] -===== `procid` - - * Value type is <> - * Default value is `"-"` - -process id for syslog message. 
-to help you build a new value from other parts of the event.
-
-[id="{version}-plugins-{type}s-{plugin}-protocol"]
-===== `protocol`
-
- * Value can be any of: `tcp`, `udp`, `ssl-tcp`
- * Default value is `"udp"`
-
-Syslog server protocol. You can choose between UDP, TCP, and SSL/TLS over TCP (`ssl-tcp`).
-
-[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"]
-===== `reconnect_interval`
-
- * Value type is <>
- * Default value is `1`
-
-Retry interval, in seconds, when the connection fails.
-
-[id="{version}-plugins-{type}s-{plugin}-rfc"]
-===== `rfc`
-
- * Value can be any of: `rfc3164`, `rfc5424`
- * Default value is `"rfc3164"`
-
-Syslog message format: either RFC3164 or RFC5424.
-
-[id="{version}-plugins-{type}s-{plugin}-severity"]
-===== `severity`
-
- * Value type is <>
- * Default value is `"notice"`
-
-Severity label for the syslog message. Defaults to `notice`, as in RFC3164.
-The new value can include `%{foo}` strings
-to help you build a new value from other parts of the event.
-
-[id="{version}-plugins-{type}s-{plugin}-sourcehost"]
-===== `sourcehost`
-
- * Value type is <>
- * Default value is `"%{host}"`
-
-Source host for the syslog message. The new value can include `%{foo}` strings
-to help you build a new value from other parts of the event.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"]
-===== `ssl_cacert`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The SSL CA certificate, chainfile or CA path. The system CA path is automatically included.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_cert"]
-===== `ssl_cert`
-
- * Value type is <>
- * There is no default value for this setting.
-
-SSL certificate path.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
-===== `ssl_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-SSL key path.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
-===== `ssl_key_passphrase`
-
- * Value type is <>
- * Default value is `nil`
-
-SSL key passphrase.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_verify"]
-===== `ssl_verify`
-
- * Value type is <>
- * Default value is `false`
-
-Verify the identity of the other end of the SSL connection against the CA.
-
-[id="{version}-plugins-{type}s-{plugin}-use_labels"]
-===== `use_labels`
-
- * Value type is <>
- * Default value is `true`
-
-Use label parsing for severity and facility levels.
-If set to `false`, the `priority` field is used instead.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/syslog-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/syslog-v3.0.3.asciidoc
deleted file mode 100644
index 4f0986290..000000000
--- a/docs/versioned-plugins/outputs/syslog-v3.0.3.asciidoc
+++ /dev/null
@@ -1,239 +0,0 @@
-:plugin: syslog
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.3
-:release_date: 2017-08-16
-:changelog_url: https://github.com/logstash-plugins/logstash-output-syslog/blob/v3.0.3/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Syslog output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Send events to a syslog server.
-
-You can send messages compliant with RFC3164 or RFC5424
-using either UDP or TCP as the transport protocol.
-
-By default the contents of the `message` field will be shipped as
-the free-form message text part of the emitted syslog message. If
-your messages don't have a `message` field or if you for some other
-reason want to change the emitted message, modify the `message`
-configuration option.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Syslog Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-appname>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-facility>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-msgid>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-priority>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-procid>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-protocol>> |<>, one of `["tcp", "udp", "ssl-tcp"]`|No
-| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-rfc>> |<>, one of `["rfc3164", "rfc5424"]`|No
-| <<{version}-plugins-{type}s-{plugin}-severity>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-sourcehost>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_cacert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-use_labels>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-appname"]
-===== `appname`
-
- * Value type is <>
- * Default value is `"LOGSTASH"`
-
-Application name for the syslog message. The new value can include `%{foo}` strings
-to help you build a new value from other parts of the event.
-
-[id="{version}-plugins-{type}s-{plugin}-facility"]
-===== `facility`
-
- * Value type is <>
- * Default value is `"user-level"`
-
-Facility label for the syslog message. Defaults to `user-level`, as in RFC3164.
-The new value can include `%{foo}` strings
-to help you build a new value from other parts of the event.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Syslog server address to connect to.
-
-[id="{version}-plugins-{type}s-{plugin}-message"]
-===== `message`
-
- * Value type is <>
- * Default value is `"%{message}"`
-
-Message text to log. The new value can include `%{foo}` strings
-to help you build a new value from other parts of the event.
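-
-As a hedged sketch of the `%{foo}` substitution described above, assuming the
-event carries hypothetical `service` and `msg` fields:
-
-[source,ruby]
----------------------------------
-output {
-  syslog {
-    host => "syslog.example.com"   # placeholder
-    port => 514
-    appname => "%{service}"        # filled from the event's service field
-    message => "%{msg}"            # overrides the default "%{message}"
-  }
-}
----------------------------------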
-
-[id="{version}-plugins-{type}s-{plugin}-msgid"]
-===== `msgid`
-
- * Value type is <>
- * Default value is `"-"`
-
-Message ID for the syslog message. The new value can include `%{foo}` strings
-to help you build a new value from other parts of the event.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Syslog server port to connect to.
-
-[id="{version}-plugins-{type}s-{plugin}-priority"]
-===== `priority`
-
- * Value type is <>
- * Default value is `"%{syslog_pri}"`
-
-Syslog priority. The new value can include `%{foo}` strings
-to help you build a new value from other parts of the event.
-
-[id="{version}-plugins-{type}s-{plugin}-procid"]
-===== `procid`
-
- * Value type is <>
- * Default value is `"-"`
-
-Process ID for the syslog message. The new value can include `%{foo}` strings
-to help you build a new value from other parts of the event.
-
-[id="{version}-plugins-{type}s-{plugin}-protocol"]
-===== `protocol`
-
- * Value can be any of: `tcp`, `udp`, `ssl-tcp`
- * Default value is `"udp"`
-
-Syslog server protocol. You can choose between UDP, TCP, and SSL/TLS over TCP (`ssl-tcp`).
-
-[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"]
-===== `reconnect_interval`
-
- * Value type is <>
- * Default value is `1`
-
-Retry interval, in seconds, when the connection fails.
-
-[id="{version}-plugins-{type}s-{plugin}-rfc"]
-===== `rfc`
-
- * Value can be any of: `rfc3164`, `rfc5424`
- * Default value is `"rfc3164"`
-
-Syslog message format: either RFC3164 or RFC5424.
-
-[id="{version}-plugins-{type}s-{plugin}-severity"]
-===== `severity`
-
- * Value type is <>
- * Default value is `"notice"`
-
-Severity label for the syslog message. Defaults to `notice`, as in RFC3164.
-The new value can include `%{foo}` strings
-to help you build a new value from other parts of the event.
-
-[id="{version}-plugins-{type}s-{plugin}-sourcehost"]
-===== `sourcehost`
-
- * Value type is <>
- * Default value is `"%{host}"`
-
-Source host for the syslog message. The new value can include `%{foo}` strings
-to help you build a new value from other parts of the event.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"]
-===== `ssl_cacert`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The SSL CA certificate, chainfile or CA path. The system CA path is automatically included.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_cert"]
-===== `ssl_cert`
-
- * Value type is <>
- * There is no default value for this setting.
-
-SSL certificate path.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
-===== `ssl_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-SSL key path.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
-===== `ssl_key_passphrase`
-
- * Value type is <>
- * Default value is `nil`
-
-SSL key passphrase.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_verify"]
-===== `ssl_verify`
-
- * Value type is <>
- * Default value is `false`
-
-Verify the identity of the other end of the SSL connection against the CA.
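-
-A hedged sketch combining the TLS-related options above; the certificate path
-is a placeholder:
-
-[source,ruby]
----------------------------------
-output {
-  syslog {
-    host => "syslog.example.com"            # placeholder
-    port => 6514
-    protocol => "ssl-tcp"
-    ssl_cacert => "/etc/ssl/certs/ca.pem"   # placeholder path
-    ssl_verify => true
-  }
-}
----------------------------------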
-
-[id="{version}-plugins-{type}s-{plugin}-use_labels"]
-===== `use_labels`
-
- * Value type is <>
- * Default value is `true`
-
-Use label parsing for severity and facility levels.
-If set to `false`, the `priority` field is used instead.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/syslog-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/syslog-v3.0.4.asciidoc
deleted file mode 100644
index c8d6c78c2..000000000
--- a/docs/versioned-plugins/outputs/syslog-v3.0.4.asciidoc
+++ /dev/null
@@ -1,239 +0,0 @@
-:plugin: syslog
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v3.0.4
-:release_date: 2017-11-13
-:changelog_url: https://github.com/logstash-plugins/logstash-output-syslog/blob/v3.0.4/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Syslog output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Send events to a syslog server.
-
-You can send messages compliant with RFC3164 or RFC5424
-using either UDP or TCP as the transport protocol.
-
-By default the contents of the `message` field will be shipped as
-the free-form message text part of the emitted syslog message. If
-your messages don't have a `message` field or if you for some other
-reason want to change the emitted message, modify the `message`
-configuration option.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Syslog Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-appname>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-facility>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-message>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-msgid>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-priority>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-procid>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-protocol>> |<>, one of `["tcp", "udp", "ssl-tcp"]`|No
-| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-rfc>> |<>, one of `["rfc3164", "rfc5424"]`|No
-| <<{version}-plugins-{type}s-{plugin}-severity>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-sourcehost>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_cacert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-use_labels>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
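-
-As quick orientation before the individual options, a hedged sketch that sets
-explicit facility and severity labels (all values are illustrative):
-
-[source,ruby]
----------------------------------
-output {
-  syslog {
-    host => "syslog.example.com"   # placeholder
-    port => 514
-    facility => "daemon"
-    severity => "error"
-  }
-}
----------------------------------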
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-appname"]
-===== `appname`
-
- * Value type is <>
- * Default value is `"LOGSTASH"`
-
-Application name for the syslog message. The new value can include `%{foo}` strings
-to help you build a new value from other parts of the event.
-
-[id="{version}-plugins-{type}s-{plugin}-facility"]
-===== `facility`
-
- * Value type is <>
- * Default value is `"user-level"`
-
-Facility label for the syslog message. Defaults to `user-level`, as in RFC3164.
-The new value can include `%{foo}` strings
-to help you build a new value from other parts of the event.
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Syslog server address to connect to.
-
-[id="{version}-plugins-{type}s-{plugin}-message"]
-===== `message`
-
- * Value type is <>
- * Default value is `"%{message}"`
-
-Message text to log. The new value can include `%{foo}` strings
-to help you build a new value from other parts of the event.
-
-[id="{version}-plugins-{type}s-{plugin}-msgid"]
-===== `msgid`
-
- * Value type is <>
- * Default value is `"-"`
-
-Message ID for the syslog message. The new value can include `%{foo}` strings
-to help you build a new value from other parts of the event.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-Syslog server port to connect to.
-
-[id="{version}-plugins-{type}s-{plugin}-priority"]
-===== `priority`
-
- * Value type is <>
- * Default value is `"%{syslog_pri}"`
-
-Syslog priority. The new value can include `%{foo}` strings
-to help you build a new value from other parts of the event.
-
-[id="{version}-plugins-{type}s-{plugin}-procid"]
-===== `procid`
-
- * Value type is <>
- * Default value is `"-"`
-
-Process ID for the syslog message. The new value can include `%{foo}` strings
-to help you build a new value from other parts of the event.
-
-[id="{version}-plugins-{type}s-{plugin}-protocol"]
-===== `protocol`
-
- * Value can be any of: `tcp`, `udp`, `ssl-tcp`
- * Default value is `"udp"`
-
-Syslog server protocol. You can choose between UDP, TCP, and SSL/TLS over TCP (`ssl-tcp`).
-
-[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"]
-===== `reconnect_interval`
-
- * Value type is <>
- * Default value is `1`
-
-Retry interval, in seconds, when the connection fails.
-
-[id="{version}-plugins-{type}s-{plugin}-rfc"]
-===== `rfc`
-
- * Value can be any of: `rfc3164`, `rfc5424`
- * Default value is `"rfc3164"`
-
-Syslog message format: either RFC3164 or RFC5424.
-
-[id="{version}-plugins-{type}s-{plugin}-severity"]
-===== `severity`
-
- * Value type is <>
- * Default value is `"notice"`
-
-Severity label for the syslog message. Defaults to `notice`, as in RFC3164.
-The new value can include `%{foo}` strings
-to help you build a new value from other parts of the event.
-
-[id="{version}-plugins-{type}s-{plugin}-sourcehost"]
-===== `sourcehost`
-
- * Value type is <>
- * Default value is `"%{host}"`
-
-Source host for the syslog message. The new value can include `%{foo}` strings
-to help you build a new value from other parts of the event.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"]
-===== `ssl_cacert`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The SSL CA certificate, chainfile or CA path. The system CA path is automatically included.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_cert"]
-===== `ssl_cert`
-
- * Value type is <>
- * There is no default value for this setting.
-
-SSL certificate path.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
-===== `ssl_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-SSL key path.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
-===== `ssl_key_passphrase`
-
- * Value type is <>
- * Default value is `nil`
-
-SSL key passphrase.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_verify"]
-===== `ssl_verify`
-
- * Value type is <>
- * Default value is `false`
-
-Verify the identity of the other end of the SSL connection against the CA.
-
-[id="{version}-plugins-{type}s-{plugin}-use_labels"]
-===== `use_labels`
-
- * Value type is <>
- * Default value is `true`
-
-Use label parsing for severity and facility levels.
-If set to `false`, the `priority` field is used instead.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/tcp-index.asciidoc b/docs/versioned-plugins/outputs/tcp-index.asciidoc
deleted file mode 100644
index 5919b7d83..000000000
--- a/docs/versioned-plugins/outputs/tcp-index.asciidoc
+++ /dev/null
@@ -1,20 +0,0 @@
-:plugin: tcp
-:type: output
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-13
-| <> | 2017-08-16
-| <> | 2017-08-01
-| <> | 2017-08-18
-| <> | 2017-06-23
-|=======================================================================
-
-include::tcp-v5.0.2.asciidoc[]
-include::tcp-v5.0.1.asciidoc[]
-include::tcp-v5.0.0.asciidoc[]
-include::tcp-v4.0.2.asciidoc[]
-include::tcp-v4.0.1.asciidoc[]
-
diff --git a/docs/versioned-plugins/outputs/tcp-v4.0.1.asciidoc b/docs/versioned-plugins/outputs/tcp-v4.0.1.asciidoc
deleted file mode 100644
index 2179f1262..000000000
--- a/docs/versioned-plugins/outputs/tcp-v4.0.1.asciidoc
+++ /dev/null
@@ -1,158 +0,0 @@
-:plugin: tcp
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.0.1
-:release_date: 2017-06-23
-:changelog_url: https://github.com/logstash-plugins/logstash-output-tcp/blob/v4.0.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="plugins-{type}-{plugin}"]
-
-=== Tcp output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Write events over a TCP socket.
-
-Each event's JSON representation is separated by a newline.
-
-Can either accept connections from clients or connect to a server,
-depending on `mode`.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Tcp Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
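-
-Before the option table, a minimal hedged sketch of the client mode described
-above; the host and port are placeholders for your own collector:
-
-[source,ruby]
----------------------------------
-output {
-  tcp {
-    host => "collector.example.com"   # placeholder
-    port => 9000
-    mode => "client"                  # default; shown for clarity
-  }
-}
----------------------------------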
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_cacert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-When mode is `server`, the address to listen on.
-When mode is `client`, the address to connect to.
-
-[id="{version}-plugins-{type}s-{plugin}-message_format"]
-===== `message_format` (DEPRECATED)
-
- * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
- * Value type is <>
- * There is no default value for this setting.
-
-The format to use when writing events. This value
-supports any string and can include `%{name}` and other dynamic
-strings.
-
-If this setting is omitted, the full JSON representation of the
-event will be written as a single line.
-
-[id="{version}-plugins-{type}s-{plugin}-mode"]
-===== `mode`
-
- * Value can be any of: `server`, `client`
- * Default value is `"client"`
-
-Mode to operate in. `server` listens for client connections,
-`client` connects to a server.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-When mode is `server`, the port to listen on.
-When mode is `client`, the port to connect to.
-
-[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"]
-===== `reconnect_interval`
-
- * Value type is <>
- * Default value is `10`
-
-Retry interval, in seconds, when the connection fails.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"]
-===== `ssl_cacert`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The SSL CA certificate, chainfile or CA path. The system CA path is automatically included.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_cert"]
-===== `ssl_cert`
-
- * Value type is <>
- * There is no default value for this setting.
-
-SSL certificate path.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_enable"]
-===== `ssl_enable`
-
- * Value type is <>
- * Default value is `false`
-
-Enable SSL (must be set for other `ssl_` options to take effect).
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
-===== `ssl_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-SSL key path.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
-===== `ssl_key_passphrase`
-
- * Value type is <>
- * Default value is `nil`
-
-SSL key passphrase.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_verify"]
-===== `ssl_verify`
-
- * Value type is <>
- * Default value is `false`
-
-Verify the identity of the other end of the SSL connection against the CA.
-For input, sets the field `sslsubject` to that of the client certificate.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/tcp-v4.0.2.asciidoc b/docs/versioned-plugins/outputs/tcp-v4.0.2.asciidoc
deleted file mode 100644
index 8dd59e103..000000000
--- a/docs/versioned-plugins/outputs/tcp-v4.0.2.asciidoc
+++ /dev/null
@@ -1,158 +0,0 @@
-:plugin: tcp
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v4.0.2
-:release_date: 2017-08-18
-:changelog_url: https://github.com/logstash-plugins/logstash-output-tcp/blob/v4.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Tcp output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Write events over a TCP socket.
-
-Each event's JSON representation is separated by a newline.
-
-Can either accept connections from clients or connect to a server,
-depending on `mode`.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Tcp Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_cacert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-When mode is `server`, the address to listen on.
-When mode is `client`, the address to connect to.
-
-[id="{version}-plugins-{type}s-{plugin}-message_format"]
-===== `message_format` (DEPRECATED)
-
- * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions.
- * Value type is <>
- * There is no default value for this setting.
-
-The format to use when writing events. This value
-supports any string and can include `%{name}` and other dynamic
-strings.
-
-If this setting is omitted, the full JSON representation of the
-event will be written as a single line.
-
-[id="{version}-plugins-{type}s-{plugin}-mode"]
-===== `mode`
-
- * Value can be any of: `server`, `client`
- * Default value is `"client"`
-
-Mode to operate in. `server` listens for client connections,
-`client` connects to a server.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-When mode is `server`, the port to listen on.
-When mode is `client`, the port to connect to.
-
-[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"]
-===== `reconnect_interval`
-
- * Value type is <>
- * Default value is `10`
-
-Retry interval, in seconds, when the connection fails.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"]
-===== `ssl_cacert`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The SSL CA certificate, chainfile or CA path. The system CA path is automatically included.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_cert"]
-===== `ssl_cert`
-
- * Value type is <>
- * There is no default value for this setting.
-
-SSL certificate path.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_enable"]
-===== `ssl_enable`
-
- * Value type is <>
- * Default value is `false`
-
-Enable SSL (must be set for other `ssl_` options to take effect).
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
-===== `ssl_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-SSL key path.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
-===== `ssl_key_passphrase`
-
- * Value type is <>
- * Default value is `nil`
-
-SSL key passphrase.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_verify"]
-===== `ssl_verify`
-
- * Value type is <>
- * Default value is `false`
-
-Verify the identity of the other end of the SSL connection against the CA.
-For input, sets the field `sslsubject` to that of the client certificate.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/tcp-v5.0.0.asciidoc b/docs/versioned-plugins/outputs/tcp-v5.0.0.asciidoc
deleted file mode 100644
index c46777d17..000000000
--- a/docs/versioned-plugins/outputs/tcp-v5.0.0.asciidoc
+++ /dev/null
@@ -1,144 +0,0 @@
-:plugin: tcp
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v5.0.0
-:release_date: 2017-08-01
-:changelog_url: https://github.com/logstash-plugins/logstash-output-tcp/blob/v5.0.0/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Tcp output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Write events over a TCP socket.
-
-Each event's JSON representation is separated by a newline.
-
-Can either accept connections from clients or connect to a server,
-depending on `mode`.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Tcp Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
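-
-Note that the deprecated `message_format` option from the 4.x pages above no
-longer appears in this option list; in the 5.x series, output formatting is
-typically delegated to a codec instead. A hedged sketch, assuming the standard
-`json_lines` codec is installed:
-
-[source,ruby]
----------------------------------
-output {
-  tcp {
-    host => "collector.example.com"   # placeholder
-    port => 9000
-    codec => json_lines               # one JSON document per line
-  }
-}
----------------------------------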
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_cacert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-When mode is `server`, the address to listen on.
-When mode is `client`, the address to connect to.
-
-[id="{version}-plugins-{type}s-{plugin}-mode"]
-===== `mode`
-
- * Value can be any of: `server`, `client`
- * Default value is `"client"`
-
-Mode to operate in. `server` listens for client connections,
-`client` connects to a server.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-When mode is `server`, the port to listen on.
-When mode is `client`, the port to connect to.
-
-[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"]
-===== `reconnect_interval`
-
- * Value type is <>
- * Default value is `10`
-
-Retry interval, in seconds, when the connection fails.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"]
-===== `ssl_cacert`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The SSL CA certificate, chainfile or CA path. The system CA path is automatically included.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_cert"]
-===== `ssl_cert`
-
- * Value type is <>
- * There is no default value for this setting.
-
-SSL certificate path.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_enable"]
-===== `ssl_enable`
-
- * Value type is <>
- * Default value is `false`
-
-Enable SSL (must be set for other `ssl_` options to take effect).
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
-===== `ssl_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-SSL key path.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
-===== `ssl_key_passphrase`
-
- * Value type is <>
- * Default value is `nil`
-
-SSL key passphrase.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_verify"]
-===== `ssl_verify`
-
- * Value type is <>
- * Default value is `false`
-
-Verify the identity of the other end of the SSL connection against the CA.
-For input, sets the field `sslsubject` to that of the client certificate.
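-
-A hedged sketch of server mode with SSL enabled; the certificate paths are
-placeholders:
-
-[source,ruby]
----------------------------------
-output {
-  tcp {
-    host => "0.0.0.0"                     # listen on all interfaces
-    port => 9000
-    mode => "server"
-    ssl_enable => true                    # required for the other ssl_ options
-    ssl_cert => "/etc/ssl/logstash.crt"   # placeholder paths
-    ssl_key => "/etc/ssl/logstash.key"
-  }
-}
----------------------------------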
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/tcp-v5.0.1.asciidoc b/docs/versioned-plugins/outputs/tcp-v5.0.1.asciidoc
deleted file mode 100644
index ba365b42d..000000000
--- a/docs/versioned-plugins/outputs/tcp-v5.0.1.asciidoc
+++ /dev/null
@@ -1,144 +0,0 @@
-:plugin: tcp
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v5.0.1
-:release_date: 2017-08-16
-:changelog_url: https://github.com/logstash-plugins/logstash-output-tcp/blob/v5.0.1/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Tcp output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Write events over a TCP socket.
-
-Each event's JSON representation is separated by a newline.
-
-Can either accept connections from clients or connect to a server,
-depending on `mode`.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Tcp Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_cacert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-When mode is `server`, the address to listen on.
-When mode is `client`, the address to connect to.
-
-[id="{version}-plugins-{type}s-{plugin}-mode"]
-===== `mode`
-
- * Value can be any of: `server`, `client`
- * Default value is `"client"`
-
-Mode to operate in. `server` listens for client connections,
-`client` connects to a server.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-When mode is `server`, the port to listen on.
-When mode is `client`, the port to connect to.
-
-[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"]
-===== `reconnect_interval`
-
- * Value type is <>
- * Default value is `10`
-
-Retry interval, in seconds, when the connection fails.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"]
-===== `ssl_cacert`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The SSL CA certificate, chainfile or CA path. The system CA path is automatically included.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_cert"]
-===== `ssl_cert`
-
- * Value type is <>
- * There is no default value for this setting.
-
-SSL certificate path.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_enable"]
-===== `ssl_enable`
-
- * Value type is <>
- * Default value is `false`
-
-Enable SSL (must be set for other `ssl_` options to take effect).
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
-===== `ssl_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-SSL key path.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
-===== `ssl_key_passphrase`
-
- * Value type is <>
- * Default value is `nil`
-
-SSL key passphrase.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_verify"]
-===== `ssl_verify`
-
- * Value type is <>
- * Default value is `false`
-
-Verify the identity of the other end of the SSL connection against the CA.
-For input, sets the field `sslsubject` to that of the client certificate.
-
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
\ No newline at end of file
diff --git a/docs/versioned-plugins/outputs/tcp-v5.0.2.asciidoc b/docs/versioned-plugins/outputs/tcp-v5.0.2.asciidoc
deleted file mode 100644
index e4b0257f0..000000000
--- a/docs/versioned-plugins/outputs/tcp-v5.0.2.asciidoc
+++ /dev/null
@@ -1,144 +0,0 @@
-:plugin: tcp
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-:version: v5.0.2
-:release_date: 2017-11-13
-:changelog_url: https://github.com/logstash-plugins/logstash-output-tcp/blob/v5.0.2/CHANGELOG.md
-:include_path: ../../../../logstash/docs/include
-///////////////////////////////////////////
-END - GENERATED VARIABLES, DO NOT EDIT!
-///////////////////////////////////////////
-
-[id="{version}-plugins-{type}s-{plugin}"]
-
-=== Tcp output plugin {version}
-
-include::{include_path}/plugin_header.asciidoc[]
-
-==== Description
-
-Write events over a TCP socket.
-
-Each event's JSON representation is separated by a newline.
-
-Can either accept connections from clients or connect to a server,
-depending on `mode`.
-
-[id="{version}-plugins-{type}s-{plugin}-options"]
-==== Tcp Output Configuration Options
-
-This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes
-| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_cacert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-host"]
-===== `host`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-When mode is `server`, the address to listen on.
-When mode is `client`, the address to connect to.
-
-[id="{version}-plugins-{type}s-{plugin}-mode"]
-===== `mode`
-
- * Value can be any of: `server`, `client`
- * Default value is `"client"`
-
-Mode to operate in. `server` listens for client connections,
-`client` connects to a server.
-
-[id="{version}-plugins-{type}s-{plugin}-port"]
-===== `port`
-
- * This is a required setting.
- * Value type is <>
- * There is no default value for this setting.
-
-When mode is `server`, the port to listen on.
-When mode is `client`, the port to connect to.
-
-[id="{version}-plugins-{type}s-{plugin}-reconnect_interval"]
-===== `reconnect_interval`
-
- * Value type is <>
- * Default value is `10`
-
-Retry interval, in seconds, when the connection fails.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_cacert"]
-===== `ssl_cacert`
-
- * Value type is <>
- * There is no default value for this setting.
-
-The SSL CA certificate, chainfile or CA path. The system CA path is automatically included.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_cert"]
-===== `ssl_cert`
-
- * Value type is <>
- * There is no default value for this setting.
-
-SSL certificate path.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_enable"]
-===== `ssl_enable`
-
- * Value type is <>
- * Default value is `false`
-
-Enable SSL (must be set for other `ssl_` options to take effect).
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key"]
-===== `ssl_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-SSL key path.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"]
-===== `ssl_key_passphrase`
-
- * Value type is <>
- * Default value is `nil`
-
-SSL key passphrase.
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_verify"]
-===== `ssl_verify`
-
- * Value type is <>
- * Default value is `false`
-
-Verify the identity of the other end of the SSL connection against the CA.
-For input, sets the field `sslsubject` to that of the client certificate.
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/timber-index.asciidoc b/docs/versioned-plugins/outputs/timber-index.asciidoc deleted file mode 100644 index 64662fcfb..000000000 --- a/docs/versioned-plugins/outputs/timber-index.asciidoc +++ /dev/null @@ -1,12 +0,0 @@ -:plugin: timber -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-09-02 -|======================================================================= - -include::timber-v1.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/timber-v1.0.3.asciidoc b/docs/versioned-plugins/outputs/timber-v1.0.3.asciidoc deleted file mode 100644 index fbe37b524..000000000 --- a/docs/versioned-plugins/outputs/timber-v1.0.3.asciidoc +++ /dev/null @@ -1,228 +0,0 @@ -:plugin: timber -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v1.0.3 -:release_date: 2017-09-02 -:changelog_url: https://github.com/logstash-plugins/logstash-output-timber/blob/v1.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Timber output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output sends structured events to the https://timber.io[Timber.io logging service]. -Timber is a cloud-based logging service designed for developers, providing easy features -out of the box that make you more productive. -https://timber.io/docs/app/console/tail-a-user[Tail users], -https://timber.io/docs/app/console/trace-http-requests[trace requests], -https://timber.io/docs/app/console/inspect-http-requests[inspect HTTP parameters], -and https://timber.io/docs/app/console/searching[search] on rich structured data without -sacrificing readability. - -Internally, it's a highly efficient HTTP transport that uses batching and retries for -fast and reliable delivery. - -This output will execute up to 'pool_max' requests in parallel for performance. -Consider this when tuning this plugin for performance. The default of 50 should -be sufficient for most setups. - -Additionally, note that when parallel execution is used strict ordering of events is not -guaranteed! - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Timber Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
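-
-Since `api_key` is effectively the only setting most users need, a minimal,
-hedged sketch (the key shown is a placeholder for the one from your Timber
-app):
-
-[source,ruby]
----------------------------------
-output {
-  timber {
-    api_key => "your_timber_api_key"   # placeholder
-  }
-}
----------------------------------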
-
-[cols="<,<,<",options="header",]
-|=======================================================================
-|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-api_key>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-client_cert>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-client_key>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-connect_timeout>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-keystore_type>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-request_timeout>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-socket_timeout>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No
-| <<{version}-plugins-{type}s-{plugin}-truststore_type>> |<>|No
-|=======================================================================
-
-Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
-output plugins.
-
- 
-
-[id="{version}-plugins-{type}s-{plugin}-api_key"]
-===== `api_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Your Timber.io API key. You can obtain your API key by creating an app in the
-https://app.timber.io[Timber console].
-
-
-[id="{version}-plugins-{type}s-{plugin}-cacert"]
-===== `cacert`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If you need to use a custom X.509 CA (.pem certs) specify the path to that here.
-
-
-[id="{version}-plugins-{type}s-{plugin}-client_cert"]
-===== `client_cert`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If you'd like to use a client certificate (note, most people don't want this) set the path to the x509 cert here.
-
-
-[id="{version}-plugins-{type}s-{plugin}-client_key"]
-===== `client_key`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If you're using a client certificate, specify the path to the encryption key here.
-
-
-[id="{version}-plugins-{type}s-{plugin}-connect_timeout"]
-===== `connect_timeout`
-
- * Value type is <>
- * Default value is `10`
-
-Timeout (in seconds) to wait for a connection to be established. Default is `10s`.
-
-
-[id="{version}-plugins-{type}s-{plugin}-keystore"]
-===== `keystore`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If you need to use a custom keystore (`.jks`) specify that here. This does not work with .pem keys!
-
-
-[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
-===== `keystore_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Specify the keystore password here.
-Note, most .jks files created with keytool require a password!
-
-
-[id="{version}-plugins-{type}s-{plugin}-keystore_type"]
-===== `keystore_type`
-
- * Value type is <>
- * Default value is `"JKS"`
-
-Specify the keystore type here. One of `JKS` or `PKCS12`. Default is `JKS`.
-
-
-[id="{version}-plugins-{type}s-{plugin}-pool_max"]
-===== `pool_max`
-
- * Value type is <>
- * Default value is `50`
-
-Max number of concurrent connections.
Defaults to `50`.
-
-
-[id="{version}-plugins-{type}s-{plugin}-proxy"]
-===== `proxy`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If you'd like to use an HTTP proxy, this setting supports multiple configuration syntaxes:
-
-1. Proxy host in form: `http://proxy.org:1234`
-2. Proxy host in form: `{host => "proxy.org", port => 80, scheme => 'http', user => 'username@host', password => 'password'}`
-3. Proxy host in form: `{url => 'http://proxy.org:1234', user => 'username@host', password => 'password'}`
-
-
-[id="{version}-plugins-{type}s-{plugin}-request_timeout"]
-===== `request_timeout`
-
- * Value type is <>
- * Default value is `60`
-
-Timeout (in seconds) for the entire request. This plugin's HTTP client is
-based on https://github.com/cheald/manticore[Manticore]; for an example of its
-usage see https://github.com/logstash-plugins/logstash-input-http_poller
-
-
-[id="{version}-plugins-{type}s-{plugin}-socket_timeout"]
-===== `socket_timeout`
-
- * Value type is <>
- * Default value is `10`
-
-Timeout (in seconds) to wait for data on the socket. Default is `10s`.
-
-
-[id="{version}-plugins-{type}s-{plugin}-ssl_certificate_validation"]
-===== `ssl_certificate_validation`
-
- * Value type is <>
- * Default value is `true`
-
-Set this to `false` to disable SSL/TLS certificate validation.
-Note: setting this to `false` is generally considered insecure!
-
-
-[id="{version}-plugins-{type}s-{plugin}-truststore"]
-===== `truststore`
-
- * Value type is <>
- * There is no default value for this setting.
-
-If you need to use a custom truststore (`.jks`) specify that here. This does not work with .pem certs!
-
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_password"]
-===== `truststore_password`
-
- * Value type is <>
- * There is no default value for this setting.
-
-Specify the truststore password here.
-Note, most .jks files created with keytool require a password!
-
-
-[id="{version}-plugins-{type}s-{plugin}-truststore_type"]
-===== `truststore_type`
-
- * Value type is <>
- * Default value is `"JKS"`
-
-Specify the truststore type here. One of `JKS` or `PKCS12`. Default is `JKS`.
-
-
-[id="{version}-plugins-{type}s-{plugin}-common-options"]
-include::{include_path}/{type}.asciidoc[]
diff --git a/docs/versioned-plugins/outputs/udp-index.asciidoc b/docs/versioned-plugins/outputs/udp-index.asciidoc
deleted file mode 100644
index 9404b6832..000000000
--- a/docs/versioned-plugins/outputs/udp-index.asciidoc
+++ /dev/null
@@ -1,16 +0,0 @@
-:plugin: udp
-:type: output
-
-include::{include_path}/version-list-intro.asciidoc[]
-
-|=======================================================================
-| Version | Release Date
-| <> | 2017-11-13
-| <> | 2017-08-16
-| <> | 2017-06-23
-|=======================================================================
-
-include::udp-v3.0.5.asciidoc[]
-include::udp-v3.0.4.asciidoc[]
-include::udp-v3.0.3.asciidoc[]
-
diff --git a/docs/versioned-plugins/outputs/udp-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/udp-v3.0.3.asciidoc
deleted file mode 100644
index 7621f4675..000000000
--- a/docs/versioned-plugins/outputs/udp-v3.0.3.asciidoc
+++ /dev/null
@@ -1,65 +0,0 @@
-:plugin: udp
-:type: output
-
-///////////////////////////////////////////
-START - GENERATED VARIABLES, DO NOT EDIT!
-/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-udp/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Udp output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Send events over UDP - -Keep in mind that UDP will lose messages. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Udp Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The address to send messages to - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The port to send messages on - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/udp-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/udp-v3.0.4.asciidoc deleted file mode 100644 index d186993ed..000000000 --- a/docs/versioned-plugins/outputs/udp-v3.0.4.asciidoc +++ /dev/null @@ -1,65 +0,0 @@ -:plugin: udp -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-udp/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Udp output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Send events over UDP - -Keep in mind that UDP will lose messages. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Udp Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. 
- * Value type is <> - * There is no default value for this setting. - -The address to send messages to - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The port to send messages on - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/udp-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/udp-v3.0.5.asciidoc deleted file mode 100644 index 45c7ab8a5..000000000 --- a/docs/versioned-plugins/outputs/udp-v3.0.5.asciidoc +++ /dev/null @@ -1,65 +0,0 @@ -:plugin: udp -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-11-13 -:changelog_url: https://github.com/logstash-plugins/logstash-output-udp/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Udp output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Send events over UDP - -Keep in mind that UDP will lose messages. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Udp Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The address to send messages to - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. 
- -The port to send messages on - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/webhdfs-index.asciidoc b/docs/versioned-plugins/outputs/webhdfs-index.asciidoc deleted file mode 100644 index 1512342fc..000000000 --- a/docs/versioned-plugins/outputs/webhdfs-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: webhdfs -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-13 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::webhdfs-v3.0.5.asciidoc[] -include::webhdfs-v3.0.4.asciidoc[] -include::webhdfs-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/webhdfs-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/webhdfs-v3.0.3.asciidoc deleted file mode 100644 index 8a6e5fb1f..000000000 --- a/docs/versioned-plugins/outputs/webhdfs-v3.0.3.asciidoc +++ /dev/null @@ -1,293 +0,0 @@ -:plugin: webhdfs -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-webhdfs/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Webhdfs output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This plugin sends Logstash events into files in HDFS via -the https://hadoop.apache.org/docs/r1.0.4/webhdfs.html[webhdfs] REST API. - -==== Dependencies -This plugin has no dependency on jars from hadoop, thus reducing configuration and compatibility -problems. It uses the webhdfs gem from Kazuki Ohta and TAGOMORI Satoshi (@see: https://github.com/kzk/webhdfs). -Optional dependencies are the zlib and snappy gems if you use the compression functionality. - -==== Operational Notes -If you get an error like: - - Max write retries reached. Exception: initialize: name or service not known {:level=>:error} - -make sure that the hostname of your namenode is resolvable on the host running Logstash. When creating/appending -to a file, webhdfs sometimes sends a `307 TEMPORARY_REDIRECT` with the `HOSTNAME` of the machine it's running on. - -==== Usage -This is an example of Logstash config: - -[source,ruby] ----------------------------------- -input { - ... -} -filter { - ... -} -output { - webhdfs { - host => "127.0.0.1" # (required) - port => 50070 # (optional, default: 50070) - path => "/user/logstash/dt=%{+YYYY-MM-dd}/logstash-%{+HH}.log" # (required) - user => "hue" # (required) - } -} ----------------------------------- - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Webhdfs Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-compression>> |<>, one of `["none", "snappy", "gzip"]`|No -| <<{version}-plugins-{type}s-{plugin}-flush_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-idle_flush_time>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-kerberos_keytab>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-open_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-read_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_known_errors>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_times>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-single_file_per_thread>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-snappy_bufsize>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-snappy_format>> |<>, one of `["stream", "file"]`|No -| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-standby_host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-standby_port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-use_httpfs>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-use_kerberos_auth>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-use_ssl_auth>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-compression"] -===== `compression` - - * Value can be any of: `none`, `snappy`, `gzip` - * Default value is `"none"` - -Compress output. One of ['none', 'snappy', 'gzip'] - -[id="{version}-plugins-{type}s-{plugin}-flush_size"] -===== `flush_size` - - * Value type is <> - * Default value is `500` - -Sending data to webhdfs if event count is above, even if `store_interval_in_secs` is not reached. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The server name for webhdfs/httpfs connections. - -[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"] -===== `idle_flush_time` - - * Value type is <> - * Default value is `1` - -Sending data to webhdfs in x seconds intervals. - -[id="{version}-plugins-{type}s-{plugin}-kerberos_keytab"] -===== `kerberos_keytab` - - * Value type is <> - * There is no default value for this setting. - -Set kerberos keytab file. Note that the gssapi library needs to be available to use this. - -[id="{version}-plugins-{type}s-{plugin}-open_timeout"] -===== `open_timeout` - - * Value type is <> - * Default value is `30` - -WebHdfs open timeout, default 30s. - -[id="{version}-plugins-{type}s-{plugin}-path"] -===== `path` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The path to the file to write to. 
Event fields can be used here, -as well as date fields in the joda time format, e.g.: -`/user/logstash/dt=%{+YYYY-MM-dd}/%{@source_host}-%{+HH}.log` - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `50070` - -The server port for webhdfs/httpfs connections. - -[id="{version}-plugins-{type}s-{plugin}-read_timeout"] -===== `read_timeout` - - * Value type is <> - * Default value is `30` - -The WebHdfs read timeout, default 30s. - -[id="{version}-plugins-{type}s-{plugin}-retry_interval"] -===== `retry_interval` - - * Value type is <> - * Default value is `0.5` - -How long should we wait between retries. - -[id="{version}-plugins-{type}s-{plugin}-retry_known_errors"] -===== `retry_known_errors` - - * Value type is <> - * Default value is `true` - -Retry some known webhdfs errors. These may be caused by race conditions when appending to the same file, etc. - -[id="{version}-plugins-{type}s-{plugin}-retry_times"] -===== `retry_times` - - * Value type is <> - * Default value is `5` - -How many times should we retry. If retry_times is exceeded, an error will be logged and the event will be discarded. - -[id="{version}-plugins-{type}s-{plugin}-single_file_per_thread"] -===== `single_file_per_thread` - - * Value type is <> - * Default value is `false` - -Avoid appending to the same file in multiple threads. -This solves some problems with multiple logstash output threads and locked file leases in webhdfs. -If this option is set to true, %{[@metadata][thread_id]} needs to be used in the path config setting. - -[id="{version}-plugins-{type}s-{plugin}-snappy_bufsize"] -===== `snappy_bufsize` - - * Value type is <> - * Default value is `32768` - -Set snappy chunksize. Only necessary for stream format. Defaults to 32k. Max is 65536 -@see http://code.google.com/p/snappy/source/browse/trunk/framing_format.txt - -[id="{version}-plugins-{type}s-{plugin}-snappy_format"] -===== `snappy_format` - - * Value can be any of: `stream`, `file` - * Default value is `"stream"` - -Set snappy format. One of "stream", "file". Set to stream to be hive compatible. - -[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] -===== `ssl_cert` - - * Value type is <> - * There is no default value for this setting. - -Set ssl cert file. - -[id="{version}-plugins-{type}s-{plugin}-ssl_key"] -===== `ssl_key` - - * Value type is <> - * There is no default value for this setting. - -Set ssl key file. - -[id="{version}-plugins-{type}s-{plugin}-standby_host"] -===== `standby_host` - - * Value type is <> - * Default value is `false` - -Standby namenode for ha hdfs. - -[id="{version}-plugins-{type}s-{plugin}-standby_port"] -===== `standby_port` - - * Value type is <> - * Default value is `50070` - -Standby namenode port for ha hdfs. - -[id="{version}-plugins-{type}s-{plugin}-use_httpfs"] -===== `use_httpfs` - - * Value type is <> - * Default value is `false` - -Use httpfs mode if set to true, else webhdfs. - -[id="{version}-plugins-{type}s-{plugin}-use_kerberos_auth"] -===== `use_kerberos_auth` - - * Value type is <> - * Default value is `false` - -Set kerberos authentication. - -[id="{version}-plugins-{type}s-{plugin}-use_ssl_auth"] -===== `use_ssl_auth` - - * Value type is <> - * Default value is `false` - -Set ssl authentication. Note that the openssl library needs to be available to use this. - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The username for webhdfs.
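For illustration, here is a sketch that combines the compression and retry options described above with the required settings. The host, user, and path values are placeholders, and `compression => "gzip"` assumes the zlib gem is available:

[source,ruby]
----------------------------------
output {
  webhdfs {
    host => "namenode.example.org"    # placeholder namenode host
    port => 50070                     # default webhdfs port
    path => "/user/logstash/dt=%{+YYYY-MM-dd}/logstash-%{+HH}.log"
    user => "logstash"                # placeholder webhdfs user
    compression => "gzip"             # one of: none, snappy, gzip
    retry_times => 5                  # defaults shown for clarity
    retry_interval => 0.5
  }
}
----------------------------------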
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/webhdfs-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/webhdfs-v3.0.4.asciidoc deleted file mode 100644 index 7cdab4354..000000000 --- a/docs/versioned-plugins/outputs/webhdfs-v3.0.4.asciidoc +++ /dev/null @@ -1,293 +0,0 @@ -:plugin: webhdfs -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-webhdfs/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Webhdfs output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This plugin sends Logstash events into files in HDFS via -the https://hadoop.apache.org/docs/r1.0.4/webhdfs.html[webhdfs] REST API. - -==== Dependencies -This plugin has no dependency on jars from hadoop, thus reducing configuration and compatibility -problems. It uses the webhdfs gem from Kazuki Ohta and TAGOMORI Satoshi (@see: https://github.com/kzk/webhdfs). -Optional dependencies are the zlib and snappy gems if you use the compression functionality. - -==== Operational Notes -If you get an error like: - - Max write retries reached. Exception: initialize: name or service not known {:level=>:error} - -make sure that the hostname of your namenode is resolvable on the host running Logstash. When creating/appending -to a file, webhdfs sometimes sends a `307 TEMPORARY_REDIRECT` with the `HOSTNAME` of the machine it's running on. - -==== Usage -This is an example of Logstash config: - -[source,ruby] ----------------------------------- -input { - ... -} -filter { - ... -} -output { - webhdfs { - host => "127.0.0.1" # (required) - port => 50070 # (optional, default: 50070) - path => "/user/logstash/dt=%{+YYYY-MM-dd}/logstash-%{+HH}.log" # (required) - user => "hue" # (required) - } -} ----------------------------------- - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Webhdfs Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-compression>> |<>, one of `["none", "snappy", "gzip"]`|No -| <<{version}-plugins-{type}s-{plugin}-flush_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-idle_flush_time>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-kerberos_keytab>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-open_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-read_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_known_errors>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_times>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-single_file_per_thread>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-snappy_bufsize>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-snappy_format>> |<>, one of `["stream", "file"]`|No -| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-standby_host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-standby_port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-use_httpfs>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-use_kerberos_auth>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-use_ssl_auth>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-compression"] -===== `compression` - - * Value can be any of: `none`, `snappy`, `gzip` - * Default value is `"none"` - -Compress output. One of ['none', 'snappy', 'gzip'] - -[id="{version}-plugins-{type}s-{plugin}-flush_size"] -===== `flush_size` - - * Value type is <> - * Default value is `500` - -Sending data to webhdfs if event count is above, even if `store_interval_in_secs` is not reached. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The server name for webhdfs/httpfs connections. - -[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"] -===== `idle_flush_time` - - * Value type is <> - * Default value is `1` - -Sending data to webhdfs in x seconds intervals. - -[id="{version}-plugins-{type}s-{plugin}-kerberos_keytab"] -===== `kerberos_keytab` - - * Value type is <> - * There is no default value for this setting. - -Set kerberos keytab file. Note that the gssapi library needs to be available to use this. - -[id="{version}-plugins-{type}s-{plugin}-open_timeout"] -===== `open_timeout` - - * Value type is <> - * Default value is `30` - -WebHdfs open timeout, default 30s. - -[id="{version}-plugins-{type}s-{plugin}-path"] -===== `path` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The path to the file to write to. 
Event fields can be used here, -as well as date fields in the joda time format, e.g.: -`/user/logstash/dt=%{+YYYY-MM-dd}/%{@source_host}-%{+HH}.log` - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `50070` - -The server port for webhdfs/httpfs connections. - -[id="{version}-plugins-{type}s-{plugin}-read_timeout"] -===== `read_timeout` - - * Value type is <> - * Default value is `30` - -The WebHdfs read timeout, default 30s. - -[id="{version}-plugins-{type}s-{plugin}-retry_interval"] -===== `retry_interval` - - * Value type is <> - * Default value is `0.5` - -How long should we wait between retries. - -[id="{version}-plugins-{type}s-{plugin}-retry_known_errors"] -===== `retry_known_errors` - - * Value type is <> - * Default value is `true` - -Retry some known webhdfs errors. These may be caused by race conditions when appending to the same file, etc. - -[id="{version}-plugins-{type}s-{plugin}-retry_times"] -===== `retry_times` - - * Value type is <> - * Default value is `5` - -How many times should we retry. If retry_times is exceeded, an error will be logged and the event will be discarded. - -[id="{version}-plugins-{type}s-{plugin}-single_file_per_thread"] -===== `single_file_per_thread` - - * Value type is <> - * Default value is `false` - -Avoid appending to the same file in multiple threads. -This solves some problems with multiple logstash output threads and locked file leases in webhdfs. -If this option is set to true, %{[@metadata][thread_id]} needs to be used in the path config setting. - -[id="{version}-plugins-{type}s-{plugin}-snappy_bufsize"] -===== `snappy_bufsize` - - * Value type is <> - * Default value is `32768` - -Set snappy chunksize. Only necessary for stream format. Defaults to 32k. Max is 65536 -@see http://code.google.com/p/snappy/source/browse/trunk/framing_format.txt - -[id="{version}-plugins-{type}s-{plugin}-snappy_format"] -===== `snappy_format` - - * Value can be any of: `stream`, `file` - * Default value is `"stream"` - -Set snappy format. One of "stream", "file". Set to stream to be hive compatible. - -[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] -===== `ssl_cert` - - * Value type is <> - * There is no default value for this setting. - -Set ssl cert file. - -[id="{version}-plugins-{type}s-{plugin}-ssl_key"] -===== `ssl_key` - - * Value type is <> - * There is no default value for this setting. - -Set ssl key file. - -[id="{version}-plugins-{type}s-{plugin}-standby_host"] -===== `standby_host` - - * Value type is <> - * Default value is `false` - -Standby namenode for ha hdfs. - -[id="{version}-plugins-{type}s-{plugin}-standby_port"] -===== `standby_port` - - * Value type is <> - * Default value is `50070` - -Standby namenode port for ha hdfs. - -[id="{version}-plugins-{type}s-{plugin}-use_httpfs"] -===== `use_httpfs` - - * Value type is <> - * Default value is `false` - -Use httpfs mode if set to true, else webhdfs. - -[id="{version}-plugins-{type}s-{plugin}-use_kerberos_auth"] -===== `use_kerberos_auth` - - * Value type is <> - * Default value is `false` - -Set kerberos authentication. - -[id="{version}-plugins-{type}s-{plugin}-use_ssl_auth"] -===== `use_ssl_auth` - - * Value type is <> - * Default value is `false` - -Set ssl authentication. Note that the openssl library needs to be available to use this. - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The username for webhdfs.
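As a sketch of the `single_file_per_thread` behavior described above: when it is enabled, the path must reference `%{[@metadata][thread_id]}` so that each output thread writes its own file. The host, user, and path values are placeholders:

[source,ruby]
----------------------------------
output {
  webhdfs {
    host => "namenode.example.org"    # placeholder namenode host
    path => "/user/logstash/dt=%{+YYYY-MM-dd}/thread-%{[@metadata][thread_id]}.log"
    user => "logstash"                # placeholder webhdfs user
    single_file_per_thread => true    # avoids locked file leases across threads
  }
}
----------------------------------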
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/webhdfs-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/webhdfs-v3.0.5.asciidoc deleted file mode 100644 index 0f57ef8a5..000000000 --- a/docs/versioned-plugins/outputs/webhdfs-v3.0.5.asciidoc +++ /dev/null @@ -1,293 +0,0 @@ -:plugin: webhdfs -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-11-13 -:changelog_url: https://github.com/logstash-plugins/logstash-output-webhdfs/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Webhdfs output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This plugin sends Logstash events into files in HDFS via -the https://hadoop.apache.org/docs/r1.0.4/webhdfs.html[webhdfs] REST API. - -==== Dependencies -This plugin has no dependency on jars from hadoop, thus reducing configuration and compatibility -problems. It uses the webhdfs gem from Kazuki Ohta and TAGOMORI Satoshi (@see: https://github.com/kzk/webhdfs). -Optional dependencies are the zlib and snappy gems if you use the compression functionality. - -==== Operational Notes -If you get an error like: - - Max write retries reached. Exception: initialize: name or service not known {:level=>:error} - -make sure that the hostname of your namenode is resolvable on the host running Logstash. When creating/appending -to a file, webhdfs sometimes sends a `307 TEMPORARY_REDIRECT` with the `HOSTNAME` of the machine it's running on. - -==== Usage -This is an example of Logstash config: - -[source,ruby] ----------------------------------- -input { - ... -} -filter { - ... -} -output { - webhdfs { - host => "127.0.0.1" # (required) - port => 50070 # (optional, default: 50070) - path => "/user/logstash/dt=%{+YYYY-MM-dd}/logstash-%{+HH}.log" # (required) - user => "hue" # (required) - } -} ----------------------------------- - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Webhdfs Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-compression>> |<>, one of `["none", "snappy", "gzip"]`|No -| <<{version}-plugins-{type}s-{plugin}-flush_size>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-idle_flush_time>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-kerberos_keytab>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-open_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-read_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_known_errors>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_times>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-single_file_per_thread>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-snappy_bufsize>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-snappy_format>> |<>, one of `["stream", "file"]`|No -| <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-standby_host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-standby_port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-use_httpfs>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-use_kerberos_auth>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-use_ssl_auth>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-compression"] -===== `compression` - - * Value can be any of: `none`, `snappy`, `gzip` - * Default value is `"none"` - -Compress output. One of ['none', 'snappy', 'gzip'] - -[id="{version}-plugins-{type}s-{plugin}-flush_size"] -===== `flush_size` - - * Value type is <> - * Default value is `500` - -Sending data to webhdfs if event count is above, even if `store_interval_in_secs` is not reached. - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The server name for webhdfs/httpfs connections. - -[id="{version}-plugins-{type}s-{plugin}-idle_flush_time"] -===== `idle_flush_time` - - * Value type is <> - * Default value is `1` - -Sending data to webhdfs in x seconds intervals. - -[id="{version}-plugins-{type}s-{plugin}-kerberos_keytab"] -===== `kerberos_keytab` - - * Value type is <> - * There is no default value for this setting. - -Set kerberos keytab file. Note that the gssapi library needs to be available to use this. - -[id="{version}-plugins-{type}s-{plugin}-open_timeout"] -===== `open_timeout` - - * Value type is <> - * Default value is `30` - -WebHdfs open timeout, default 30s. - -[id="{version}-plugins-{type}s-{plugin}-path"] -===== `path` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The path to the file to write to. 
Event fields can be used here, -as well as date fields in the joda time format, e.g.: -`/user/logstash/dt=%{+YYYY-MM-dd}/%{@source_host}-%{+HH}.log` - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `50070` - -The server port for webhdfs/httpfs connections. - -[id="{version}-plugins-{type}s-{plugin}-read_timeout"] -===== `read_timeout` - - * Value type is <> - * Default value is `30` - -The WebHdfs read timeout, default 30s. - -[id="{version}-plugins-{type}s-{plugin}-retry_interval"] -===== `retry_interval` - - * Value type is <> - * Default value is `0.5` - -How long should we wait between retries. - -[id="{version}-plugins-{type}s-{plugin}-retry_known_errors"] -===== `retry_known_errors` - - * Value type is <> - * Default value is `true` - -Retry some known webhdfs errors. These may be caused by race conditions when appending to the same file, etc. - -[id="{version}-plugins-{type}s-{plugin}-retry_times"] -===== `retry_times` - - * Value type is <> - * Default value is `5` - -How many times should we retry. If retry_times is exceeded, an error will be logged and the event will be discarded. - -[id="{version}-plugins-{type}s-{plugin}-single_file_per_thread"] -===== `single_file_per_thread` - - * Value type is <> - * Default value is `false` - -Avoid appending to the same file in multiple threads. -This solves some problems with multiple logstash output threads and locked file leases in webhdfs. -If this option is set to true, %{[@metadata][thread_id]} needs to be used in the path config setting. - -[id="{version}-plugins-{type}s-{plugin}-snappy_bufsize"] -===== `snappy_bufsize` - - * Value type is <> - * Default value is `32768` - -Set snappy chunksize. Only necessary for stream format. Defaults to 32k. Max is 65536 -@see http://code.google.com/p/snappy/source/browse/trunk/framing_format.txt - -[id="{version}-plugins-{type}s-{plugin}-snappy_format"] -===== `snappy_format` - - * Value can be any of: `stream`, `file` - * Default value is `"stream"` - -Set snappy format. One of "stream", "file". Set to stream to be hive compatible. - -[id="{version}-plugins-{type}s-{plugin}-ssl_cert"] -===== `ssl_cert` - - * Value type is <> - * There is no default value for this setting. - -Set ssl cert file. - -[id="{version}-plugins-{type}s-{plugin}-ssl_key"] -===== `ssl_key` - - * Value type is <> - * There is no default value for this setting. - -Set ssl key file. - -[id="{version}-plugins-{type}s-{plugin}-standby_host"] -===== `standby_host` - - * Value type is <> - * Default value is `false` - -Standby namenode for ha hdfs. - -[id="{version}-plugins-{type}s-{plugin}-standby_port"] -===== `standby_port` - - * Value type is <> - * Default value is `50070` - -Standby namenode port for ha hdfs. - -[id="{version}-plugins-{type}s-{plugin}-use_httpfs"] -===== `use_httpfs` - - * Value type is <> - * Default value is `false` - -Use httpfs mode if set to true, else webhdfs. - -[id="{version}-plugins-{type}s-{plugin}-use_kerberos_auth"] -===== `use_kerberos_auth` - - * Value type is <> - * Default value is `false` - -Set kerberos authentication. - -[id="{version}-plugins-{type}s-{plugin}-use_ssl_auth"] -===== `use_ssl_auth` - - * Value type is <> - * Default value is `false` - -Set ssl authentication. Note that the openssl library needs to be available to use this. - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The username for webhdfs.
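A sketch of SSL client authentication using the `use_ssl_auth`, `ssl_cert`, and `ssl_key` options described above; all hosts and file paths here are hypothetical, and the openssl library must be available:

[source,ruby]
----------------------------------
output {
  webhdfs {
    host => "namenode.example.org"            # placeholder namenode host
    path => "/user/logstash/dt=%{+YYYY-MM-dd}/logstash-%{+HH}.log"
    user => "logstash"                        # placeholder webhdfs user
    use_ssl_auth => true                      # requires the openssl library
    ssl_cert => "/etc/logstash/webhdfs.crt"   # hypothetical certificate path
    ssl_key  => "/etc/logstash/webhdfs.key"   # hypothetical key path
  }
}
----------------------------------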
- - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/websocket-index.asciidoc b/docs/versioned-plugins/outputs/websocket-index.asciidoc deleted file mode 100644 index 4ea61ad44..000000000 --- a/docs/versioned-plugins/outputs/websocket-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: websocket -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-13 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::websocket-v3.0.4.asciidoc[] -include::websocket-v3.0.3.asciidoc[] -include::websocket-v3.0.2.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/websocket-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/websocket-v3.0.2.asciidoc deleted file mode 100644 index 54f767166..000000000 --- a/docs/versioned-plugins/outputs/websocket-v3.0.2.asciidoc +++ /dev/null @@ -1,66 +0,0 @@ -:plugin: websocket -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.2 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-websocket/blob/v3.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Websocket output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output runs a websocket server and publishes any -messages to all connected websocket clients. - -You can connect to it with ws://:/ - -If no clients are connected, any messages received are ignored. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Websocket Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"0.0.0.0"` - -The address to serve websocket data from - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `3232` - -The port to serve websocket data from - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/websocket-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/websocket-v3.0.3.asciidoc deleted file mode 100644 index dfde774b4..000000000 --- a/docs/versioned-plugins/outputs/websocket-v3.0.3.asciidoc +++ /dev/null @@ -1,66 +0,0 @@ -:plugin: websocket -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! 
-/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-websocket/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Websocket output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output runs a websocket server and publishes any -messages to all connected websocket clients. - -You can connect to it with ws://:/ - -If no clients are connected, any messages received are ignored. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Websocket Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"0.0.0.0"` - -The address to serve websocket data from - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `3232` - -The port to serve websocket data from - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/websocket-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/websocket-v3.0.4.asciidoc deleted file mode 100644 index de6bb13b8..000000000 --- a/docs/versioned-plugins/outputs/websocket-v3.0.4.asciidoc +++ /dev/null @@ -1,66 +0,0 @@ -:plugin: websocket -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-11-13 -:changelog_url: https://github.com/logstash-plugins/logstash-output-websocket/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Websocket output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output runs a websocket server and publishes any -messages to all connected websocket clients. - -You can connect to it with ws://:/ - -If no clients are connected, any messages received are ignored. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Websocket Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
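As a quick illustration before the option reference, a minimal configuration might look like the following sketch; both values shown are the defaults, so an empty `websocket {}` block behaves the same way:

[source,ruby]
----------------------------------
output {
  websocket {
    host => "0.0.0.0"   # default listen address
    port => 3232        # default port; clients connect via ws://<host>:3232/
  }
}
----------------------------------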
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * Default value is `"0.0.0.0"` - -The address to serve websocket data from - -[id="{version}-plugins-{type}s-{plugin}-port"] -===== `port` - - * Value type is <> - * Default value is `3232` - -The port to serve websocket data from - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/xmpp-index.asciidoc b/docs/versioned-plugins/outputs/xmpp-index.asciidoc deleted file mode 100644 index 1ecfabf15..000000000 --- a/docs/versioned-plugins/outputs/xmpp-index.asciidoc +++ /dev/null @@ -1,20 +0,0 @@ -:plugin: xmpp -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-13 -| <> | 2017-08-16 -| <> | 2017-06-27 -| <> | 2017-06-23 -| <> | 2017-05-03 -|======================================================================= - -include::xmpp-v3.0.7.asciidoc[] -include::xmpp-v3.0.6.asciidoc[] -include::xmpp-v3.0.5.asciidoc[] -include::xmpp-v3.0.4.asciidoc[] -include::xmpp-v3.0.3.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/xmpp-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/xmpp-v3.0.3.asciidoc deleted file mode 100644 index 8956c4a2c..000000000 --- a/docs/versioned-plugins/outputs/xmpp-v3.0.3.asciidoc +++ /dev/null @@ -1,104 +0,0 @@ -:plugin: xmpp -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-05-03 -:changelog_url: https://github.com/logstash-plugins/logstash-output-xmpp/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Xmpp - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output allows you to ship events over XMPP/Jabber. - -This plugin can be used for posting events to humans over XMPP, or you can -use it for PubSub or general message passing for logstash to logstash. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Xmpp Output Configuration Options - -This plugin supports the following configuration options plus the <> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-rooms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-users>> |<>|No -|======================================================================= - -Also see <> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * There is no default value for this setting. - -The xmpp server to connect to. This is optional. If you omit this setting, -the host on the user/identity is used. (foo.com for user@foo.com) - -[id="{version}-plugins-{type}s-{plugin}-message"] -===== `message` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The message to send. This supports dynamic strings like `%{host}`. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The xmpp password for the user/identity. - -[id="{version}-plugins-{type}s-{plugin}-rooms"] -===== `rooms` - - * Value type is <> - * There is no default value for this setting. - -If muc/multi-user-chat is required, give the name of the room that -you want to join: room@conference.domain/nick - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The user or resource ID, like foo@example.com. - -[id="{version}-plugins-{type}s-{plugin}-users"] -===== `users` - - * Value type is <> - * There is no default value for this setting. - -The users to send messages to. - - - -include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/xmpp-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/xmpp-v3.0.4.asciidoc deleted file mode 100644 index cdf1a6eac..000000000 --- a/docs/versioned-plugins/outputs/xmpp-v3.0.4.asciidoc +++ /dev/null @@ -1,105 +0,0 @@ -:plugin: xmpp -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-xmpp/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Xmpp output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output allows you to ship events over XMPP/Jabber. - -This plugin can be used for posting events to humans over XMPP, or you can -use it for PubSub or general message passing for logstash to logstash. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Xmpp Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-rooms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-users>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * There is no default value for this setting. - -The xmpp server to connect to. This is optional. If you omit this setting, -the host on the user/identity is used. (foo.com for user@foo.com) - -[id="{version}-plugins-{type}s-{plugin}-message"] -===== `message` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The message to send. This supports dynamic strings like `%{host}`. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The xmpp password for the user/identity. - -[id="{version}-plugins-{type}s-{plugin}-rooms"] -===== `rooms` - - * Value type is <> - * There is no default value for this setting. - -If muc/multi-user-chat is required, give the name of the room that -you want to join: room@conference.domain/nick - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The user or resource ID, like foo@example.com. - -[id="{version}-plugins-{type}s-{plugin}-users"] -===== `users` - - * Value type is <> - * There is no default value for this setting. - -The users to send messages to. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/xmpp-v3.0.5.asciidoc b/docs/versioned-plugins/outputs/xmpp-v3.0.5.asciidoc deleted file mode 100644 index 81bd598ed..000000000 --- a/docs/versioned-plugins/outputs/xmpp-v3.0.5.asciidoc +++ /dev/null @@ -1,105 +0,0 @@ -:plugin: xmpp -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.5 -:release_date: 2017-06-27 -:changelog_url: https://github.com/logstash-plugins/logstash-output-xmpp/blob/v3.0.5/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Xmpp output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output allows you to ship events over XMPP/Jabber. - -This plugin can be used for posting events to humans over XMPP, or you can -use it for PubSub or general message passing for logstash to logstash. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Xmpp Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-rooms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-users>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * There is no default value for this setting. - -The xmpp server to connect to. This is optional. If you omit this setting, -the host on the user/identity is used. (foo.com for user@foo.com) - -[id="{version}-plugins-{type}s-{plugin}-message"] -===== `message` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The message to send. This supports dynamic strings like `%{host}`. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The xmpp password for the user/identity. - -[id="{version}-plugins-{type}s-{plugin}-rooms"] -===== `rooms` - - * Value type is <> - * There is no default value for this setting. - -If muc/multi-user-chat is required, give the name of the room that -you want to join: room@conference.domain/nick - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The user or resource ID, like foo@example.com. - -[id="{version}-plugins-{type}s-{plugin}-users"] -===== `users` - - * Value type is <> - * There is no default value for this setting. - -The users to send messages to. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/xmpp-v3.0.6.asciidoc b/docs/versioned-plugins/outputs/xmpp-v3.0.6.asciidoc deleted file mode 100644 index 4ca7772b1..000000000 --- a/docs/versioned-plugins/outputs/xmpp-v3.0.6.asciidoc +++ /dev/null @@ -1,105 +0,0 @@ -:plugin: xmpp -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.6 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-xmpp/blob/v3.0.6/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Xmpp output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output allows you to ship events over XMPP/Jabber. - -This plugin can be used for posting events to humans over XMPP, or you can -use it for PubSub or general message passing for logstash to logstash. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Xmpp Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-rooms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-users>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * There is no default value for this setting. - -The xmpp server to connect to. This is optional. If you omit this setting, -the host on the user/identity is used. (foo.com for user@foo.com) - -[id="{version}-plugins-{type}s-{plugin}-message"] -===== `message` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The message to send. This supports dynamic strings like `%{host}`. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The xmpp password for the user/identity. - -[id="{version}-plugins-{type}s-{plugin}-rooms"] -===== `rooms` - - * Value type is <> - * There is no default value for this setting. - -If muc/multi-user-chat is required, give the name of the room that -you want to join: room@conference.domain/nick - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The user or resource ID, like foo@example.com. - -[id="{version}-plugins-{type}s-{plugin}-users"] -===== `users` - - * Value type is <> - * There is no default value for this setting. - -The users to send messages to. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/xmpp-v3.0.7.asciidoc b/docs/versioned-plugins/outputs/xmpp-v3.0.7.asciidoc deleted file mode 100644 index 8edfc1d36..000000000 --- a/docs/versioned-plugins/outputs/xmpp-v3.0.7.asciidoc +++ /dev/null @@ -1,105 +0,0 @@ -:plugin: xmpp -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.7 -:release_date: 2017-11-13 -:changelog_url: https://github.com/logstash-plugins/logstash-output-xmpp/blob/v3.0.7/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Xmpp output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -This output allows you to ship events over XMPP/Jabber. - -This plugin can be used for posting events to humans over XMPP, or you can -use it for PubSub or general message passing for logstash to logstash. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Xmpp Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
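As a quick illustration before the option reference, here is a minimal sketch that sends each event to a single recipient; the accounts and password are placeholders:

[source,ruby]
----------------------------------
output {
  xmpp {
    user     => "logstash@example.com"         # placeholder sending account
    password => "secret"                       # placeholder password
    users    => ["oncall@example.com"]         # placeholder recipient list
    message  => "Event received: %{message}"   # supports dynamic strings
  }
}
----------------------------------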
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-message>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-rooms>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-users>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-host"] -===== `host` - - * Value type is <> - * There is no default value for this setting. - -The xmpp server to connect to. This is optional. If you omit this setting, -the host on the user/identity is used. (foo.com for user@foo.com) - -[id="{version}-plugins-{type}s-{plugin}-message"] -===== `message` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The message to send. This supports dynamic strings like `%{host}`. - -[id="{version}-plugins-{type}s-{plugin}-password"] -===== `password` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The xmpp password for the user/identity. - -[id="{version}-plugins-{type}s-{plugin}-rooms"] -===== `rooms` - - * Value type is <> - * There is no default value for this setting. - -If muc/multi-user-chat is required, give the name of the room that -you want to join: room@conference.domain/nick - -[id="{version}-plugins-{type}s-{plugin}-user"] -===== `user` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The user or resource ID, like foo@example.com. - -[id="{version}-plugins-{type}s-{plugin}-users"] -===== `users` - - * Value type is <> - * There is no default value for this setting. - -The users to send messages to. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/zabbix-index.asciidoc b/docs/versioned-plugins/outputs/zabbix-index.asciidoc deleted file mode 100644 index f01cb1d88..000000000 --- a/docs/versioned-plugins/outputs/zabbix-index.asciidoc +++ /dev/null @@ -1,16 +0,0 @@ -:plugin: zabbix -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-13 -| <> | 2017-08-16 -| <> | 2017-06-23 -|======================================================================= - -include::zabbix-v3.0.4.asciidoc[] -include::zabbix-v3.0.3.asciidoc[] -include::zabbix-v3.0.2.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/zabbix-v3.0.2.asciidoc b/docs/versioned-plugins/outputs/zabbix-v3.0.2.asciidoc deleted file mode 100644 index f5ac0de0b..000000000 --- a/docs/versioned-plugins/outputs/zabbix-v3.0.2.asciidoc +++ /dev/null @@ -1,160 +0,0 @@ -:plugin: zabbix -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT!
-/////////////////////////////////////////// -:version: v3.0.2 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-zabbix/blob/v3.0.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="plugins-{type}-{plugin}"] - -=== Zabbix output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The Zabbix output is used to send item data (key/value pairs) to a Zabbix -server. The event `@timestamp` will automatically be associated with the -Zabbix item data. - -The Zabbix Sender protocol is described at -https://www.zabbix.org/wiki/Docs/protocols/zabbix_sender/2.0 -Zabbix uses a kind of nested key/value store. - -[source,txt] - host - ├── item1 - │ └── value1 - ├── item2 - │ └── value2 - ├── ... - │ └── ... - ├── item_n - │ └── value_n - -Each "host" is an identifier, and each item is associated with that host. -Items are typed on the Zabbix side. You can send numbers as strings and -Zabbix will Do The Right Thing. - -In the Zabbix UI, ensure that your hostname matches the value referenced by -`zabbix_host`. Create the item with the key as it appears in the field -referenced by `zabbix_key`. In the item configuration window, ensure that the -type dropdown is set to Zabbix Trapper. Also be sure to set the type of -information that Zabbix should expect for this item. - -This plugin does not currently send in batches. While it is possible to do -so, this is not supported. Be careful not to flood your Zabbix server with -too many events per second. - -NOTE: This plugin will log a warning if a necessary field is missing. It will -not attempt to resend if Zabbix is down, but will log an error message. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Zabbix Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-multi_value>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-zabbix_host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-zabbix_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-zabbix_server_host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-zabbix_server_port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-zabbix_value>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-multi_value"] -===== `multi_value` - - * Value type is <> - * There is no default value for this setting. - -Use the `multi_value` directive to send multiple key/value pairs. -This can be thought of as an array, like: - -`[ zabbix_key1, zabbix_value1, zabbix_key2, zabbix_value2, ... zabbix_keyN, zabbix_valueN ]` - -...where `zabbix_key1` is an instance of `zabbix_key`, and `zabbix_value1` -is an instance of `zabbix_value`. If the field referenced by any -`zabbix_key` or `zabbix_value` does not exist, that entry will be ignored. - -This directive cannot be used in conjunction with the single-value directives -`zabbix_key` and `zabbix_value`. 
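A sketch of the `multi_value` form described above, assuming events carry the referenced fields; the server address and field names are placeholders:

[source,ruby]
----------------------------------
output {
  zabbix {
    zabbix_server_host => "zabbix.example.com"   # placeholder server
    zabbix_host => "[@metadata][zabbix_host]"    # field holding the Zabbix host name
    # alternating key-field / value-field pairs:
    multi_value => [ "[@metadata][key1]", "value1", "[@metadata][key2]", "value2" ]
  }
}
----------------------------------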
- -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * Default value is `1` - -The number of seconds to wait before giving up on a connection to the Zabbix -server. This number should be very small, otherwise delays in delivery of -other outputs could result. - -[id="{version}-plugins-{type}s-{plugin}-zabbix_host"] -===== `zabbix_host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The field name which holds the Zabbix host name. This can be a sub-field of -the @metadata field. - -[id="{version}-plugins-{type}s-{plugin}-zabbix_key"] -===== `zabbix_key` - - * Value type is <> - * There is no default value for this setting. - -A single field name which holds the value you intend to use as the Zabbix -item key. This can be a sub-field of the @metadata field. -This directive will be ignored if using `multi_value` - -IMPORTANT: `zabbix_key` is required if not using `multi_value`. - - -[id="{version}-plugins-{type}s-{plugin}-zabbix_server_host"] -===== `zabbix_server_host` - - * Value type is <> - * Default value is `"localhost"` - -The IP or resolvable hostname where the Zabbix server is running - -[id="{version}-plugins-{type}s-{plugin}-zabbix_server_port"] -===== `zabbix_server_port` - - * Value type is <> - * Default value is `10051` - -The port on which the Zabbix server is running - -[id="{version}-plugins-{type}s-{plugin}-zabbix_value"] -===== `zabbix_value` - - * Value type is <> - * Default value is `"message"` - -The field name which holds the value you want to send. -This directive will be ignored if using `multi_value` - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/zabbix-v3.0.3.asciidoc b/docs/versioned-plugins/outputs/zabbix-v3.0.3.asciidoc deleted file mode 100644 index b07a813b2..000000000 --- a/docs/versioned-plugins/outputs/zabbix-v3.0.3.asciidoc +++ /dev/null @@ -1,160 +0,0 @@ -:plugin: zabbix -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.3 -:release_date: 2017-08-16 -:changelog_url: https://github.com/logstash-plugins/logstash-output-zabbix/blob/v3.0.3/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Zabbix output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The Zabbix output is used to send item data (key/value pairs) to a Zabbix -server. The event `@timestamp` will automatically be associated with the -Zabbix item data. - -The Zabbix Sender protocol is described at -https://www.zabbix.org/wiki/Docs/protocols/zabbix_sender/2.0 -Zabbix uses a kind of nested key/value store. - -[source,txt] - host - ├── item1 - │ └── value1 - ├── item2 - │ └── value2 - ├── ... - │ └── ... - ├── item_n - │ └── value_n - -Each "host" is an identifier, and each item is associated with that host. -Items are typed on the Zabbix side. You can send numbers as strings and -Zabbix will Do The Right Thing. - -In the Zabbix UI, ensure that your hostname matches the value referenced by -`zabbix_host`. Create the item with the key as it appears in the field -referenced by `zabbix_key`. 
In the item configuration window, ensure that the -type dropdown is set to Zabbix Trapper. Also be sure to set the type of -information that Zabbix should expect for this item. - -This plugin does not currently send in batches. While it is possible to do -so, this is not supported. Be careful not to flood your Zabbix server with -too many events per second. - -NOTE: This plugin will log a warning if a necessary field is missing. It will -not attempt to resend if Zabbix is down, but will log an error message. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Zabbix Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-multi_value>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-zabbix_host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-zabbix_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-zabbix_server_host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-zabbix_server_port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-zabbix_value>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-multi_value"] -===== `multi_value` - - * Value type is <> - * There is no default value for this setting. - -Use the `multi_value` directive to send multiple key/value pairs. -This can be thought of as an array, like: - -`[ zabbix_key1, zabbix_value1, zabbix_key2, zabbix_value2, ... zabbix_keyN, zabbix_valueN ]` - -...where `zabbix_key1` is an instance of `zabbix_key`, and `zabbix_value1` -is an instance of `zabbix_value`. If the field referenced by any -`zabbix_key` or `zabbix_value` does not exist, that entry will be ignored. - -This directive cannot be used in conjunction with the single-value directives -`zabbix_key` and `zabbix_value`. - -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * Default value is `1` - -The number of seconds to wait before giving up on a connection to the Zabbix -server. This number should be very small, otherwise delays in delivery of -other outputs could result. - -[id="{version}-plugins-{type}s-{plugin}-zabbix_host"] -===== `zabbix_host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The field name which holds the Zabbix host name. This can be a sub-field of -the @metadata field. - -[id="{version}-plugins-{type}s-{plugin}-zabbix_key"] -===== `zabbix_key` - - * Value type is <> - * There is no default value for this setting. - -A single field name which holds the value you intend to use as the Zabbix -item key. This can be a sub-field of the @metadata field. -This directive will be ignored if using `multi_value` - -IMPORTANT: `zabbix_key` is required if not using `multi_value`. 
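For illustration, a minimal sketch of the single-value form, where one key and one value are sent per event. The server address and the `@metadata` sub-fields are hypothetical placeholders:

[source,ruby]
    output {
      zabbix {
        zabbix_server_host => "zabbix.example.com"   # placeholder server address
        zabbix_host => "[@metadata][zabbix_host]"    # field holding the Zabbix host name
        zabbix_key => "[@metadata][zabbix_key]"      # field holding the item key
        zabbix_value => "message"                    # restates the default: the message field supplies the value
      }
    }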
- - -[id="{version}-plugins-{type}s-{plugin}-zabbix_server_host"] -===== `zabbix_server_host` - - * Value type is <> - * Default value is `"localhost"` - -The IP or resolvable hostname where the Zabbix server is running - -[id="{version}-plugins-{type}s-{plugin}-zabbix_server_port"] -===== `zabbix_server_port` - - * Value type is <> - * Default value is `10051` - -The port on which the Zabbix server is running - -[id="{version}-plugins-{type}s-{plugin}-zabbix_value"] -===== `zabbix_value` - - * Value type is <> - * Default value is `"message"` - -The field name which holds the value you want to send. -This directive will be ignored if using `multi_value` - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/zabbix-v3.0.4.asciidoc b/docs/versioned-plugins/outputs/zabbix-v3.0.4.asciidoc deleted file mode 100644 index 440e7616f..000000000 --- a/docs/versioned-plugins/outputs/zabbix-v3.0.4.asciidoc +++ /dev/null @@ -1,160 +0,0 @@ -:plugin: zabbix -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.0.4 -:release_date: 2017-11-13 -:changelog_url: https://github.com/logstash-plugins/logstash-output-zabbix/blob/v3.0.4/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Zabbix output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -The Zabbix output is used to send item data (key/value pairs) to a Zabbix -server. The event `@timestamp` will automatically be associated with the -Zabbix item data. - -The Zabbix Sender protocol is described at -https://www.zabbix.org/wiki/Docs/protocols/zabbix_sender/2.0 -Zabbix uses a kind of nested key/value store. - -[source,txt] - host - ├── item1 - │ └── value1 - ├── item2 - │ └── value2 - ├── ... - │ └── ... - ├── item_n - │ └── value_n - -Each "host" is an identifier, and each item is associated with that host. -Items are typed on the Zabbix side. You can send numbers as strings and -Zabbix will Do The Right Thing. - -In the Zabbix UI, ensure that your hostname matches the value referenced by -`zabbix_host`. Create the item with the key as it appears in the field -referenced by `zabbix_key`. In the item configuration window, ensure that the -type dropdown is set to Zabbix Trapper. Also be sure to set the type of -information that Zabbix should expect for this item. - -This plugin does not currently send in batches. While it is possible to do -so, this is not supported. Be careful not to flood your Zabbix server with -too many events per second. - -NOTE: This plugin will log a warning if a necessary field is missing. It will -not attempt to resend if Zabbix is down, but will log an error message. - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Zabbix Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-multi_value>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-zabbix_host>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-zabbix_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-zabbix_server_host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-zabbix_server_port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-zabbix_value>> |<>|No -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-multi_value"] -===== `multi_value` - - * Value type is <> - * There is no default value for this setting. - -Use the `multi_value` directive to send multiple key/value pairs. -This can be thought of as an array, like: - -`[ zabbix_key1, zabbix_value1, zabbix_key2, zabbix_value2, ... zabbix_keyN, zabbix_valueN ]` - -...where `zabbix_key1` is an instance of `zabbix_key`, and `zabbix_value1` -is an instance of `zabbix_value`. If the field referenced by any -`zabbix_key` or `zabbix_value` does not exist, that entry will be ignored. - -This directive cannot be used in conjunction with the single-value directives -`zabbix_key` and `zabbix_value`. - -[id="{version}-plugins-{type}s-{plugin}-timeout"] -===== `timeout` - - * Value type is <> - * Default value is `1` - -The number of seconds to wait before giving up on a connection to the Zabbix -server. This number should be very small, otherwise delays in delivery of -other outputs could result. - -[id="{version}-plugins-{type}s-{plugin}-zabbix_host"] -===== `zabbix_host` - - * This is a required setting. - * Value type is <> - * There is no default value for this setting. - -The field name which holds the Zabbix host name. This can be a sub-field of -the @metadata field. - -[id="{version}-plugins-{type}s-{plugin}-zabbix_key"] -===== `zabbix_key` - - * Value type is <> - * There is no default value for this setting. - -A single field name which holds the value you intend to use as the Zabbix -item key. This can be a sub-field of the @metadata field. -This directive will be ignored if using `multi_value` - -IMPORTANT: `zabbix_key` is required if not using `multi_value`. - - -[id="{version}-plugins-{type}s-{plugin}-zabbix_server_host"] -===== `zabbix_server_host` - - * Value type is <> - * Default value is `"localhost"` - -The IP or resolvable hostname where the Zabbix server is running - -[id="{version}-plugins-{type}s-{plugin}-zabbix_server_port"] -===== `zabbix_server_port` - - * Value type is <> - * Default value is `10051` - -The port on which the Zabbix server is running - -[id="{version}-plugins-{type}s-{plugin}-zabbix_value"] -===== `zabbix_value` - - * Value type is <> - * Default value is `"message"` - -The field name which holds the value you want to send. 
-This directive will be ignored if using `multi_value`. - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/zeromq-index.asciidoc b/docs/versioned-plugins/outputs/zeromq-index.asciidoc deleted file mode 100644 index 1808a3372..000000000 --- a/docs/versioned-plugins/outputs/zeromq-index.asciidoc +++ /dev/null @@ -1,14 +0,0 @@ -:plugin: zeromq -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -| <> | 2017-11-13 -| <> | 2017-06-23 -|======================================================================= - -include::zeromq-v3.1.2.asciidoc[] -include::zeromq-v3.1.1.asciidoc[] - diff --git a/docs/versioned-plugins/outputs/zeromq-v3.1.1.asciidoc b/docs/versioned-plugins/outputs/zeromq-v3.1.1.asciidoc deleted file mode 100644 index 64f604eef..000000000 --- a/docs/versioned-plugins/outputs/zeromq-v3.1.1.asciidoc +++ /dev/null @@ -1,126 +0,0 @@ -:plugin: zeromq -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.1 -:release_date: 2017-06-23 -:changelog_url: https://github.com/logstash-plugins/logstash-output-zeromq/blob/v3.1.1/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Zeromq output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Write events to a 0MQ PUB socket. - -You need to have the 0mq 2.1.x library installed to be able to use -this output plugin. - -The default settings will create a publisher connecting to a subscriber -bound to tcp://127.0.0.1:2120. - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Zeromq Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. - -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-address>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No -| <<{version}-plugins-{type}s-{plugin}-sockopt>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topic>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topology>> |<>, one of `["pushpull", "pubsub", "pair"]`|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-address"] -===== `address` - - * Value type is <> - * Default value is `["tcp://127.0.0.1:2120"]` - -The 0mq socket address to connect or bind to. -Please note that `inproc://` will not work with Logstash, -because a separate 0mq context is used per thread. -By default, inputs bind/listen and outputs connect. - -NOTE: This output has not yet been upgraded to `concurrency :shared`, -so it can become a performance bottleneck. - -[id="{version}-plugins-{type}s-{plugin}-mode"] -===== `mode` - - * Value can be any of: `server`, `client` - * Default value is `"client"` - -Server mode binds/listens.
Client mode connects. - -[id="{version}-plugins-{type}s-{plugin}-sockopt"] -===== `sockopt` - - * Value type is <> - * There is no default value for this setting. - -This exposes zmq_setsockopt for advanced tuning. -See http://api.zeromq.org/2-1:zmq-setsockopt for details. - -This is where you would set values like: - -* ZMQ::HWM - high water mark -* ZMQ::IDENTITY - named queues -* ZMQ::SWAP_SIZE - space for disk overflow - -Example: -[source,ruby] - sockopt => { - "ZMQ::HWM" => 50 - "ZMQ::IDENTITY" => "my_named_queue" - } - -[id="{version}-plugins-{type}s-{plugin}-topic"] -===== `topic` - - * Value type is <> - * Default value is `""` - -This is used for the 'pubsub' topology only. -On inputs, this allows you to filter messages by topic. -On outputs, this allows you to tag a message for routing. -NOTE: ZeroMQ does subscriber-side filtering -NOTE: Topic is evaluated with `event.sprintf` so macros are valid here. - -[id="{version}-plugins-{type}s-{plugin}-topology"] -===== `topology` - - * This is a required setting. - * Value can be any of: `pushpull`, `pubsub`, `pair` - * There is no default value for this setting. - -The default logstash topologies work as follows: - -* pushpull - inputs are pull, outputs are push -* pubsub - inputs are subscribers, outputs are publishers -* pair - inputs are clients, outputs are servers - -If the predefined topology flows don't work for you, -you can change the 'mode' setting - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/zeromq-v3.1.2.asciidoc b/docs/versioned-plugins/outputs/zeromq-v3.1.2.asciidoc deleted file mode 100644 index fca296baf..000000000 --- a/docs/versioned-plugins/outputs/zeromq-v3.1.2.asciidoc +++ /dev/null @@ -1,126 +0,0 @@ -:plugin: zeromq -:type: output - -/////////////////////////////////////////// -START - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// -:version: v3.1.2 -:release_date: 2017-11-13 -:changelog_url: https://github.com/logstash-plugins/logstash-output-zeromq/blob/v3.1.2/CHANGELOG.md -:include_path: ../../../../logstash/docs/include -/////////////////////////////////////////// -END - GENERATED VARIABLES, DO NOT EDIT! -/////////////////////////////////////////// - -[id="{version}-plugins-{type}s-{plugin}"] - -=== Zeromq output plugin {version} - -include::{include_path}/plugin_header.asciidoc[] - -==== Description - -Write events to a 0MQ PUB socket. - -You need to have the 0mq 2.1.x library installed to be able to use -this output plugin. - -The default settings will create a publisher connecting to a subscriber -bound to tcp://127.0.0.1:2120 - - -[id="{version}-plugins-{type}s-{plugin}-options"] -==== Zeromq Output Configuration Options - -This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
- -[cols="<,<,<",options="header",] -|======================================================================= -|Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-address>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No -| <<{version}-plugins-{type}s-{plugin}-sockopt>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topic>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-topology>> |<>, one of `["pushpull", "pubsub", "pair"]`|Yes -|======================================================================= - -Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all -output plugins. - -  - -[id="{version}-plugins-{type}s-{plugin}-address"] -===== `address` - - * Value type is <> - * Default value is `["tcp://127.0.0.1:2120"]` - -The 0mq socket address to connect or bind to. -Please note that `inproc://` will not work with Logstash, -because a separate 0mq context is used per thread. -By default, inputs bind/listen and outputs connect. - -NOTE: This output has not yet been upgraded to `concurrency :shared`, -so it can become a performance bottleneck. - -[id="{version}-plugins-{type}s-{plugin}-mode"] -===== `mode` - - * Value can be any of: `server`, `client` - * Default value is `"client"` - -Server mode binds/listens. Client mode connects. - -[id="{version}-plugins-{type}s-{plugin}-sockopt"] -===== `sockopt` - - * Value type is <> - * There is no default value for this setting. - -This exposes zmq_setsockopt for advanced tuning. -See http://api.zeromq.org/2-1:zmq-setsockopt for details. - -This is where you would set values like: - -* ZMQ::HWM - high water mark -* ZMQ::IDENTITY - named queues -* ZMQ::SWAP_SIZE - space for disk overflow - -Example: -[source,ruby] - sockopt => { - "ZMQ::HWM" => 50 - "ZMQ::IDENTITY" => "my_named_queue" - } - -[id="{version}-plugins-{type}s-{plugin}-topic"] -===== `topic` - - * Value type is <> - * Default value is `""` - -This is used for the 'pubsub' topology only. -On inputs, this allows you to filter messages by topic. -On outputs, this allows you to tag a message for routing. -NOTE: ZeroMQ does subscriber-side filtering. -NOTE: Topic is evaluated with `event.sprintf` so macros are valid here. - -[id="{version}-plugins-{type}s-{plugin}-topology"] -===== `topology` - - * This is a required setting. - * Value can be any of: `pushpull`, `pubsub`, `pair` - * There is no default value for this setting.
- -The default logstash topologies work as follows: - -* pushpull - inputs are pull, outputs are push -* pubsub - inputs are subscribers, outputs are publishers -* pair - inputs are clients, outputs are servers - -If the predefined topology flows don't work for you, -you can change the 'mode' setting - - - -[id="{version}-plugins-{type}s-{plugin}-common-options"] -include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/outputs/zookeeper-index.asciidoc b/docs/versioned-plugins/outputs/zookeeper-index.asciidoc deleted file mode 100644 index 4efb92606..000000000 --- a/docs/versioned-plugins/outputs/zookeeper-index.asciidoc +++ /dev/null @@ -1,10 +0,0 @@ -:plugin: zookeeper -:type: output - -include::{include_path}/version-list-intro.asciidoc[] - -|======================================================================= -| Version | Release Date -|======================================================================= - - From af38a05bd42988a83f9d76e194adc596585651c0 Mon Sep 17 00:00:00 2001 From: DeDe Morton Date: Fri, 12 Jan 2018 17:05:40 -0800 Subject: [PATCH 5/5] Fix errors round 1 --- .../codecs/cef-v4.1.3.asciidoc | 38 ++-- .../codecs/cef-v4.1.4.asciidoc | 36 ++-- .../codecs/cef-v5.0.0.asciidoc | 36 ++-- .../codecs/cef-v5.0.1.asciidoc | 36 ++-- .../codecs/cef-v5.0.2.asciidoc | 36 ++-- .../codecs/json-v3.0.3.asciidoc | 4 +- .../codecs/json-v3.0.4.asciidoc | 2 +- .../codecs/json-v3.0.5.asciidoc | 2 +- .../codecs/rubydebug-v3.0.3.asciidoc | 6 +- .../codecs/rubydebug-v3.0.4.asciidoc | 4 +- .../codecs/rubydebug-v3.0.5.asciidoc | 4 +- .../filters/grok-v3.4.1.asciidoc | 54 ++--- .../filters/grok-v3.4.2.asciidoc | 46 ++--- .../filters/grok-v3.4.3.asciidoc | 44 ++-- .../filters/grok-v3.4.4.asciidoc | 44 ++-- .../filters/grok-v4.0.0.asciidoc | 44 ++-- .../filters/grok-v4.0.1.asciidoc | 44 ++-- .../filters/mutate-v3.1.5.asciidoc | 50 ++--- .../filters/mutate-v3.1.6.asciidoc | 48 ++--- .../filters/mutate-v3.1.7.asciidoc | 48 ++--- .../filters/mutate-v3.2.0.asciidoc | 48 ++--- .../filters/ruby-v3.0.3.asciidoc | 12 +- .../filters/ruby-v3.0.4.asciidoc | 10 +- .../filters/ruby-v3.1.0.asciidoc | 33 +-- .../filters/ruby-v3.1.1.asciidoc | 33 +-- .../filters/ruby-v3.1.2.asciidoc | 33 +-- .../filters/ruby-v3.1.3.asciidoc | 22 +- .../filters/sleep-v3.0.4.asciidoc | 14 +- .../filters/sleep-v3.0.5.asciidoc | 12 +- .../filters/sleep-v3.0.6.asciidoc | 12 +- docs/versioned-plugins/index.asciidoc | 8 +- .../inputs/beats-v4.0.1.asciidoc | 68 +++--- .../inputs/beats-v4.0.2.asciidoc | 70 +++---- .../inputs/beats-v4.0.3.asciidoc | 68 +++--- .../inputs/beats-v4.0.4.asciidoc | 60 +++--- .../inputs/beats-v4.0.5.asciidoc | 58 +++--- .../inputs/beats-v5.0.0.asciidoc | 54 ++--- .../inputs/beats-v5.0.1.asciidoc | 54 ++--- .../inputs/beats-v5.0.2.asciidoc | 54 ++--- .../inputs/beats-v5.0.3.asciidoc | 54 ++--- .../inputs/beats-v5.0.4.asciidoc | 54 ++--- .../inputs/beats-v5.0.5.asciidoc | 54 ++--- .../inputs/beats-v5.0.6.asciidoc | 54 ++--- .../inputs/http-v3.0.5.asciidoc | 42 ++-- .../inputs/http-v3.0.6.asciidoc | 40 ++-- .../inputs/http-v3.0.7.asciidoc | 40 ++-- .../inputs/http-v3.0.8.asciidoc | 40 ++-- .../inputs/s3-v3.1.5.asciidoc | 64 +++--- .../inputs/s3-v3.1.6.asciidoc | 62 +++--- .../inputs/s3-v3.1.7.asciidoc | 62 +++--- .../inputs/s3-v3.1.8.asciidoc | 62 +++--- .../inputs/s3-v3.1.9.asciidoc | 62 +++--- .../inputs/s3-v3.2.0.asciidoc | 62 +++--- .../inputs/tcp-v4.1.2.asciidoc | 40 ++-- .../inputs/tcp-v4.2.2.asciidoc | 38 ++-- .../inputs/tcp-v4.2.3.asciidoc | 38 ++-- 
.../inputs/tcp-v4.2.4.asciidoc | 38 ++-- .../inputs/tcp-v5.0.0.asciidoc | 34 +-- .../inputs/tcp-v5.0.1.asciidoc | 34 +-- .../inputs/tcp-v5.0.2.asciidoc | 34 +-- .../inputs/tcp-v5.0.3.asciidoc | 34 +-- .../outputs/elasticsearch-v7.3.2.asciidoc | 194 +++++++++--------- .../outputs/elasticsearch-v7.3.3.asciidoc | 194 +++++++++--------- .../outputs/elasticsearch-v7.3.4.asciidoc | 194 +++++++++--------- .../outputs/elasticsearch-v7.3.5.asciidoc | 194 +++++++++--------- .../outputs/elasticsearch-v7.3.6.asciidoc | 186 ++++++++--------- .../outputs/elasticsearch-v7.3.7.asciidoc | 184 ++++++++--------- .../outputs/elasticsearch-v7.3.8.asciidoc | 184 ++++++++--------- .../outputs/elasticsearch-v7.4.0.asciidoc | 184 ++++++++--------- .../outputs/elasticsearch-v7.4.1.asciidoc | 184 ++++++++--------- .../outputs/elasticsearch-v7.4.2.asciidoc | 190 ++++++++--------- .../outputs/elasticsearch-v8.0.0.asciidoc | 180 ++++++++-------- .../outputs/elasticsearch-v8.0.1.asciidoc | 180 ++++++++-------- .../outputs/elasticsearch-v8.1.1.asciidoc | 180 ++++++++-------- .../outputs/elasticsearch-v8.2.0.asciidoc | 180 ++++++++-------- .../outputs/elasticsearch-v8.2.2.asciidoc | 186 ++++++++--------- .../outputs/elasticsearch-v9.0.0.asciidoc | 186 ++++++++--------- .../outputs/elasticsearch-v9.0.2.asciidoc | 186 ++++++++--------- .../outputs/email-v4.0.4.asciidoc | 76 +++---- .../outputs/email-v4.0.6.asciidoc | 72 +++---- .../outputs/file-v4.0.2.asciidoc | 30 +-- .../outputs/file-v4.1.0.asciidoc | 28 +-- .../outputs/file-v4.1.1.asciidoc | 28 +-- .../outputs/file-v4.1.2.asciidoc | 28 +-- .../outputs/file-v4.2.0.asciidoc | 28 +-- .../outputs/file-v4.2.1.asciidoc | 28 +-- .../outputs/graphite-v3.1.2.asciidoc | 46 ++--- .../outputs/graphite-v3.1.3.asciidoc | 44 ++-- .../outputs/graphite-v3.1.4.asciidoc | 44 ++-- 89 files changed, 3019 insertions(+), 2990 deletions(-) diff --git a/docs/versioned-plugins/codecs/cef-v4.1.3.asciidoc b/docs/versioned-plugins/codecs/cef-v4.1.3.asciidoc index 7c1216983..91c65488f 100644 --- a/docs/versioned-plugins/codecs/cef-v4.1.3.asciidoc +++ b/docs/versioned-plugins/codecs/cef-v4.1.3.asciidoc @@ -12,7 +12,7 @@ START - GENERATED VARIABLES, DO NOT EDIT! END - GENERATED VARIABLES, DO NOT EDIT! 
/////////////////////////////////////////// -[id="plugins-{type}-{plugin}"] +[id="{version}-plugins-{type}s-{plugin}"] === Cef codec plugin {version} @@ -33,14 +33,14 @@ produce an event with the payload as the 'message' field and a '_cefparsefailure [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-product>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-severity>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-signature>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-vendor>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-delimiter>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-fields>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-product>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-severity>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-signature>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-vendor>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No |=======================================================================   @@ -48,7 +48,7 @@ produce an event with the payload as the 'message' field and a '_cefparsefailure [id="{version}-plugins-{type}s-{plugin}-delimiter"] ===== `delimiter` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. If your input puts a delimiter between each CEF event, you'll want to set @@ -72,7 +72,7 @@ This setting allows the following character sequences to have special meaning: ===== `deprecated_v1_fields` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * There is no default value for this setting. Set this flag if you want to have both v1 and v2 fields indexed at the same time. Note that this option will increase @@ -82,7 +82,7 @@ This option is available to ease transition to new schema [id="{version}-plugins-{type}s-{plugin}-fields"] ===== `fields` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Fields to be included in CEV extension part as key/value pairs @@ -90,7 +90,7 @@ Fields to be included in CEV extension part as key/value pairs [id="{version}-plugins-{type}s-{plugin}-name"] ===== `name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"Logstash"` Name field in CEF header. The new value can include `%{foo}` strings @@ -99,7 +99,7 @@ to help you build a new value from other parts of the event. 
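As an illustration of the encoding side, here is a sketch of this codec attached to a tcp output. The destination and all header values are hypothetical placeholders:

[source,ruby]
    output {
      tcp {
        host => "siem.example.com"       # hypothetical CEF receiver
        port => 514
        codec => cef {
          vendor => "Example"
          product => "Billing"
          name => "%{[event_name]}"      # assumes the event carries an event_name field
          severity => "6"
          fields => [ "src_ip", "dst_ip" ]   # placeholder fields for the extension
        }
      }
    }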
[id="{version}-plugins-{type}s-{plugin}-product"] ===== `product` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"Logstash"` Device product field in CEF header. The new value can include `%{foo}` strings @@ -109,7 +109,7 @@ to help you build a new value from other parts of the event. ===== `sev` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Deprecated severity field for CEF header. The new value can include `%{foo}` strings @@ -124,7 +124,7 @@ All invalid values will be mapped to the default of 6. [id="{version}-plugins-{type}s-{plugin}-severity"] ===== `severity` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"6"` Severity field in CEF header. The new value can include `%{foo}` strings @@ -137,7 +137,7 @@ All invalid values will be mapped to the default of 6. [id="{version}-plugins-{type}s-{plugin}-signature"] ===== `signature` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"Logstash"` Signature ID field in CEF header. The new value can include `%{foo}` strings @@ -146,7 +146,7 @@ to help you build a new value from other parts of the event. [id="{version}-plugins-{type}s-{plugin}-vendor"] ===== `vendor` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"Elasticsearch"` Device vendor field in CEF header. The new value can include `%{foo}` strings @@ -155,7 +155,7 @@ to help you build a new value from other parts of the event. [id="{version}-plugins-{type}s-{plugin}-version"] ===== `version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"1.0"` Device version field in CEF header. 
The new value can include `%{foo}` strings diff --git a/docs/versioned-plugins/codecs/cef-v4.1.4.asciidoc b/docs/versioned-plugins/codecs/cef-v4.1.4.asciidoc index da54ce679..238113395 100644 --- a/docs/versioned-plugins/codecs/cef-v4.1.4.asciidoc +++ b/docs/versioned-plugins/codecs/cef-v4.1.4.asciidoc @@ -33,14 +33,14 @@ produce an event with the payload as the 'message' field and a '_cefparsefailure [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-product>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-severity>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-signature>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-vendor>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-delimiter>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-fields>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-product>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-severity>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-signature>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-vendor>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No |=======================================================================   @@ -48,7 +48,7 @@ produce an event with the payload as the 'message' field and a '_cefparsefailure [id="{version}-plugins-{type}s-{plugin}-delimiter"] ===== `delimiter` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. If your input puts a delimiter between each CEF event, you'll want to set @@ -72,7 +72,7 @@ This setting allows the following character sequences to have special meaning: ===== `deprecated_v1_fields` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * There is no default value for this setting. Set this flag if you want to have both v1 and v2 fields indexed at the same time. Note that this option will increase @@ -82,7 +82,7 @@ This option is available to ease transition to new schema [id="{version}-plugins-{type}s-{plugin}-fields"] ===== `fields` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Fields to be included in CEV extension part as key/value pairs @@ -90,7 +90,7 @@ Fields to be included in CEV extension part as key/value pairs [id="{version}-plugins-{type}s-{plugin}-name"] ===== `name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"Logstash"` Name field in CEF header. 
The new value can include `%{foo}` strings @@ -99,7 +99,7 @@ to help you build a new value from other parts of the event. [id="{version}-plugins-{type}s-{plugin}-product"] ===== `product` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"Logstash"` Device product field in CEF header. The new value can include `%{foo}` strings @@ -109,7 +109,7 @@ to help you build a new value from other parts of the event. ===== `sev` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Deprecated severity field for CEF header. The new value can include `%{foo}` strings @@ -124,7 +124,7 @@ All invalid values will be mapped to the default of 6. [id="{version}-plugins-{type}s-{plugin}-severity"] ===== `severity` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"6"` Severity field in CEF header. The new value can include `%{foo}` strings @@ -137,7 +137,7 @@ All invalid values will be mapped to the default of 6. [id="{version}-plugins-{type}s-{plugin}-signature"] ===== `signature` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"Logstash"` Signature ID field in CEF header. The new value can include `%{foo}` strings @@ -146,7 +146,7 @@ to help you build a new value from other parts of the event. [id="{version}-plugins-{type}s-{plugin}-vendor"] ===== `vendor` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"Elasticsearch"` Device vendor field in CEF header. The new value can include `%{foo}` strings @@ -155,7 +155,7 @@ to help you build a new value from other parts of the event. [id="{version}-plugins-{type}s-{plugin}-version"] ===== `version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"1.0"` Device version field in CEF header. 
The new value can include `%{foo}` strings diff --git a/docs/versioned-plugins/codecs/cef-v5.0.0.asciidoc b/docs/versioned-plugins/codecs/cef-v5.0.0.asciidoc index 0740425c8..4efedcba3 100644 --- a/docs/versioned-plugins/codecs/cef-v5.0.0.asciidoc +++ b/docs/versioned-plugins/codecs/cef-v5.0.0.asciidoc @@ -33,14 +33,14 @@ produce an event with the payload as the 'message' field and a '_cefparsefailure [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-product>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-severity>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-signature>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-vendor>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-delimiter>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-fields>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-product>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-severity>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-signature>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-vendor>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No |=======================================================================   @@ -48,7 +48,7 @@ produce an event with the payload as the 'message' field and a '_cefparsefailure [id="{version}-plugins-{type}s-{plugin}-delimiter"] ===== `delimiter` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. If your input puts a delimiter between each CEF event, you'll want to set @@ -72,13 +72,13 @@ This setting allows the following character sequences to have special meaning: ===== `deprecated_v1_fields` (OBSOLETE) * OBSOLETE WARNING: This configuration item is obsolete and will prevent the pipeline from starting if used - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * There is no default value for this setting. [id="{version}-plugins-{type}s-{plugin}-fields"] ===== `fields` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Fields to be included in CEV extension part as key/value pairs @@ -86,7 +86,7 @@ Fields to be included in CEV extension part as key/value pairs [id="{version}-plugins-{type}s-{plugin}-name"] ===== `name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"Logstash"` Name field in CEF header. The new value can include `%{foo}` strings @@ -95,7 +95,7 @@ to help you build a new value from other parts of the event. 
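Conversely, a minimal sketch of the decoding side, with this codec parsing CEF events arriving over TCP. The port and the delimiter choice are illustrative assumptions:

[source,ruby]
    input {
      tcp {
        port => 5514                          # hypothetical listener port
        codec => cef { delimiter => "\r\n" }  # one CEF event per CRLF-terminated line
      }
    }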
[id="{version}-plugins-{type}s-{plugin}-product"] ===== `product` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"Logstash"` Device product field in CEF header. The new value can include `%{foo}` strings @@ -105,7 +105,7 @@ to help you build a new value from other parts of the event. ===== `sev` (OBSOLETE) * OBSOLETE WARNING: This configuration item is obsolete and will prevent the pipeline from starting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Obsolete severity field for CEF header use :severity instead. @@ -113,7 +113,7 @@ Obsolete severity field for CEF header use :severity instead. [id="{version}-plugins-{type}s-{plugin}-severity"] ===== `severity` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"6"` Severity field in CEF header. The new value can include `%{foo}` strings @@ -126,7 +126,7 @@ All invalid values will be mapped to the default of 6. [id="{version}-plugins-{type}s-{plugin}-signature"] ===== `signature` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"Logstash"` Signature ID field in CEF header. The new value can include `%{foo}` strings @@ -135,7 +135,7 @@ to help you build a new value from other parts of the event. [id="{version}-plugins-{type}s-{plugin}-vendor"] ===== `vendor` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"Elasticsearch"` Device vendor field in CEF header. The new value can include `%{foo}` strings @@ -144,7 +144,7 @@ to help you build a new value from other parts of the event. [id="{version}-plugins-{type}s-{plugin}-version"] ===== `version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"1.0"` Device version field in CEF header. 
The new value can include `%{foo}` strings diff --git a/docs/versioned-plugins/codecs/cef-v5.0.1.asciidoc b/docs/versioned-plugins/codecs/cef-v5.0.1.asciidoc index 224361280..b0597a374 100644 --- a/docs/versioned-plugins/codecs/cef-v5.0.1.asciidoc +++ b/docs/versioned-plugins/codecs/cef-v5.0.1.asciidoc @@ -33,14 +33,14 @@ produce an event with the payload as the 'message' field and a '_cefparsefailure [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-product>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-severity>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-signature>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-vendor>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-delimiter>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-fields>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-product>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-severity>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-signature>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-vendor>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No |=======================================================================   @@ -48,7 +48,7 @@ produce an event with the payload as the 'message' field and a '_cefparsefailure [id="{version}-plugins-{type}s-{plugin}-delimiter"] ===== `delimiter` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. If your input puts a delimiter between each CEF event, you'll want to set @@ -72,13 +72,13 @@ This setting allows the following character sequences to have special meaning: ===== `deprecated_v1_fields` (OBSOLETE) * OBSOLETE WARNING: This configuration item is obsolete and will prevent the pipeline from starting if used - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * There is no default value for this setting. [id="{version}-plugins-{type}s-{plugin}-fields"] ===== `fields` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Fields to be included in CEV extension part as key/value pairs @@ -86,7 +86,7 @@ Fields to be included in CEV extension part as key/value pairs [id="{version}-plugins-{type}s-{plugin}-name"] ===== `name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"Logstash"` Name field in CEF header. The new value can include `%{foo}` strings @@ -95,7 +95,7 @@ to help you build a new value from other parts of the event. 
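Because each header field accepts `%{foo}` references, headers can be built from event data. A sketch, assuming hypothetical `rule_name`, `rule_id`, and `event_severity` fields on the event:

[source,ruby]
    codec => cef {
      name => "%{[rule_name]}"
      signature => "%{[rule_id]}"
      severity => "%{[event_severity]}"   # invalid values are mapped to the default of 6
    }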
[id="{version}-plugins-{type}s-{plugin}-product"] ===== `product` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"Logstash"` Device product field in CEF header. The new value can include `%{foo}` strings @@ -105,7 +105,7 @@ to help you build a new value from other parts of the event. ===== `sev` (OBSOLETE) * OBSOLETE WARNING: This configuration item is obsolete and will prevent the pipeline from starting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Obsolete severity field for CEF header use :severity instead. @@ -113,7 +113,7 @@ Obsolete severity field for CEF header use :severity instead. [id="{version}-plugins-{type}s-{plugin}-severity"] ===== `severity` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"6"` Severity field in CEF header. The new value can include `%{foo}` strings @@ -126,7 +126,7 @@ All invalid values will be mapped to the default of 6. [id="{version}-plugins-{type}s-{plugin}-signature"] ===== `signature` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"Logstash"` Signature ID field in CEF header. The new value can include `%{foo}` strings @@ -135,7 +135,7 @@ to help you build a new value from other parts of the event. [id="{version}-plugins-{type}s-{plugin}-vendor"] ===== `vendor` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"Elasticsearch"` Device vendor field in CEF header. The new value can include `%{foo}` strings @@ -144,7 +144,7 @@ to help you build a new value from other parts of the event. [id="{version}-plugins-{type}s-{plugin}-version"] ===== `version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"1.0"` Device version field in CEF header. 
The new value can include `%{foo}` strings diff --git a/docs/versioned-plugins/codecs/cef-v5.0.2.asciidoc b/docs/versioned-plugins/codecs/cef-v5.0.2.asciidoc index 039366cad..74d8782a3 100644 --- a/docs/versioned-plugins/codecs/cef-v5.0.2.asciidoc +++ b/docs/versioned-plugins/codecs/cef-v5.0.2.asciidoc @@ -33,14 +33,14 @@ produce an event with the payload as the 'message' field and a '_cefparsefailure [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-delimiter>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fields>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-product>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-severity>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-signature>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-vendor>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-delimiter>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-fields>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-product>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-severity>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-signature>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-vendor>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No |=======================================================================   @@ -48,7 +48,7 @@ produce an event with the payload as the 'message' field and a '_cefparsefailure [id="{version}-plugins-{type}s-{plugin}-delimiter"] ===== `delimiter` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. If your input puts a delimiter between each CEF event, you'll want to set @@ -72,13 +72,13 @@ This setting allows the following character sequences to have special meaning: ===== `deprecated_v1_fields` (OBSOLETE) * OBSOLETE WARNING: This configuration item is obsolete and will prevent the pipeline from starting if used - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * There is no default value for this setting. [id="{version}-plugins-{type}s-{plugin}-fields"] ===== `fields` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Fields to be included in CEV extension part as key/value pairs @@ -86,7 +86,7 @@ Fields to be included in CEV extension part as key/value pairs [id="{version}-plugins-{type}s-{plugin}-name"] ===== `name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"Logstash"` Name field in CEF header. The new value can include `%{foo}` strings @@ -95,7 +95,7 @@ to help you build a new value from other parts of the event. 
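As a final sketch, choosing which event fields populate the CEF extension via `fields`; all names here are placeholders:

[source,ruby]
    codec => cef {
      vendor => "Example"
      product => "Gateway"
      fields => [ "source_address", "destination_address", "http_status" ]   # emitted as key/value pairs in the extension
    }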
[id="{version}-plugins-{type}s-{plugin}-product"] ===== `product` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"Logstash"` Device product field in CEF header. The new value can include `%{foo}` strings @@ -105,7 +105,7 @@ to help you build a new value from other parts of the event. ===== `sev` (OBSOLETE) * OBSOLETE WARNING: This configuration item is obsolete and will prevent the pipeline from starting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Obsolete severity field for CEF header use :severity instead. @@ -113,7 +113,7 @@ Obsolete severity field for CEF header use :severity instead. [id="{version}-plugins-{type}s-{plugin}-severity"] ===== `severity` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"6"` Severity field in CEF header. The new value can include `%{foo}` strings @@ -126,7 +126,7 @@ All invalid values will be mapped to the default of 6. [id="{version}-plugins-{type}s-{plugin}-signature"] ===== `signature` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"Logstash"` Signature ID field in CEF header. The new value can include `%{foo}` strings @@ -135,7 +135,7 @@ to help you build a new value from other parts of the event. [id="{version}-plugins-{type}s-{plugin}-vendor"] ===== `vendor` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"Elasticsearch"` Device vendor field in CEF header. The new value can include `%{foo}` strings @@ -144,7 +144,7 @@ to help you build a new value from other parts of the event. [id="{version}-plugins-{type}s-{plugin}-version"] ===== `version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"1.0"` Device version field in CEF header. The new value can include `%{foo}` strings diff --git a/docs/versioned-plugins/codecs/json-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/json-v3.0.3.asciidoc index af5c3c2b3..9fce12219 100644 --- a/docs/versioned-plugins/codecs/json-v3.0.3.asciidoc +++ b/docs/versioned-plugins/codecs/json-v3.0.3.asciidoc @@ -12,7 +12,7 @@ START - GENERATED VARIABLES, DO NOT EDIT! END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -[id="plugins-{type}-{plugin}"] +[id="{version}-plugins-{type}s-{plugin}"] === Json codec plugin {version} @@ -38,7 +38,7 @@ failure, the payload will be stored in the `message` field. 
[cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +| <<{version}-plugins-{type}s-{plugin}-charset>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", 
"UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No |=======================================================================   diff --git a/docs/versioned-plugins/codecs/json-v3.0.4.asciidoc b/docs/versioned-plugins/codecs/json-v3.0.4.asciidoc index b950918dc..45eff46e5 100644 --- a/docs/versioned-plugins/codecs/json-v3.0.4.asciidoc +++ b/docs/versioned-plugins/codecs/json-v3.0.4.asciidoc @@ -38,7 +38,7 @@ failure, the payload will be stored in the `message` field. [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", 
"SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +| <<{version}-plugins-{type}s-{plugin}-charset>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No |=======================================================================   diff --git a/docs/versioned-plugins/codecs/json-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/json-v3.0.5.asciidoc index a1a4c9865..96dc1ca76 100644 --- a/docs/versioned-plugins/codecs/json-v3.0.5.asciidoc +++ b/docs/versioned-plugins/codecs/json-v3.0.5.asciidoc @@ -38,7 +38,7 @@ failure, the payload will be stored in the `message` field. 
[cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-charset>> |<>, one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", "UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No +| <<{version}-plugins-{type}s-{plugin}-charset>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["ASCII-8BIT", "UTF-8", "US-ASCII", "Big5", "Big5-HKSCS", "Big5-UAO", "CP949", "Emacs-Mule", "EUC-JP", "EUC-KR", "EUC-TW", "GB2312", "GB18030", "GBK", "ISO-8859-1", "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5", "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-9", "ISO-8859-10", "ISO-8859-11", "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16", "KOI8-R", "KOI8-U", "Shift_JIS", "UTF-16BE", "UTF-16LE", "UTF-32BE", "UTF-32LE", "Windows-31J", "Windows-1250", "Windows-1251", "Windows-1252", "IBM437", "IBM737", "IBM775", "CP850", "IBM852", "CP852", "IBM855", "CP855", "IBM857", "IBM860", "IBM861", "IBM862", "IBM863", "IBM864", "IBM865", "IBM866", "IBM869", "Windows-1258", "GB1988", "macCentEuro", "macCroatian", "macCyrillic", "macGreek", "macIceland", "macRoman", "macRomania", "macThai", "macTurkish", "macUkraine", "CP950", "CP951", "IBM037", "stateless-ISO-2022-JP", "eucJP-ms", "CP51932", "EUC-JIS-2004", "GB12345", "ISO-2022-JP", "ISO-2022-JP-2", "CP50220", "CP50221", "Windows-1256", "Windows-1253", "Windows-1255", "Windows-1254", "TIS-620", "Windows-874", "Windows-1257", "MacJapanese", "UTF-7", "UTF8-MAC", "UTF-16", 
"UTF-32", "UTF8-DoCoMo", "SJIS-DoCoMo", "UTF8-KDDI", "SJIS-KDDI", "ISO-2022-JP-KDDI", "stateless-ISO-2022-JP-KDDI", "UTF8-SoftBank", "SJIS-SoftBank", "BINARY", "CP437", "CP737", "CP775", "IBM850", "CP857", "CP860", "CP861", "CP862", "CP863", "CP864", "CP865", "CP866", "CP869", "CP1258", "Big5-HKSCS:2008", "ebcdic-cp-us", "eucJP", "euc-jp-ms", "EUC-JISX0213", "eucKR", "eucTW", "EUC-CN", "eucCN", "CP936", "ISO2022-JP", "ISO2022-JP2", "ISO8859-1", "ISO8859-2", "ISO8859-3", "ISO8859-4", "ISO8859-5", "ISO8859-6", "CP1256", "ISO8859-7", "CP1253", "ISO8859-8", "CP1255", "ISO8859-9", "CP1254", "ISO8859-10", "ISO8859-11", "CP874", "ISO8859-13", "CP1257", "ISO8859-14", "ISO8859-15", "ISO8859-16", "CP878", "MacJapan", "ASCII", "ANSI_X3.4-1968", "646", "CP65000", "CP65001", "UTF-8-MAC", "UTF-8-HFS", "UCS-2BE", "UCS-4BE", "UCS-4LE", "CP932", "csWindows31J", "SJIS", "PCK", "CP1250", "CP1251", "CP1252", "external", "locale"]`|No |=======================================================================   diff --git a/docs/versioned-plugins/codecs/rubydebug-v3.0.3.asciidoc b/docs/versioned-plugins/codecs/rubydebug-v3.0.3.asciidoc index 6137822a0..c0868bfd1 100644 --- a/docs/versioned-plugins/codecs/rubydebug-v3.0.3.asciidoc +++ b/docs/versioned-plugins/codecs/rubydebug-v3.0.3.asciidoc @@ -12,7 +12,7 @@ START - GENERATED VARIABLES, DO NOT EDIT! END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -[id="plugins-{type}-{plugin}"] +[id="{version}-plugins-{type}s-{plugin}"] === Rubydebug codec plugin {version} @@ -30,7 +30,7 @@ the Ruby Awesome Print library. [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-metadata>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No |=======================================================================   @@ -38,7 +38,7 @@ the Ruby Awesome Print library. [id="{version}-plugins-{type}s-{plugin}-metadata"] ===== `metadata` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Should the event's metadata be included? diff --git a/docs/versioned-plugins/codecs/rubydebug-v3.0.4.asciidoc b/docs/versioned-plugins/codecs/rubydebug-v3.0.4.asciidoc index a77db3a2d..32642161c 100644 --- a/docs/versioned-plugins/codecs/rubydebug-v3.0.4.asciidoc +++ b/docs/versioned-plugins/codecs/rubydebug-v3.0.4.asciidoc @@ -30,7 +30,7 @@ the Ruby Awesome Print library. [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-metadata>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-metadata>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No |=======================================================================   @@ -38,7 +38,7 @@ the Ruby Awesome Print library. [id="{version}-plugins-{type}s-{plugin}-metadata"] ===== `metadata` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Should the event's metadata be included? 
diff --git a/docs/versioned-plugins/codecs/rubydebug-v3.0.5.asciidoc b/docs/versioned-plugins/codecs/rubydebug-v3.0.5.asciidoc index db152424a..2bd9155be 100644 --- a/docs/versioned-plugins/codecs/rubydebug-v3.0.5.asciidoc +++ b/docs/versioned-plugins/codecs/rubydebug-v3.0.5.asciidoc @@ -30,7 +30,7 @@ the Ruby Awesome Print library. [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-metadata>> |<<boolean,boolean>>|No +| <<{version}-plugins-{type}s-{plugin}-metadata>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No |=======================================================================   @@ -38,7 +38,7 @@ the Ruby Awesome Print library. [id="{version}-plugins-{type}s-{plugin}-metadata"] ===== `metadata` - * Value type is <<boolean,boolean>> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Should the event's metadata be included? diff --git a/docs/versioned-plugins/filters/grok-v3.4.1.asciidoc b/docs/versioned-plugins/filters/grok-v3.4.1.asciidoc index 1df98144a..f343f020d 100644 --- a/docs/versioned-plugins/filters/grok-v3.4.1.asciidoc +++ b/docs/versioned-plugins/filters/grok-v3.4.1.asciidoc @@ -7,12 +7,12 @@ START - GENERATED VARIABLES, DO NOT EDIT! :version: v3.4.1 :release_date: 2017-05-10 :changelog_url: https://github.com/logstash-plugins/logstash-filter-grok/blob/v3.4.1/CHANGELOG.md -:include_path: ../../../logstash/docs/include +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -[id="plugins-{type}-{plugin}"] +[id="{version}-plugins-{type}s-{plugin}"] === Grok @@ -160,25 +160,25 @@ filter. This newly defined patterns in `pattern_definitions` will not be availab [id="{version}-plugins-{type}s-{plugin}-options"] ==== Grok Filter Configuration Options -This plugin supports the following configuration options plus the <<plugins-{type}s-{plugin}-common-options>> described later. +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
[cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-break_on_match>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-keep_empty_captures>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-match>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-named_captures_only>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-overwrite>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pattern_definitions>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-patterns_files_glob>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout_millis>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-break_on_match>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-keep_empty_captures>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-match>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-named_captures_only>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-overwrite>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-pattern_definitions>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-patterns_files_glob>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_timeout>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-timeout_millis>> |{logstash-ref}/configuration-file-structure.html#number[number]|No |======================================================================= -Also see <> for a list of options supported by all +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all filter plugins.   @@ -186,7 +186,7 @@ filter plugins. [id="{version}-plugins-{type}s-{plugin}-break_on_match"] ===== `break_on_match` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Break on first match. The first successful match by grok will result in the @@ -196,7 +196,7 @@ parsing different things), then set this to false. [id="{version}-plugins-{type}s-{plugin}-keep_empty_captures"] ===== `keep_empty_captures` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` If `true`, keep empty captures as event fields. @@ -204,7 +204,7 @@ If `true`, keep empty captures as event fields. 
[id="{version}-plugins-{type}s-{plugin}-match"] ===== `match` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * Default value is `{}` A hash of matches of field => value @@ -225,7 +225,7 @@ If you need to match multiple patterns against a single field, the value can be [id="{version}-plugins-{type}s-{plugin}-named_captures_only"] ===== `named_captures_only` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` If `true`, only store named captures from grok. @@ -233,7 +233,7 @@ If `true`, only store named captures from grok. [id="{version}-plugins-{type}s-{plugin}-overwrite"] ===== `overwrite` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` The fields to overwrite. @@ -256,7 +256,7 @@ will be parsed and `hello world` will overwrite the original message. [id="{version}-plugins-{type}s-{plugin}-pattern_definitions"] ===== `pattern_definitions` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * Default value is `{}` A hash of pattern-name and pattern tuples defining custom patterns to be used by @@ -267,7 +267,7 @@ grok [id="{version}-plugins-{type}s-{plugin}-patterns_dir"] ===== `patterns_dir` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` @@ -292,7 +292,7 @@ The patterns are loaded when the pipeline is created. [id="{version}-plugins-{type}s-{plugin}-patterns_files_glob"] ===== `patterns_files_glob` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"*"` Glob pattern, used to select the pattern files in the directories @@ -301,7 +301,7 @@ specified by patterns_dir [id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] ===== `tag_on_failure` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `["_grokparsefailure"]` Append values to the `tags` field when there has been no @@ -310,7 +310,7 @@ successful match [id="{version}-plugins-{type}s-{plugin}-tag_on_timeout"] ===== `tag_on_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"_groktimeout"` Tag to apply if a grok regexp times out. @@ -318,7 +318,7 @@ Tag to apply if a grok regexp times out. [id="{version}-plugins-{type}s-{plugin}-timeout_millis"] ===== `timeout_millis` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `30000` Attempt to terminate regexps after this amount of time. @@ -328,5 +328,5 @@ Actual timeout is approximate based on a 250ms quantization. Set to 0 to disable timeouts - +[id="{version}-plugins-{type}s-{plugin}-common-options"] include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/grok-v3.4.2.asciidoc b/docs/versioned-plugins/filters/grok-v3.4.2.asciidoc index ecf226d27..d18dc9cea 100644 --- a/docs/versioned-plugins/filters/grok-v3.4.2.asciidoc +++ b/docs/versioned-plugins/filters/grok-v3.4.2.asciidoc @@ -12,7 +12,7 @@ START - GENERATED VARIABLES, DO NOT EDIT! END - GENERATED VARIABLES, DO NOT EDIT! 
/////////////////////////////////////////// -[id="plugins-{type}-{plugin}"] +[id="{version}-plugins-{type}s-{plugin}"] === Grok filter plugin {version} @@ -165,17 +165,17 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-break_on_match>> |<<boolean,boolean>>|No -| <<{version}-plugins-{type}s-{plugin}-keep_empty_captures>> |<<boolean,boolean>>|No -| <<{version}-plugins-{type}s-{plugin}-match>> |<<hash,hash>>|No -| <<{version}-plugins-{type}s-{plugin}-named_captures_only>> |<<boolean,boolean>>|No -| <<{version}-plugins-{type}s-{plugin}-overwrite>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-pattern_definitions>> |<<hash,hash>>|No -| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-patterns_files_glob>> |<<string,string>>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_timeout>> |<<string,string>>|No -| <<{version}-plugins-{type}s-{plugin}-timeout_millis>> |<<number,number>>|No +| <<{version}-plugins-{type}s-{plugin}-break_on_match>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-keep_empty_captures>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-match>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-named_captures_only>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-overwrite>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-pattern_definitions>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-patterns_files_glob>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_timeout>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-timeout_millis>> |{logstash-ref}/configuration-file-structure.html#number[number]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -186,7 +186,7 @@ filter plugins. [id="{version}-plugins-{type}s-{plugin}-break_on_match"] ===== `break_on_match` - * Value type is <<boolean,boolean>> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Break on first match. The first successful match by grok will result in the @@ -196,7 +196,7 @@ parsing different things), then set this to false. [id="{version}-plugins-{type}s-{plugin}-keep_empty_captures"] ===== `keep_empty_captures` - * Value type is <<boolean,boolean>> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` If `true`, keep empty captures as event fields. @@ -204,7 +204,7 @@
[id="{version}-plugins-{type}s-{plugin}-match"] ===== `match` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * Default value is `{}` A hash of matches of field => value @@ -225,7 +225,7 @@ If you need to match multiple patterns against a single field, the value can be [id="{version}-plugins-{type}s-{plugin}-named_captures_only"] ===== `named_captures_only` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` If `true`, only store named captures from grok. @@ -233,7 +233,7 @@ If `true`, only store named captures from grok. [id="{version}-plugins-{type}s-{plugin}-overwrite"] ===== `overwrite` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` The fields to overwrite. @@ -256,7 +256,7 @@ will be parsed and `hello world` will overwrite the original message. [id="{version}-plugins-{type}s-{plugin}-pattern_definitions"] ===== `pattern_definitions` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * Default value is `{}` A hash of pattern-name and pattern tuples defining custom patterns to be used by @@ -267,7 +267,7 @@ grok [id="{version}-plugins-{type}s-{plugin}-patterns_dir"] ===== `patterns_dir` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` @@ -292,7 +292,7 @@ The patterns are loaded when the pipeline is created. [id="{version}-plugins-{type}s-{plugin}-patterns_files_glob"] ===== `patterns_files_glob` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"*"` Glob pattern, used to select the pattern files in the directories @@ -301,7 +301,7 @@ specified by patterns_dir [id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] ===== `tag_on_failure` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `["_grokparsefailure"]` Append values to the `tags` field when there has been no @@ -310,7 +310,7 @@ successful match [id="{version}-plugins-{type}s-{plugin}-tag_on_timeout"] ===== `tag_on_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"_groktimeout"` Tag to apply if a grok regexp times out. @@ -318,7 +318,7 @@ Tag to apply if a grok regexp times out. [id="{version}-plugins-{type}s-{plugin}-timeout_millis"] ===== `timeout_millis` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `30000` Attempt to terminate regexps after this amount of time. 
diff --git a/docs/versioned-plugins/filters/grok-v3.4.3.asciidoc b/docs/versioned-plugins/filters/grok-v3.4.3.asciidoc index 65043d69c..2a3341ebc 100644 --- a/docs/versioned-plugins/filters/grok-v3.4.3.asciidoc +++ b/docs/versioned-plugins/filters/grok-v3.4.3.asciidoc @@ -164,17 +164,17 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-break_on_match>> |<<boolean,boolean>>|No -| <<{version}-plugins-{type}s-{plugin}-keep_empty_captures>> |<<boolean,boolean>>|No -| <<{version}-plugins-{type}s-{plugin}-match>> |<<hash,hash>>|No -| <<{version}-plugins-{type}s-{plugin}-named_captures_only>> |<<boolean,boolean>>|No -| <<{version}-plugins-{type}s-{plugin}-overwrite>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-pattern_definitions>> |<<hash,hash>>|No -| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-patterns_files_glob>> |<<string,string>>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_timeout>> |<<string,string>>|No -| <<{version}-plugins-{type}s-{plugin}-timeout_millis>> |<<number,number>>|No +| <<{version}-plugins-{type}s-{plugin}-break_on_match>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-keep_empty_captures>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-match>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-named_captures_only>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-overwrite>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-pattern_definitions>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-patterns_files_glob>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_timeout>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-timeout_millis>> |{logstash-ref}/configuration-file-structure.html#number[number]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -185,7 +185,7 @@ filter plugins. [id="{version}-plugins-{type}s-{plugin}-break_on_match"] ===== `break_on_match` - * Value type is <<boolean,boolean>> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Break on first match. The first successful match by grok will result in the @@ -195,7 +195,7 @@ parsing different things), then set this to false. [id="{version}-plugins-{type}s-{plugin}-keep_empty_captures"] ===== `keep_empty_captures` - * Value type is <<boolean,boolean>> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` If `true`, keep empty captures as event fields. @@ -203,7 +203,7 @@
[id="{version}-plugins-{type}s-{plugin}-match"] ===== `match` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * Default value is `{}` A hash of matches of field => value @@ -224,7 +224,7 @@ If you need to match multiple patterns against a single field, the value can be [id="{version}-plugins-{type}s-{plugin}-named_captures_only"] ===== `named_captures_only` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` If `true`, only store named captures from grok. @@ -232,7 +232,7 @@ If `true`, only store named captures from grok. [id="{version}-plugins-{type}s-{plugin}-overwrite"] ===== `overwrite` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` The fields to overwrite. @@ -255,7 +255,7 @@ will be parsed and `hello world` will overwrite the original message. [id="{version}-plugins-{type}s-{plugin}-pattern_definitions"] ===== `pattern_definitions` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * Default value is `{}` A hash of pattern-name and pattern tuples defining custom patterns to be used by @@ -266,7 +266,7 @@ grok [id="{version}-plugins-{type}s-{plugin}-patterns_dir"] ===== `patterns_dir` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` @@ -291,7 +291,7 @@ The patterns are loaded when the pipeline is created. [id="{version}-plugins-{type}s-{plugin}-patterns_files_glob"] ===== `patterns_files_glob` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"*"` Glob pattern, used to select the pattern files in the directories @@ -300,7 +300,7 @@ specified by patterns_dir [id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] ===== `tag_on_failure` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `["_grokparsefailure"]` Append values to the `tags` field when there has been no @@ -309,7 +309,7 @@ successful match [id="{version}-plugins-{type}s-{plugin}-tag_on_timeout"] ===== `tag_on_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"_groktimeout"` Tag to apply if a grok regexp times out. @@ -317,7 +317,7 @@ Tag to apply if a grok regexp times out. [id="{version}-plugins-{type}s-{plugin}-timeout_millis"] ===== `timeout_millis` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `30000` Attempt to terminate regexps after this amount of time. 
diff --git a/docs/versioned-plugins/filters/grok-v3.4.4.asciidoc b/docs/versioned-plugins/filters/grok-v3.4.4.asciidoc index 55a2a504b..eb266e249 100644 --- a/docs/versioned-plugins/filters/grok-v3.4.4.asciidoc +++ b/docs/versioned-plugins/filters/grok-v3.4.4.asciidoc @@ -164,17 +164,17 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-break_on_match>> |<<boolean,boolean>>|No -| <<{version}-plugins-{type}s-{plugin}-keep_empty_captures>> |<<boolean,boolean>>|No -| <<{version}-plugins-{type}s-{plugin}-match>> |<<hash,hash>>|No -| <<{version}-plugins-{type}s-{plugin}-named_captures_only>> |<<boolean,boolean>>|No -| <<{version}-plugins-{type}s-{plugin}-overwrite>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-pattern_definitions>> |<<hash,hash>>|No -| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-patterns_files_glob>> |<<string,string>>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_timeout>> |<<string,string>>|No -| <<{version}-plugins-{type}s-{plugin}-timeout_millis>> |<<number,number>>|No +| <<{version}-plugins-{type}s-{plugin}-break_on_match>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-keep_empty_captures>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-match>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-named_captures_only>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-overwrite>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-pattern_definitions>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-patterns_files_glob>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_timeout>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-timeout_millis>> |{logstash-ref}/configuration-file-structure.html#number[number]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -185,7 +185,7 @@ filter plugins. [id="{version}-plugins-{type}s-{plugin}-break_on_match"] ===== `break_on_match` - * Value type is <<boolean,boolean>> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Break on first match. The first successful match by grok will result in the @@ -195,7 +195,7 @@ parsing different things), then set this to false. [id="{version}-plugins-{type}s-{plugin}-keep_empty_captures"] ===== `keep_empty_captures` - * Value type is <<boolean,boolean>> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` If `true`, keep empty captures as event fields. @@ -203,7 +203,7 @@
[id="{version}-plugins-{type}s-{plugin}-match"] ===== `match` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * Default value is `{}` A hash of matches of field => value @@ -224,7 +224,7 @@ If you need to match multiple patterns against a single field, the value can be [id="{version}-plugins-{type}s-{plugin}-named_captures_only"] ===== `named_captures_only` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` If `true`, only store named captures from grok. @@ -232,7 +232,7 @@ If `true`, only store named captures from grok. [id="{version}-plugins-{type}s-{plugin}-overwrite"] ===== `overwrite` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` The fields to overwrite. @@ -255,7 +255,7 @@ will be parsed and `hello world` will overwrite the original message. [id="{version}-plugins-{type}s-{plugin}-pattern_definitions"] ===== `pattern_definitions` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * Default value is `{}` A hash of pattern-name and pattern tuples defining custom patterns to be used by @@ -266,7 +266,7 @@ grok [id="{version}-plugins-{type}s-{plugin}-patterns_dir"] ===== `patterns_dir` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` @@ -291,7 +291,7 @@ The patterns are loaded when the pipeline is created. [id="{version}-plugins-{type}s-{plugin}-patterns_files_glob"] ===== `patterns_files_glob` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"*"` Glob pattern, used to select the pattern files in the directories @@ -300,7 +300,7 @@ specified by patterns_dir [id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] ===== `tag_on_failure` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `["_grokparsefailure"]` Append values to the `tags` field when there has been no @@ -309,7 +309,7 @@ successful match [id="{version}-plugins-{type}s-{plugin}-tag_on_timeout"] ===== `tag_on_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"_groktimeout"` Tag to apply if a grok regexp times out. @@ -317,7 +317,7 @@ Tag to apply if a grok regexp times out. [id="{version}-plugins-{type}s-{plugin}-timeout_millis"] ===== `timeout_millis` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `30000` Attempt to terminate regexps after this amount of time. 
diff --git a/docs/versioned-plugins/filters/grok-v4.0.0.asciidoc b/docs/versioned-plugins/filters/grok-v4.0.0.asciidoc index f81da7ea2..81b51548c 100644 --- a/docs/versioned-plugins/filters/grok-v4.0.0.asciidoc +++ b/docs/versioned-plugins/filters/grok-v4.0.0.asciidoc @@ -164,17 +164,17 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-break_on_match>> |<<boolean,boolean>>|No -| <<{version}-plugins-{type}s-{plugin}-keep_empty_captures>> |<<boolean,boolean>>|No -| <<{version}-plugins-{type}s-{plugin}-match>> |<<hash,hash>>|No -| <<{version}-plugins-{type}s-{plugin}-named_captures_only>> |<<boolean,boolean>>|No -| <<{version}-plugins-{type}s-{plugin}-overwrite>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-pattern_definitions>> |<<hash,hash>>|No -| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-patterns_files_glob>> |<<string,string>>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_timeout>> |<<string,string>>|No -| <<{version}-plugins-{type}s-{plugin}-timeout_millis>> |<<number,number>>|No +| <<{version}-plugins-{type}s-{plugin}-break_on_match>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-keep_empty_captures>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-match>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-named_captures_only>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-overwrite>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-pattern_definitions>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-patterns_files_glob>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_timeout>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-timeout_millis>> |{logstash-ref}/configuration-file-structure.html#number[number]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -185,7 +185,7 @@ filter plugins. [id="{version}-plugins-{type}s-{plugin}-break_on_match"] ===== `break_on_match` - * Value type is <<boolean,boolean>> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Break on first match. The first successful match by grok will result in the @@ -195,7 +195,7 @@ parsing different things), then set this to false. [id="{version}-plugins-{type}s-{plugin}-keep_empty_captures"] ===== `keep_empty_captures` - * Value type is <<boolean,boolean>> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` If `true`, keep empty captures as event fields. @@ -203,7 +203,7 @@
[id="{version}-plugins-{type}s-{plugin}-match"] ===== `match` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * Default value is `{}` A hash of matches of field => value @@ -224,7 +224,7 @@ If you need to match multiple patterns against a single field, the value can be [id="{version}-plugins-{type}s-{plugin}-named_captures_only"] ===== `named_captures_only` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` If `true`, only store named captures from grok. @@ -232,7 +232,7 @@ If `true`, only store named captures from grok. [id="{version}-plugins-{type}s-{plugin}-overwrite"] ===== `overwrite` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` The fields to overwrite. @@ -255,7 +255,7 @@ will be parsed and `hello world` will overwrite the original message. [id="{version}-plugins-{type}s-{plugin}-pattern_definitions"] ===== `pattern_definitions` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * Default value is `{}` A hash of pattern-name and pattern tuples defining custom patterns to be used by @@ -266,7 +266,7 @@ grok [id="{version}-plugins-{type}s-{plugin}-patterns_dir"] ===== `patterns_dir` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` @@ -291,7 +291,7 @@ The patterns are loaded when the pipeline is created. [id="{version}-plugins-{type}s-{plugin}-patterns_files_glob"] ===== `patterns_files_glob` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"*"` Glob pattern, used to select the pattern files in the directories @@ -300,7 +300,7 @@ specified by patterns_dir [id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] ===== `tag_on_failure` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `["_grokparsefailure"]` Append values to the `tags` field when there has been no @@ -309,7 +309,7 @@ successful match [id="{version}-plugins-{type}s-{plugin}-tag_on_timeout"] ===== `tag_on_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"_groktimeout"` Tag to apply if a grok regexp times out. @@ -317,7 +317,7 @@ Tag to apply if a grok regexp times out. [id="{version}-plugins-{type}s-{plugin}-timeout_millis"] ===== `timeout_millis` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `30000` Attempt to terminate regexps after this amount of time. 
diff --git a/docs/versioned-plugins/filters/grok-v4.0.1.asciidoc b/docs/versioned-plugins/filters/grok-v4.0.1.asciidoc index bde168c91..43e802c99 100644 --- a/docs/versioned-plugins/filters/grok-v4.0.1.asciidoc +++ b/docs/versioned-plugins/filters/grok-v4.0.1.asciidoc @@ -164,17 +164,17 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-break_on_match>> |<<boolean,boolean>>|No -| <<{version}-plugins-{type}s-{plugin}-keep_empty_captures>> |<<boolean,boolean>>|No -| <<{version}-plugins-{type}s-{plugin}-match>> |<<hash,hash>>|No -| <<{version}-plugins-{type}s-{plugin}-named_captures_only>> |<<boolean,boolean>>|No -| <<{version}-plugins-{type}s-{plugin}-overwrite>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-pattern_definitions>> |<<hash,hash>>|No -| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-patterns_files_glob>> |<<string,string>>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_timeout>> |<<string,string>>|No -| <<{version}-plugins-{type}s-{plugin}-timeout_millis>> |<<number,number>>|No +| <<{version}-plugins-{type}s-{plugin}-break_on_match>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-keep_empty_captures>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-match>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-named_captures_only>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-overwrite>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-pattern_definitions>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-patterns_dir>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-patterns_files_glob>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_failure>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_timeout>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-timeout_millis>> |{logstash-ref}/configuration-file-structure.html#number[number]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -185,7 +185,7 @@ filter plugins. [id="{version}-plugins-{type}s-{plugin}-break_on_match"] ===== `break_on_match` - * Value type is <<boolean,boolean>> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Break on first match. The first successful match by grok will result in the @@ -195,7 +195,7 @@ parsing different things), then set this to false. [id="{version}-plugins-{type}s-{plugin}-keep_empty_captures"] ===== `keep_empty_captures` - * Value type is <<boolean,boolean>> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` If `true`, keep empty captures as event fields. @@ -203,7 +203,7 @@
[id="{version}-plugins-{type}s-{plugin}-match"] ===== `match` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * Default value is `{}` A hash of matches of field => value @@ -224,7 +224,7 @@ If you need to match multiple patterns against a single field, the value can be [id="{version}-plugins-{type}s-{plugin}-named_captures_only"] ===== `named_captures_only` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` If `true`, only store named captures from grok. @@ -232,7 +232,7 @@ If `true`, only store named captures from grok. [id="{version}-plugins-{type}s-{plugin}-overwrite"] ===== `overwrite` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` The fields to overwrite. @@ -255,7 +255,7 @@ will be parsed and `hello world` will overwrite the original message. [id="{version}-plugins-{type}s-{plugin}-pattern_definitions"] ===== `pattern_definitions` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * Default value is `{}` A hash of pattern-name and pattern tuples defining custom patterns to be used by @@ -266,7 +266,7 @@ grok [id="{version}-plugins-{type}s-{plugin}-patterns_dir"] ===== `patterns_dir` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` @@ -291,7 +291,7 @@ The patterns are loaded when the pipeline is created. [id="{version}-plugins-{type}s-{plugin}-patterns_files_glob"] ===== `patterns_files_glob` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"*"` Glob pattern, used to select the pattern files in the directories @@ -300,7 +300,7 @@ specified by patterns_dir [id="{version}-plugins-{type}s-{plugin}-tag_on_failure"] ===== `tag_on_failure` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `["_grokparsefailure"]` Append values to the `tags` field when there has been no @@ -309,7 +309,7 @@ successful match [id="{version}-plugins-{type}s-{plugin}-tag_on_timeout"] ===== `tag_on_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"_groktimeout"` Tag to apply if a grok regexp times out. @@ -317,7 +317,7 @@ Tag to apply if a grok regexp times out. [id="{version}-plugins-{type}s-{plugin}-timeout_millis"] ===== `timeout_millis` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `30000` Attempt to terminate regexps after this amount of time. diff --git a/docs/versioned-plugins/filters/mutate-v3.1.5.asciidoc b/docs/versioned-plugins/filters/mutate-v3.1.5.asciidoc index 5cb812257..6d0a3ccc0 100644 --- a/docs/versioned-plugins/filters/mutate-v3.1.5.asciidoc +++ b/docs/versioned-plugins/filters/mutate-v3.1.5.asciidoc @@ -12,7 +12,7 @@ START - GENERATED VARIABLES, DO NOT EDIT! END - GENERATED VARIABLES, DO NOT EDIT! 
/////////////////////////////////////////// -[id="plugins-{type}-{plugin}"] +[id="{version}-plugins-{type}s-{plugin}"] === Mutate filter plugin {version} @@ -31,18 +31,18 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-convert>> |<<hash,hash>>|No -| <<{version}-plugins-{type}s-{plugin}-copy>> |<<hash,hash>>|No -| <<{version}-plugins-{type}s-{plugin}-gsub>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-join>> |<<hash,hash>>|No -| <<{version}-plugins-{type}s-{plugin}-lowercase>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-merge>> |<<hash,hash>>|No -| <<{version}-plugins-{type}s-{plugin}-rename>> |<<hash,hash>>|No -| <<{version}-plugins-{type}s-{plugin}-replace>> |<<hash,hash>>|No -| <<{version}-plugins-{type}s-{plugin}-split>> |<<hash,hash>>|No -| <<{version}-plugins-{type}s-{plugin}-strip>> |<<array,array>>|No -| <<{version}-plugins-{type}s-{plugin}-update>> |<<hash,hash>>|No -| <<{version}-plugins-{type}s-{plugin}-uppercase>> |<<array,array>>|No +| <<{version}-plugins-{type}s-{plugin}-convert>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-copy>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-gsub>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-join>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-lowercase>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-merge>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-rename>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-replace>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-split>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-strip>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-update>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-uppercase>> |{logstash-ref}/configuration-file-structure.html#array[array]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -53,7 +53,7 @@ filter plugins. [id="{version}-plugins-{type}s-{plugin}-convert"] ===== `convert` - * Value type is <<hash,hash>> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Convert a field's value to a different type, like turning a string to an @@ -81,7 +81,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-copy"] ===== `copy` - * Value type is <<hash,hash>> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Copy an existing field to another field. Existing target field will be overridden. @@ -97,7 +97,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-gsub"] ===== `gsub` - * Value type is <<array,array>> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * There is no default value for this setting. Convert a string field by applying a regular expression and a replacement.
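The `gsub` option described just above takes a flat array of field/pattern/replacement triples. A minimal sketch (the field name is hypothetical):

[source,ruby]
    filter {
      mutate {
        # replace all forward slashes in fieldname with an underscore
        gsub => [ "fieldname", "/", "_" ]
      }
    }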
@@ -126,7 +126,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-join"] ===== `join` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Join an array with a separator character. Does nothing on non-array fields. @@ -142,7 +142,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-lowercase"] ===== `lowercase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * There is no default value for this setting. Convert a string to its lowercase equivalent. @@ -158,7 +158,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-merge"] ===== `merge` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Merge two fields of arrays or hashes. @@ -179,7 +179,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-rename"] ===== `rename` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Rename one or more fields. @@ -196,7 +196,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-replace"] ===== `replace` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Replace a field with a new value. The new value can include `%{foo}` strings @@ -213,7 +213,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-split"] ===== `split` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Split a field to an array using a separator character. Only works on string @@ -230,7 +230,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-strip"] ===== `strip` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * There is no default value for this setting. Strip whitespace from field. NOTE: this only works on leading and trailing whitespace. @@ -246,7 +246,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-update"] ===== `update` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Update an existing field with a new value. If the field does not exist, @@ -263,7 +263,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-uppercase"] ===== `uppercase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * There is no default value for this setting. Convert a string to its uppercase equivalent. 
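// Illustrative sketch (not part of the generated docs): the mutate settings documented in this file can be combined in a single filter block. The field names used here (`hostname`, `request_time`, `loglevel`) are hypothetical.
[source,ruby]
----
filter {
  mutate {
    rename    => { "hostname" => "host" }        # move the value to a new field name
    convert   => { "request_time" => "float" }   # cast the string value to a float
    gsub      => [ "message", "\t", " " ]        # replace each tab with a space
    uppercase => [ "loglevel" ]                  # normalize the field to uppercase
  }
}
----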
diff --git a/docs/versioned-plugins/filters/mutate-v3.1.6.asciidoc b/docs/versioned-plugins/filters/mutate-v3.1.6.asciidoc index d8cc45f7d..f8e360d43 100644 --- a/docs/versioned-plugins/filters/mutate-v3.1.6.asciidoc +++ b/docs/versioned-plugins/filters/mutate-v3.1.6.asciidoc @@ -31,18 +31,18 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-convert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-copy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-gsub>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-join>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-lowercase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-merge>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-rename>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-replace>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-split>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-strip>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-update>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-uppercase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-convert>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-copy>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-gsub>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-join>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-lowercase>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-merge>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-rename>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-replace>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-split>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-strip>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-update>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-uppercase>> |{logstash-ref}/configuration-file-structure.html#array[array]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -53,7 +53,7 @@ filter plugins. [id="{version}-plugins-{type}s-{plugin}-convert"] ===== `convert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Convert a field's value to a different type, like turning a string to an @@ -81,7 +81,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-copy"] ===== `copy` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Copy an existing field to another field. Existing target field will be overriden. 
@@ -97,7 +97,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-gsub"] ===== `gsub` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * There is no default value for this setting. Match a regular expression against a field value and replace all matches @@ -127,7 +127,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-join"] ===== `join` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Join an array with a separator character. Does nothing on non-array fields. @@ -143,7 +143,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-lowercase"] ===== `lowercase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * There is no default value for this setting. Convert a string to its lowercase equivalent. @@ -159,7 +159,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-merge"] ===== `merge` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Merge two fields of arrays or hashes. @@ -180,7 +180,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-rename"] ===== `rename` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Rename one or more fields. @@ -197,7 +197,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-replace"] ===== `replace` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Replace a field with a new value. The new value can include `%{foo}` strings @@ -214,7 +214,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-split"] ===== `split` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Split a field to an array using a separator character. Only works on string @@ -231,7 +231,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-strip"] ===== `strip` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * There is no default value for this setting. Strip whitespace from field. NOTE: this only works on leading and trailing whitespace. @@ -247,7 +247,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-update"] ===== `update` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Update an existing field with a new value. If the field does not exist, @@ -264,7 +264,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-uppercase"] ===== `uppercase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * There is no default value for this setting. Convert a string to its uppercase equivalent. 
diff --git a/docs/versioned-plugins/filters/mutate-v3.1.7.asciidoc b/docs/versioned-plugins/filters/mutate-v3.1.7.asciidoc index 29039dbc2..0be673a5c 100644 --- a/docs/versioned-plugins/filters/mutate-v3.1.7.asciidoc +++ b/docs/versioned-plugins/filters/mutate-v3.1.7.asciidoc @@ -31,18 +31,18 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-convert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-copy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-gsub>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-join>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-lowercase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-merge>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-rename>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-replace>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-split>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-strip>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-update>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-uppercase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-convert>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-copy>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-gsub>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-join>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-lowercase>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-merge>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-rename>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-replace>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-split>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-strip>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-update>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-uppercase>> |{logstash-ref}/configuration-file-structure.html#array[array]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -53,7 +53,7 @@ filter plugins. [id="{version}-plugins-{type}s-{plugin}-convert"] ===== `convert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Convert a field's value to a different type, like turning a string to an @@ -81,7 +81,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-copy"] ===== `copy` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Copy an existing field to another field. Existing target field will be overriden. 
@@ -97,7 +97,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-gsub"] ===== `gsub` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * There is no default value for this setting. Match a regular expression against a field value and replace all matches @@ -127,7 +127,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-join"] ===== `join` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Join an array with a separator character. Does nothing on non-array fields. @@ -143,7 +143,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-lowercase"] ===== `lowercase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * There is no default value for this setting. Convert a string to its lowercase equivalent. @@ -159,7 +159,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-merge"] ===== `merge` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Merge two fields of arrays or hashes. @@ -180,7 +180,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-rename"] ===== `rename` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Rename one or more fields. @@ -197,7 +197,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-replace"] ===== `replace` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Replace a field with a new value. The new value can include `%{foo}` strings @@ -214,7 +214,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-split"] ===== `split` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Split a field to an array using a separator character. Only works on string @@ -231,7 +231,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-strip"] ===== `strip` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * There is no default value for this setting. Strip whitespace from field. NOTE: this only works on leading and trailing whitespace. @@ -247,7 +247,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-update"] ===== `update` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Update an existing field with a new value. If the field does not exist, @@ -264,7 +264,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-uppercase"] ===== `uppercase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * There is no default value for this setting. Convert a string to its uppercase equivalent. 
diff --git a/docs/versioned-plugins/filters/mutate-v3.2.0.asciidoc b/docs/versioned-plugins/filters/mutate-v3.2.0.asciidoc index 804b57d2f..b55d1273e 100644 --- a/docs/versioned-plugins/filters/mutate-v3.2.0.asciidoc +++ b/docs/versioned-plugins/filters/mutate-v3.2.0.asciidoc @@ -31,18 +31,18 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-convert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-copy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-gsub>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-join>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-lowercase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-merge>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-rename>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-replace>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-split>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-strip>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-update>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-uppercase>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-convert>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-copy>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-gsub>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-join>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-lowercase>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-merge>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-rename>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-replace>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-split>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-strip>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-update>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-uppercase>> |{logstash-ref}/configuration-file-structure.html#array[array]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -53,7 +53,7 @@ filter plugins. [id="{version}-plugins-{type}s-{plugin}-convert"] ===== `convert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Convert a field's value to a different type, like turning a string to an @@ -85,7 +85,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-copy"] ===== `copy` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Copy an existing field to another field. Existing target field will be overriden. 
@@ -101,7 +101,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-gsub"] ===== `gsub` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * There is no default value for this setting. Match a regular expression against a field value and replace all matches @@ -131,7 +131,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-join"] ===== `join` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Join an array with a separator character. Does nothing on non-array fields. @@ -147,7 +147,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-lowercase"] ===== `lowercase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * There is no default value for this setting. Convert a string to its lowercase equivalent. @@ -163,7 +163,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-merge"] ===== `merge` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Merge two fields of arrays or hashes. @@ -184,7 +184,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-rename"] ===== `rename` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Rename one or more fields. @@ -201,7 +201,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-replace"] ===== `replace` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Replace a field with a new value. The new value can include `%{foo}` strings @@ -218,7 +218,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-split"] ===== `split` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Split a field to an array using a separator character. Only works on string @@ -235,7 +235,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-strip"] ===== `strip` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * There is no default value for this setting. Strip whitespace from field. NOTE: this only works on leading and trailing whitespace. @@ -251,7 +251,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-update"] ===== `update` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Update an existing field with a new value. If the field does not exist, @@ -268,7 +268,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-uppercase"] ===== `uppercase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * There is no default value for this setting. Convert a string to its uppercase equivalent. diff --git a/docs/versioned-plugins/filters/ruby-v3.0.3.asciidoc b/docs/versioned-plugins/filters/ruby-v3.0.3.asciidoc index c5699b506..dc0347fe9 100644 --- a/docs/versioned-plugins/filters/ruby-v3.0.3.asciidoc +++ b/docs/versioned-plugins/filters/ruby-v3.0.3.asciidoc @@ -12,7 +12,7 @@ START - GENERATED VARIABLES, DO NOT EDIT! END - GENERATED VARIABLES, DO NOT EDIT! 
/////////////////////////////////////////// -[id="plugins-{type}-{plugin}"] +[id="{version}-plugins-{type}s-{plugin}"] === Ruby filter plugin {version} @@ -49,8 +49,8 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-code>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-init>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-code>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes +| <<{version}-plugins-{type}s-{plugin}-init>> |{logstash-ref}/configuration-file-structure.html#string[string]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -62,16 +62,16 @@ filter plugins. ===== `code` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The code to execute for every event. -You will have an `event` variable available that is the event itself. See the <> for more information. +You will have an `event` variable available that is the event itself. See the {logstash-ref}/event-api.html[Event API] for more information. [id="{version}-plugins-{type}s-{plugin}-init"] ===== `init` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Any code to execute at logstash startup-time diff --git a/docs/versioned-plugins/filters/ruby-v3.0.4.asciidoc b/docs/versioned-plugins/filters/ruby-v3.0.4.asciidoc index b73bf0a05..caa33fe6a 100644 --- a/docs/versioned-plugins/filters/ruby-v3.0.4.asciidoc +++ b/docs/versioned-plugins/filters/ruby-v3.0.4.asciidoc @@ -49,8 +49,8 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-code>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-init>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-code>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes +| <<{version}-plugins-{type}s-{plugin}-init>> |{logstash-ref}/configuration-file-structure.html#string[string]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -62,16 +62,16 @@ filter plugins. ===== `code` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The code to execute for every event. -You will have an `event` variable available that is the event itself. See the <> for more information. +You will have an `event` variable available that is the event itself. See the {logstash-ref}/event-api.html[Event API] for more information. [id="{version}-plugins-{type}s-{plugin}-init"] ===== `init` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. 
Any code to execute at logstash startup-time diff --git a/docs/versioned-plugins/filters/ruby-v3.1.0.asciidoc b/docs/versioned-plugins/filters/ruby-v3.1.0.asciidoc index 36a09bfe3..8b72ba770 100644 --- a/docs/versioned-plugins/filters/ruby-v3.1.0.asciidoc +++ b/docs/versioned-plugins/filters/ruby-v3.1.0.asciidoc @@ -86,7 +86,7 @@ def filter(event) end ---- -====== Testing the ruby script +===== Testing the ruby script To validate the behaviour of the `filter` method you implemented, the Ruby filter plugin provides an inline test framework where you @@ -124,7 +124,7 @@ end We can now test that the ruby script we're using is implemented correctly: -[source] +[source,shell] ---- % bin/logstash -e "filter { ruby { path => '/etc/logstash/drop_percentage.rb' script_params => { 'drop_percentage' => 0.5 } } }" -t [2017-10-13T13:44:29,723][INFO ][logstash.filters.ruby.script] Test run complete {:script_path=>"/etc/logstash/drop_percentage.rb", :results=>{:passed=>1, :failed=>0, :errored=>0}} @@ -140,11 +140,11 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-code>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-init>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_params>> |<>,{}|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_exception>> |<>,_rubyexception|No +| <<{version}-plugins-{type}s-{plugin}-code>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-init>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_params>> |{logstash-ref}/configuration-file-structure.html#hash[hash],{}|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_exception>> |{logstash-ref}/configuration-file-structure.html#string[string],_rubyexception|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -155,17 +155,17 @@ filter plugins. [id="{version}-plugins-{type}s-{plugin}-code"] ===== `code` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. * This setting cannot be used together with `path`. The code to execute for every event. -You will have an `event` variable available that is the event itself. See the <> for more information. +You will have an `event` variable available that is the event itself. See the {logstash-ref}/event-api.html[Event API] for more information. [id="{version}-plugins-{type}s-{plugin}-init"] ===== `init` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Any code to execute at logstash startup-time @@ -173,7 +173,7 @@ Any code to execute at logstash startup-time [id="{version}-plugins-{type}s-{plugin}-path"] ===== `path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. * This setting cannot be used together with `code`. 
@@ -182,11 +182,20 @@ The path of the ruby script file that implements the `filter` method. [id="{version}-plugins-{type}s-{plugin}-script_params"] ===== `script_params` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * Default value is `{}` A key/value hash with parameters that are passed to the register method of your ruby script file defined in `path`. +[id="{version}-plugins-{type}s-{plugin}-tag_on_exception"] +===== `tag_on_exception` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `_rubyexception` + +Tag to add to events in case the ruby code (either inline or file based) +causes an exception. + [id="{version}-plugins-{type}s-{plugin}-common-options"] include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/ruby-v3.1.1.asciidoc b/docs/versioned-plugins/filters/ruby-v3.1.1.asciidoc index 4ba628eb6..41d96997e 100644 --- a/docs/versioned-plugins/filters/ruby-v3.1.1.asciidoc +++ b/docs/versioned-plugins/filters/ruby-v3.1.1.asciidoc @@ -86,7 +86,7 @@ def filter(event) end ---- -====== Testing the ruby script +===== Testing the ruby script To validate the behaviour of the `filter` method you implemented, the Ruby filter plugin provides an inline test framework where you @@ -124,7 +124,7 @@ end We can now test that the ruby script we're using is implemented correctly: -[source] +[source,shell] ---- % bin/logstash -e "filter { ruby { path => '/etc/logstash/drop_percentage.rb' script_params => { 'drop_percentage' => 0.5 } } }" -t [2017-10-13T13:44:29,723][INFO ][logstash.filters.ruby.script] Test run complete {:script_path=>"/etc/logstash/drop_percentage.rb", :results=>{:passed=>1, :failed=>0, :errored=>0}} @@ -140,11 +140,11 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-code>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-init>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_params>> |<>,{}|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_exception>> |<>,_rubyexception|No +| <<{version}-plugins-{type}s-{plugin}-code>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-init>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_params>> |{logstash-ref}/configuration-file-structure.html#hash[hash],{}|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_exception>> |{logstash-ref}/configuration-file-structure.html#string[string],_rubyexception|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -155,17 +155,17 @@ filter plugins. [id="{version}-plugins-{type}s-{plugin}-code"] ===== `code` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. * This setting cannot be used together with `path`. The code to execute for every event. -You will have an `event` variable available that is the event itself. See the <> for more information. 
+You will have an `event` variable available that is the event itself. See the {logstash-ref}/event-api.html[Event API] for more information. [id="{version}-plugins-{type}s-{plugin}-init"] ===== `init` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Any code to execute at logstash startup-time @@ -173,7 +173,7 @@ Any code to execute at logstash startup-time [id="{version}-plugins-{type}s-{plugin}-path"] ===== `path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. * This setting cannot be used together with `code`. @@ -182,11 +182,20 @@ The path of the ruby script file that implements the `filter` method. [id="{version}-plugins-{type}s-{plugin}-script_params"] ===== `script_params` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * Default value is `{}` A key/value hash with parameters that are passed to the register method of your ruby script file defined in `path`. +[id="{version}-plugins-{type}s-{plugin}-tag_on_exception"] +===== `tag_on_exception` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `_rubyexception` + +Tag to add to events in case the ruby code (either inline or file based) +causes an exception. + [id="{version}-plugins-{type}s-{plugin}-common-options"] include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/ruby-v3.1.2.asciidoc b/docs/versioned-plugins/filters/ruby-v3.1.2.asciidoc index ad4fc8d37..375e5a411 100644 --- a/docs/versioned-plugins/filters/ruby-v3.1.2.asciidoc +++ b/docs/versioned-plugins/filters/ruby-v3.1.2.asciidoc @@ -86,7 +86,7 @@ def filter(event) end ---- -====== Testing the ruby script +===== Testing the ruby script To validate the behaviour of the `filter` method you implemented, the Ruby filter plugin provides an inline test framework where you @@ -124,7 +124,7 @@ end We can now test that the ruby script we're using is implemented correctly: -[source] +[source,shell] ---- % bin/logstash -e "filter { ruby { path => '/etc/logstash/drop_percentage.rb' script_params => { 'drop_percentage' => 0.5 } } }" -t [2017-10-13T13:44:29,723][INFO ][logstash.filters.ruby.script] Test run complete {:script_path=>"/etc/logstash/drop_percentage.rb", :results=>{:passed=>1, :failed=>0, :errored=>0}} @@ -140,11 +140,11 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-code>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-init>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_params>> |<>,{}|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_exception>> |<>,_rubyexception|No +| <<{version}-plugins-{type}s-{plugin}-code>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-init>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_params>> |{logstash-ref}/configuration-file-structure.html#hash[hash],{}|No +| 
<<{version}-plugins-{type}s-{plugin}-tag_on_exception>> |{logstash-ref}/configuration-file-structure.html#string[string],_rubyexception|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -155,17 +155,17 @@ filter plugins. [id="{version}-plugins-{type}s-{plugin}-code"] ===== `code` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. * This setting cannot be used together with `path`. The code to execute for every event. -You will have an `event` variable available that is the event itself. See the <> for more information. +You will have an `event` variable available that is the event itself. See the {logstash-ref}/event-api.html[Event API] for more information. [id="{version}-plugins-{type}s-{plugin}-init"] ===== `init` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Any code to execute at logstash startup-time @@ -173,7 +173,7 @@ Any code to execute at logstash startup-time [id="{version}-plugins-{type}s-{plugin}-path"] ===== `path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. * This setting cannot be used together with `code`. @@ -182,11 +182,20 @@ The path of the ruby script file that implements the `filter` method. [id="{version}-plugins-{type}s-{plugin}-script_params"] ===== `script_params` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * Default value is `{}` A key/value hash with parameters that are passed to the register method of your ruby script file defined in `path`. +[id="{version}-plugins-{type}s-{plugin}-tag_on_exception"] +===== `tag_on_exception` + + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] + * Default value is `_rubyexception` + +Tag to add to events in case the ruby code (either inline or file based) +causes an exception. 
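// A minimal sketch of how `code` and `tag_on_exception` work together, using the Event API (`event.get`/`event.set`) referenced above. The field names `bytes` and `kb` and the tag value are hypothetical.
[source,ruby]
----
filter {
  ruby {
    # If `bytes` is absent, event.get returns nil and the division raises;
    # the event is then tagged with the custom tag instead of being lost.
    code => "event.set('kb', event.get('bytes') / 1024.0)"
    tag_on_exception => "_rubyexception_kb"
  }
}
----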
+ [id="{version}-plugins-{type}s-{plugin}-common-options"] include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/filters/ruby-v3.1.3.asciidoc b/docs/versioned-plugins/filters/ruby-v3.1.3.asciidoc index 2f2cfbd53..ac5f1972d 100644 --- a/docs/versioned-plugins/filters/ruby-v3.1.3.asciidoc +++ b/docs/versioned-plugins/filters/ruby-v3.1.3.asciidoc @@ -140,11 +140,11 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-code>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-init>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_params>> |<>,{}|No -| <<{version}-plugins-{type}s-{plugin}-tag_on_exception>> |<>,_rubyexception|No +| <<{version}-plugins-{type}s-{plugin}-code>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-init>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_params>> |{logstash-ref}/configuration-file-structure.html#hash[hash],{}|No +| <<{version}-plugins-{type}s-{plugin}-tag_on_exception>> |{logstash-ref}/configuration-file-structure.html#string[string],_rubyexception|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -155,17 +155,17 @@ filter plugins. [id="{version}-plugins-{type}s-{plugin}-code"] ===== `code` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. * This setting cannot be used together with `path`. The code to execute for every event. -You will have an `event` variable available that is the event itself. See the <> for more information. +You will have an `event` variable available that is the event itself. See the {logstash-ref}/event-api.html[Event API] for more information. [id="{version}-plugins-{type}s-{plugin}-init"] ===== `init` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Any code to execute at logstash startup-time @@ -173,7 +173,7 @@ Any code to execute at logstash startup-time [id="{version}-plugins-{type}s-{plugin}-path"] ===== `path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. * This setting cannot be used together with `code`. @@ -182,7 +182,7 @@ The path of the ruby script file that implements the `filter` method. [id="{version}-plugins-{type}s-{plugin}-script_params"] ===== `script_params` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * Default value is `{}` A key/value hash with parameters that are passed to the register method @@ -191,7 +191,7 @@ of your ruby script file defined in `path`. 
[id="{version}-plugins-{type}s-{plugin}-tag_on_exception"] ===== `tag_on_exception` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `_rubyexception` Tag to add to events in case the ruby code (either inline or file based) diff --git a/docs/versioned-plugins/filters/sleep-v3.0.4.asciidoc b/docs/versioned-plugins/filters/sleep-v3.0.4.asciidoc index a23d30cbd..c4b7829b7 100644 --- a/docs/versioned-plugins/filters/sleep-v3.0.4.asciidoc +++ b/docs/versioned-plugins/filters/sleep-v3.0.4.asciidoc @@ -12,7 +12,7 @@ START - GENERATED VARIABLES, DO NOT EDIT! END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -[id="plugins-{type}-{plugin}"] +[id="{version}-plugins-{type}s-{plugin}"] === Sleep filter plugin {version} @@ -33,9 +33,9 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-every>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-replay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-time>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-every>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-replay>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-time>> |{logstash-ref}/configuration-file-structure.html#string[string]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -46,7 +46,7 @@ filter plugins. [id="{version}-plugins-{type}s-{plugin}-every"] ===== `every` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `1` Sleep on every N'th. This option is ignored in replay mode. @@ -63,7 +63,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-replay"] ===== `replay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable replay mode. @@ -94,7 +94,7 @@ replay 2-times faster than the original time speed. [id="{version}-plugins-{type}s-{plugin}-time"] ===== `time` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The length of time to sleep, in seconds, for every event. 
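// Read together, `time` and `every` throttle a pipeline. A minimal sketch; the values are illustrative only.
[source,ruby]
----
filter {
  sleep {
    time  => "0.5"   # sleep half a second...
    every => 10      # ...once per 10 events
  }
}
----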
diff --git a/docs/versioned-plugins/filters/sleep-v3.0.5.asciidoc b/docs/versioned-plugins/filters/sleep-v3.0.5.asciidoc index 00d28a121..9e6cd3527 100644 --- a/docs/versioned-plugins/filters/sleep-v3.0.5.asciidoc +++ b/docs/versioned-plugins/filters/sleep-v3.0.5.asciidoc @@ -33,9 +33,9 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-every>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-replay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-time>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-every>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-replay>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-time>> |{logstash-ref}/configuration-file-structure.html#string[string]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -46,7 +46,7 @@ filter plugins. [id="{version}-plugins-{type}s-{plugin}-every"] ===== `every` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `1` Sleep on every N'th. This option is ignored in replay mode. @@ -63,7 +63,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-replay"] ===== `replay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable replay mode. @@ -94,7 +94,7 @@ replay 2-times faster than the original time speed. [id="{version}-plugins-{type}s-{plugin}-time"] ===== `time` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The length of time to sleep, in seconds, for every event. diff --git a/docs/versioned-plugins/filters/sleep-v3.0.6.asciidoc b/docs/versioned-plugins/filters/sleep-v3.0.6.asciidoc index 398cdafd1..f868a711d 100644 --- a/docs/versioned-plugins/filters/sleep-v3.0.6.asciidoc +++ b/docs/versioned-plugins/filters/sleep-v3.0.6.asciidoc @@ -33,9 +33,9 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-every>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-replay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-time>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-every>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-replay>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-time>> |{logstash-ref}/configuration-file-structure.html#string[string]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -46,7 +46,7 @@ filter plugins. [id="{version}-plugins-{type}s-{plugin}-every"] ===== `every` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `1` Sleep on every N'th. This option is ignored in replay mode. 
@@ -63,7 +63,7 @@ Example: [id="{version}-plugins-{type}s-{plugin}-replay"] ===== `replay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable replay mode. @@ -94,7 +94,7 @@ replay 2-times faster than the original time speed. [id="{version}-plugins-{type}s-{plugin}-time"] ===== `time` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The length of time to sleep, in seconds, for every event. diff --git a/docs/versioned-plugins/index.asciidoc b/docs/versioned-plugins/index.asciidoc index d1d20fb6b..e1a56df29 100644 --- a/docs/versioned-plugins/index.asciidoc +++ b/docs/versioned-plugins/index.asciidoc @@ -11,10 +11,10 @@ include::{asciidoc-dir}/../../shared/attributes.asciidoc[] [[logstash-plugin-reference]] = Versioned Plugin Reference -include::inputs.asciidoc[] +include::inputs-index.asciidoc[] -include::outputs.asciidoc[] +include::outputs-index.asciidoc[] -include::filters.asciidoc[] +include::filters-index.asciidoc[] -include::codecs.asciidoc[] +include::codecs-index.asciidoc[] diff --git a/docs/versioned-plugins/inputs/beats-v4.0.1.asciidoc b/docs/versioned-plugins/inputs/beats-v4.0.1.asciidoc index c2ebe427b..f1f1882d4 100644 --- a/docs/versioned-plugins/inputs/beats-v4.0.1.asciidoc +++ b/docs/versioned-plugins/inputs/beats-v4.0.1.asciidoc @@ -7,12 +7,12 @@ START - GENERATED VARIABLES, DO NOT EDIT! :version: v4.0.1 :release_date: 2017-06-03 :changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v4.0.1/CHANGELOG.md -:include_path: ../../../logstash/docs/include +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -[id="plugins-{type}-{plugin}"] +[id="{version}-plugins-{type}s-{plugin}"] === Beats @@ -46,41 +46,41 @@ output { NOTE: The Beats shipper automatically sets the `type` field on the event. You cannot override this setting in the Logstash config. If you specify -a setting for the <> config option in +a setting for the <<{version}-plugins-inputs-beats-type,`type`>> config option in Logstash, it is ignored. IMPORTANT: If you are shipping events that span multiple lines, you need to use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events before sending the event data to Logstash. You cannot use the -<> codec to handle multiline events. Doing so will +{logstash-ref}/plugins-codecs-multiline.html[Multiline codec plugin] to handle multiline events. Doing so will result in the failure to start Logstash. [id="{version}-plugins-{type}s-{plugin}-options"] ==== Beats Input Configuration Options -This plugin supports the following configuration options plus the <> described later. +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
[cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No | <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |<>, one of `["none", "peer", "force_peer"]`|No -| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No +| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No |======================================================================= -Also see <> for a list of options supported by all +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all input plugins.   @@ -88,7 +88,7 @@ input plugins. [id="{version}-plugins-{type}s-{plugin}-cipher_suites"] ===== `cipher_suites` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `java.lang.String[TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256]@459cfcca` The list of ciphers suite to use, listed by priorities. 
@@ -96,7 +96,7 @@ The list of ciphers suite to use, listed by priorities. [id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"] ===== `client_inactivity_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Close Idle clients after X seconds of inactivity. @@ -105,7 +105,7 @@ Close Idle clients after X seconds of inactivity. ===== `congestion_threshold` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` The number of seconds before we raise a timeout. @@ -114,7 +114,7 @@ This option is useful to control how much time to wait if something is blocking [id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"0.0.0.0"` The IP address to listen on. @@ -122,7 +122,7 @@ The IP address to listen on. [id="{version}-plugins-{type}s-{plugin}-include_codec_tag"] ===== `include_codec_tag` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` @@ -131,7 +131,7 @@ The IP address to listen on. ===== `port` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. The port to listen on. @@ -139,7 +139,7 @@ The port to listen on. [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Events are by default sent in plain text. You can @@ -149,7 +149,7 @@ the `ssl_certificate` and `ssl_key` options. [id="{version}-plugins-{type}s-{plugin}-ssl_certificate"] ===== `ssl_certificate` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL certificate to use. @@ -157,7 +157,7 @@ SSL certificate to use. [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"] ===== `ssl_certificate_authorities` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Validate client certificates against these authorities. @@ -169,7 +169,7 @@ to `peer` or `force_peer` to enable the verification. [id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"] ===== `ssl_handshake_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` Time in milliseconds for an incomplete ssl handshake to timeout @@ -177,7 +177,7 @@ Time in milliseconds for an incomplete ssl handshake to timeout [id="{version}-plugins-{type}s-{plugin}-ssl_key"] ===== `ssl_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL key to use. @@ -187,7 +187,7 @@ for more information. [id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] ===== `ssl_key_passphrase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. SSL key passphrase to use. 
@@ -212,7 +212,7 @@ This option needs to be used with `ssl_certificate_authorities` and a defined li ===== `target_field_for_codec` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"message"` This is the default field to which the specified codec will be applied. @@ -220,7 +220,7 @@ This is the default field to which the specified codec will be applied. [id="{version}-plugins-{type}s-{plugin}-tls_max_version"] ===== `tls_max_version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1.2` The maximum TLS version allowed for the encrypted connections. The value must be the one of the following: @@ -229,12 +229,12 @@ The maximum TLS version allowed for the encrypted connections. The value must be [id="{version}-plugins-{type}s-{plugin}-tls_min_version"] ===== `tls_min_version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The minimum TLS version allowed for the encrypted connections. The value must be one of the following: 1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2 - +[id="{version}-plugins-{type}s-{plugin}-common-options"] include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/beats-v4.0.2.asciidoc b/docs/versioned-plugins/inputs/beats-v4.0.2.asciidoc index 0de19813c..15b8827e7 100644 --- a/docs/versioned-plugins/inputs/beats-v4.0.2.asciidoc +++ b/docs/versioned-plugins/inputs/beats-v4.0.2.asciidoc @@ -7,12 +7,12 @@ START - GENERATED VARIABLES, DO NOT EDIT! :version: v4.0.2 :release_date: 2017-06-07 :changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v4.0.2/CHANGELOG.md -:include_path: ../../../logstash/docs/include +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -[id="plugins-{type}-{plugin}"] +[id="{version}-plugins-{type}s-{plugin}"] === Beats @@ -46,41 +46,41 @@ output { NOTE: The Beats shipper automatically sets the `type` field on the event. You cannot override this setting in the Logstash config. If you specify -a setting for the <> config option in +a setting for the <<{version}-plugins-inputs-beats-type,`type`>> config option in Logstash, it is ignored. IMPORTANT: If you are shipping events that span multiple lines, you need to use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events before sending the event data to Logstash. You cannot use the -<> codec to handle multiline events. Doing so will +{logstash-ref}/plugins-codecs-multiline.html[Multiline codec plugin] to handle multiline events. Doing so will result in the failure to start Logstash. [id="{version}-plugins-{type}s-{plugin}-options"] ==== Beats Input Configuration Options -This plugin supports the following configuration options plus the <> described later. +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
[cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No | <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |<>, one of `["none", "peer", "force_peer"]`|No -| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No +| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No |======================================================================= -Also see <> for a list of options supported by all +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all input plugins.   @@ -88,7 +88,7 @@ input plugins. [id="{version}-plugins-{type}s-{plugin}-cipher_suites"] ===== `cipher_suites` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `java.lang.String[TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256]@459cfcca` The list of ciphers suite to use, listed by priorities. 
@@ -96,7 +96,7 @@ The list of ciphers suite to use, listed by priorities. [id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"] ===== `client_inactivity_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Close Idle clients after X seconds of inactivity. @@ -105,7 +105,7 @@ Close Idle clients after X seconds of inactivity. ===== `congestion_threshold` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` The number of seconds before we raise a timeout. @@ -114,7 +114,7 @@ This option is useful to control how much time to wait if something is blocking [id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"0.0.0.0"` The IP address to listen on. @@ -122,7 +122,7 @@ The IP address to listen on. [id="{version}-plugins-{type}s-{plugin}-include_codec_tag"] ===== `include_codec_tag` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` @@ -131,7 +131,7 @@ The IP address to listen on. ===== `port` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. The port to listen on. @@ -139,7 +139,7 @@ The port to listen on. [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Events are by default sent in plain text. You can @@ -149,7 +149,7 @@ the `ssl_certificate` and `ssl_key` options. [id="{version}-plugins-{type}s-{plugin}-ssl_certificate"] ===== `ssl_certificate` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL certificate to use. @@ -157,7 +157,7 @@ SSL certificate to use. [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"] ===== `ssl_certificate_authorities` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Validate client certificates against these authorities. @@ -169,7 +169,7 @@ to `peer` or `force_peer` to enable the verification. [id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"] ===== `ssl_handshake_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` Time in milliseconds for an incomplete ssl handshake to timeout @@ -177,7 +177,7 @@ Time in milliseconds for an incomplete ssl handshake to timeout [id="{version}-plugins-{type}s-{plugin}-ssl_key"] ===== `ssl_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL key to use. @@ -187,7 +187,7 @@ for more information. [id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] ===== `ssl_key_passphrase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. SSL key passphrase to use. 
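Combining `ssl_verify_mode` with `ssl_certificate_authorities` turns on client-certificate verification; `force_peer` additionally closes connections from clients that present no certificate. A sketch, assuming an illustrative CA bundle path:

[source,logstash]
----
input {
  beats {
    port => 5044
    ssl => true
    ssl_certificate => "/etc/logstash/certs/beats.crt"
    ssl_key => "/etc/logstash/certs/beats.key"
    # Require and verify a client certificate signed by this CA.
    ssl_certificate_authorities => ["/etc/logstash/certs/ca.crt"]
    ssl_verify_mode => "force_peer"
  }
}
----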
@@ -212,7 +212,7 @@ This option needs to be used with `ssl_certificate_authorities` and a defined li ===== `target_field_for_codec` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"message"` This is the default field to which the specified codec will be applied. @@ -220,7 +220,7 @@ This is the default field to which the specified codec will be applied. [id="{version}-plugins-{type}s-{plugin}-tls_max_version"] ===== `tls_max_version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1.2` The maximum TLS version allowed for the encrypted connections. The value must be the one of the following: @@ -229,12 +229,12 @@ The maximum TLS version allowed for the encrypted connections. The value must be [id="{version}-plugins-{type}s-{plugin}-tls_min_version"] ===== `tls_min_version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The minimum TLS version allowed for the encrypted connections. The value must be one of the following: 1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2 - -include::{include_path}/{type}.asciidoc[] +[id="{version}-plugins-{type}s-{plugin}-common-options"] +include::{include_path}/{type}.asciidoc[] \ No newline at end of file diff --git a/docs/versioned-plugins/inputs/beats-v4.0.3.asciidoc b/docs/versioned-plugins/inputs/beats-v4.0.3.asciidoc index f33b7235e..b85983e15 100644 --- a/docs/versioned-plugins/inputs/beats-v4.0.3.asciidoc +++ b/docs/versioned-plugins/inputs/beats-v4.0.3.asciidoc @@ -7,12 +7,12 @@ START - GENERATED VARIABLES, DO NOT EDIT! :version: v4.0.3 :release_date: 2017-06-22 :changelog_url: https://github.com/logstash-plugins/logstash-input-beats/blob/v4.0.3/CHANGELOG.md -:include_path: ../../../logstash/docs/include +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -[id="plugins-{type}-{plugin}"] +[id="{version}-plugins-{type}s-{plugin}"] === Beats input plugin {version} @@ -46,41 +46,41 @@ output { NOTE: The Beats shipper automatically sets the `type` field on the event. You cannot override this setting in the Logstash config. If you specify -a setting for the <> config option in +a setting for the <<{version}-plugins-inputs-beats-type,`type`>> config option in Logstash, it is ignored. IMPORTANT: If you are shipping events that span multiple lines, you need to use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events before sending the event data to Logstash. You cannot use the -<> codec to handle multiline events. Doing so will +{logstash-ref}/plugins-codecs-multiline.html[Multiline codec plugin] to handle multiline events. Doing so will result in the failure to start Logstash. [id="{version}-plugins-{type}s-{plugin}-options"] ==== Beats Input Configuration Options -This plugin supports the following configuration options plus the <> described later. +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later.
[cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No | <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |<>, one of `["none", "peer", "force_peer"]`|No -| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No +| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No |======================================================================= -Also see <> for a list of options supported by all +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all input plugins.   @@ -88,7 +88,7 @@ input plugins. [id="{version}-plugins-{type}s-{plugin}-cipher_suites"] ===== `cipher_suites` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `java.lang.String[TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256]@459cfcca` The list of ciphers suite to use, listed by priorities. 
@@ -96,7 +96,7 @@ The list of ciphers suite to use, listed by priorities. [id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"] ===== `client_inactivity_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Close Idle clients after X seconds of inactivity. @@ -105,7 +105,7 @@ Close Idle clients after X seconds of inactivity. ===== `congestion_threshold` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` The number of seconds before we raise a timeout. @@ -114,7 +114,7 @@ This option is useful to control how much time to wait if something is blocking [id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"0.0.0.0"` The IP address to listen on. @@ -122,7 +122,7 @@ The IP address to listen on. [id="{version}-plugins-{type}s-{plugin}-include_codec_tag"] ===== `include_codec_tag` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` @@ -131,7 +131,7 @@ The IP address to listen on. ===== `port` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. The port to listen on. @@ -139,7 +139,7 @@ The port to listen on. [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Events are by default sent in plain text. You can @@ -149,7 +149,7 @@ the `ssl_certificate` and `ssl_key` options. [id="{version}-plugins-{type}s-{plugin}-ssl_certificate"] ===== `ssl_certificate` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL certificate to use. @@ -157,7 +157,7 @@ SSL certificate to use. [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"] ===== `ssl_certificate_authorities` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Validate client certificates against these authorities. @@ -169,7 +169,7 @@ to `peer` or `force_peer` to enable the verification. [id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"] ===== `ssl_handshake_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` Time in milliseconds for an incomplete ssl handshake to timeout @@ -177,7 +177,7 @@ Time in milliseconds for an incomplete ssl handshake to timeout [id="{version}-plugins-{type}s-{plugin}-ssl_key"] ===== `ssl_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL key to use. @@ -187,7 +187,7 @@ for more information. [id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] ===== `ssl_key_passphrase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. SSL key passphrase to use. 
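`tls_min_version` and `tls_max_version` bound the protocol version the listener will negotiate. A sketch that refuses anything older than TLS 1.2, using the numeric values enumerated above (paths are illustrative):

[source,logstash]
----
input {
  beats {
    port => 5044
    ssl => true
    ssl_certificate => "/etc/logstash/certs/beats.crt"
    ssl_key => "/etc/logstash/certs/beats.key"
    tls_min_version => 1.2  # refuse TLS 1.0 and 1.1; default minimum is 1
  }
}
----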
@@ -212,7 +212,7 @@ This option needs to be used with `ssl_certificate_authorities` and a defined li ===== `target_field_for_codec` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"message"` This is the default field to which the specified codec will be applied. @@ -220,7 +220,7 @@ This is the default field to which the specified codec will be applied. [id="{version}-plugins-{type}s-{plugin}-tls_max_version"] ===== `tls_max_version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1.2` The maximum TLS version allowed for the encrypted connections. The value must be the one of the following: @@ -229,12 +229,12 @@ The maximum TLS version allowed for the encrypted connections. The value must be [id="{version}-plugins-{type}s-{plugin}-tls_min_version"] ===== `tls_min_version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The minimum TLS version allowed for the encrypted connections. The value must be one of the following: 1.0 for TLS 1.0, 1.1 for TLS 1.1, 1.2 for TLS 1.2 - +[id="{version}-plugins-{type}s-{plugin}-common-options"] include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/inputs/beats-v4.0.4.asciidoc b/docs/versioned-plugins/inputs/beats-v4.0.4.asciidoc index 5e946b384..cfdc2dcda 100644 --- a/docs/versioned-plugins/inputs/beats-v4.0.4.asciidoc +++ b/docs/versioned-plugins/inputs/beats-v4.0.4.asciidoc @@ -12,7 +12,7 @@ START - GENERATED VARIABLES, DO NOT EDIT! END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -[id="plugins-{type}-{plugin}"] +[id="{version}-plugins-{type}s-{plugin}"] === Beats input plugin {version} @@ -46,13 +46,13 @@ output { NOTE: The Beats shipper automatically sets the `type` field on the event. You cannot override this setting in the Logstash config. If you specify -a setting for the <> config option in +a setting for the <<{version}-plugins-inputs-beats-type,`type`>> config option in Logstash, it is ignored. IMPORTANT: If you are shipping events that span multiple lines, you need to use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events before sending the event data to Logstash. You cannot use the -<> codec to handle multiline events. Doing so will +{logstash-ref}/plugins-codecs-multiline.html[Multiline codec plugin] to handle multiline events. Doing so will result in the failure to start Logstash.
@@ -64,20 +64,20 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No | <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |<>, one of `["none", "peer", "force_peer"]`|No -| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No +| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -88,7 +88,7 @@ input plugins. [id="{version}-plugins-{type}s-{plugin}-cipher_suites"] ===== `cipher_suites` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `java.lang.String[TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256]@459cfcca` The list of ciphers suite to use, listed by priorities. 
@@ -96,7 +96,7 @@ The list of ciphers suite to use, listed by priorities. [id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"] ===== `client_inactivity_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Close Idle clients after X seconds of inactivity. @@ -105,7 +105,7 @@ Close Idle clients after X seconds of inactivity. ===== `congestion_threshold` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` The number of seconds before we raise a timeout. @@ -114,7 +114,7 @@ This option is useful to control how much time to wait if something is blocking [id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"0.0.0.0"` The IP address to listen on. @@ -122,7 +122,7 @@ The IP address to listen on. [id="{version}-plugins-{type}s-{plugin}-include_codec_tag"] ===== `include_codec_tag` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` @@ -131,7 +131,7 @@ The IP address to listen on. ===== `port` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. The port to listen on. @@ -139,7 +139,7 @@ The port to listen on. [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Events are by default sent in plain text. You can @@ -149,7 +149,7 @@ the `ssl_certificate` and `ssl_key` options. [id="{version}-plugins-{type}s-{plugin}-ssl_certificate"] ===== `ssl_certificate` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL certificate to use. @@ -157,7 +157,7 @@ SSL certificate to use. [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"] ===== `ssl_certificate_authorities` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Validate client certificates against these authorities. @@ -169,7 +169,7 @@ to `peer` or `force_peer` to enable the verification. [id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"] ===== `ssl_handshake_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` Time in milliseconds for an incomplete ssl handshake to timeout @@ -177,7 +177,7 @@ Time in milliseconds for an incomplete ssl handshake to timeout [id="{version}-plugins-{type}s-{plugin}-ssl_key"] ===== `ssl_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL key to use. @@ -187,7 +187,7 @@ for more information. [id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] ===== `ssl_key_passphrase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. SSL key passphrase to use. 
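`host`, `port`, and `client_inactivity_timeout` shape the listener itself. A sketch that binds to a single interface and tolerates longer-lived idle connections; the address and timeout are illustrative values, not recommendations from this patch:

[source,logstash]
----
input {
  beats {
    host => "10.0.0.5"                # illustrative bind address; default is "0.0.0.0"
    port => 5044
    client_inactivity_timeout => 300  # close idle clients after 5 minutes; default is 60
  }
}
----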
@@ -212,7 +212,7 @@ This option needs to be used with `ssl_certificate_authorities` and a defined li ===== `target_field_for_codec` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"message"` This is the default field to which the specified codec will be applied. @@ -220,7 +220,7 @@ This is the default field to which the specified codec will be applied. [id="{version}-plugins-{type}s-{plugin}-tls_max_version"] ===== `tls_max_version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1.2` The maximum TLS version allowed for the encrypted connections. The value must be the one of the following: @@ -229,7 +229,7 @@ The maximum TLS version allowed for the encrypted connections. The value must be [id="{version}-plugins-{type}s-{plugin}-tls_min_version"] ===== `tls_min_version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The minimum TLS version allowed for the encrypted connections. The value must be one of the following: diff --git a/docs/versioned-plugins/inputs/beats-v4.0.5.asciidoc b/docs/versioned-plugins/inputs/beats-v4.0.5.asciidoc index da902bf99..4d6769416 100644 --- a/docs/versioned-plugins/inputs/beats-v4.0.5.asciidoc +++ b/docs/versioned-plugins/inputs/beats-v4.0.5.asciidoc @@ -46,13 +46,13 @@ output { NOTE: The Beats shipper automatically sets the `type` field on the event. You cannot override this setting in the Logstash config. If you specify -a setting for the <> config option in +a setting for the <<{version}-plugins-inputs-beats-type,`type`>> config option in Logstash, it is ignored. IMPORTANT: If you are shipping events that span multiple lines, you need to use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events before sending the event data to Logstash. You cannot use the -<> codec to handle multiline events. Doing so will +{logstash-ref}/plugins-codecs-multiline.html[Multiline codec plugin] to handle multiline events. Doing so will result in the failure to start Logstash.
@@ -64,20 +64,20 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No | <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |<>, one of `["none", "peer", "force_peer"]`|No -| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No +| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -88,7 +88,7 @@ input plugins. 
[id="{version}-plugins-{type}s-{plugin}-cipher_suites"] ===== `cipher_suites` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `java.lang.String[TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256]@459cfcca` The list of ciphers suite to use, listed by priorities. @@ -96,7 +96,7 @@ The list of ciphers suite to use, listed by priorities. [id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"] ===== `client_inactivity_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Close Idle clients after X seconds of inactivity. @@ -105,7 +105,7 @@ Close Idle clients after X seconds of inactivity. ===== `congestion_threshold` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` The number of seconds before we raise a timeout. @@ -114,7 +114,7 @@ This option is useful to control how much time to wait if something is blocking [id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"0.0.0.0"` The IP address to listen on. @@ -122,7 +122,7 @@ The IP address to listen on. [id="{version}-plugins-{type}s-{plugin}-include_codec_tag"] ===== `include_codec_tag` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` @@ -131,7 +131,7 @@ The IP address to listen on. ===== `port` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. The port to listen on. @@ -139,7 +139,7 @@ The port to listen on. [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Events are by default sent in plain text. You can @@ -149,7 +149,7 @@ the `ssl_certificate` and `ssl_key` options. [id="{version}-plugins-{type}s-{plugin}-ssl_certificate"] ===== `ssl_certificate` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL certificate to use. @@ -157,7 +157,7 @@ SSL certificate to use. [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"] ===== `ssl_certificate_authorities` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Validate client certificates against these authorities. @@ -169,7 +169,7 @@ to `peer` or `force_peer` to enable the verification. 
[id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"] ===== `ssl_handshake_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` Time in milliseconds for an incomplete ssl handshake to timeout @@ -177,7 +177,7 @@ Time in milliseconds for an incomplete ssl handshake to timeout [id="{version}-plugins-{type}s-{plugin}-ssl_key"] ===== `ssl_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL key to use. @@ -187,7 +187,7 @@ for more information. [id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] ===== `ssl_key_passphrase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. SSL key passphrase to use. @@ -212,7 +212,7 @@ This option needs to be used with `ssl_certificate_authorities` and a defined li ===== `target_field_for_codec` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"message"` This is the default field to which the specified codec will be applied. @@ -220,7 +220,7 @@ This is the default field to which the specified codec will be applied. [id="{version}-plugins-{type}s-{plugin}-tls_max_version"] ===== `tls_max_version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1.2` The maximum TLS version allowed for the encrypted connections. The value must be the one of the following: @@ -229,7 +229,7 @@ The maximum TLS version allowed for the encrypted connections. The value must be [id="{version}-plugins-{type}s-{plugin}-tls_min_version"] ===== `tls_min_version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The minimum TLS version allowed for the encrypted connections. The value must be one of the following: diff --git a/docs/versioned-plugins/inputs/beats-v5.0.0.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.0.asciidoc index 463455ad3..11c07a98c 100644 --- a/docs/versioned-plugins/inputs/beats-v5.0.0.asciidoc +++ b/docs/versioned-plugins/inputs/beats-v5.0.0.asciidoc @@ -46,13 +46,13 @@ output { NOTE: The Beats shipper automatically sets the `type` field on the event. You cannot override this setting in the Logstash config. If you specify -a setting for the <> config option in +a setting for the <<{version}-plugins-inputs-beats-type,`type`>> config option in Logstash, it is ignored. IMPORTANT: If you are shipping events that span multiple lines, you need to use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events before sending the event data to Logstash. You cannot use the -<> codec to handle multiline events. Doing so will +{logstash-ref}/plugins-codecs-multiline.html[Multiline codec plugin] codec to handle multiline events. Doing so will result in the failure to start Logstash. 
@@ -64,20 +64,20 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No | <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |<>, one of `["none", "peer", "force_peer"]`|No -| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No +| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -88,7 +88,7 @@ input plugins. 
[id="{version}-plugins-{type}s-{plugin}-cipher_suites"] ===== `cipher_suites` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `java.lang.String[TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256]@459cfcca` The list of ciphers suite to use, listed by priorities. @@ -96,7 +96,7 @@ The list of ciphers suite to use, listed by priorities. [id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"] ===== `client_inactivity_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Close Idle clients after X seconds of inactivity. @@ -104,7 +104,7 @@ Close Idle clients after X seconds of inactivity. [id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"0.0.0.0"` The IP address to listen on. @@ -112,7 +112,7 @@ The IP address to listen on. [id="{version}-plugins-{type}s-{plugin}-include_codec_tag"] ===== `include_codec_tag` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` @@ -121,7 +121,7 @@ The IP address to listen on. ===== `port` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. The port to listen on. @@ -129,7 +129,7 @@ The port to listen on. [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Events are by default sent in plain text. You can @@ -139,7 +139,7 @@ the `ssl_certificate` and `ssl_key` options. [id="{version}-plugins-{type}s-{plugin}-ssl_certificate"] ===== `ssl_certificate` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL certificate to use. @@ -147,7 +147,7 @@ SSL certificate to use. [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"] ===== `ssl_certificate_authorities` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Validate client certificates against these authorities. @@ -159,7 +159,7 @@ to `peer` or `force_peer` to enable the verification. [id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"] ===== `ssl_handshake_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` Time in milliseconds for an incomplete ssl handshake to timeout @@ -167,7 +167,7 @@ Time in milliseconds for an incomplete ssl handshake to timeout [id="{version}-plugins-{type}s-{plugin}-ssl_key"] ===== `ssl_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL key to use. @@ -177,7 +177,7 @@ for more information. 
[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] ===== `ssl_key_passphrase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. SSL key passphrase to use. @@ -201,7 +201,7 @@ This option needs to be used with `ssl_certificate_authorities` and a defined li [id="{version}-plugins-{type}s-{plugin}-tls_max_version"] ===== `tls_max_version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1.2` The maximum TLS version allowed for the encrypted connections. The value must be the one of the following: @@ -210,7 +210,7 @@ The maximum TLS version allowed for the encrypted connections. The value must be [id="{version}-plugins-{type}s-{plugin}-tls_min_version"] ===== `tls_min_version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The minimum TLS version allowed for the encrypted connections. The value must be one of the following: diff --git a/docs/versioned-plugins/inputs/beats-v5.0.1.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.1.asciidoc index c4ee38ffb..7f3b98a59 100644 --- a/docs/versioned-plugins/inputs/beats-v5.0.1.asciidoc +++ b/docs/versioned-plugins/inputs/beats-v5.0.1.asciidoc @@ -46,13 +46,13 @@ output { NOTE: The Beats shipper automatically sets the `type` field on the event. You cannot override this setting in the Logstash config. If you specify -a setting for the <> config option in +a setting for the <<{version}-plugins-inputs-beats-type,`type`>> config option in Logstash, it is ignored. IMPORTANT: If you are shipping events that span multiple lines, you need to use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events before sending the event data to Logstash. You cannot use the -<> codec to handle multiline events. Doing so will +{logstash-ref}/plugins-codecs-multiline.html[Multiline codec plugin] codec to handle multiline events. Doing so will result in the failure to start Logstash. 
@@ -64,20 +64,20 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No | <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |<>, one of `["none", "peer", "force_peer"]`|No -| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No +| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -88,7 +88,7 @@ input plugins. 
[id="{version}-plugins-{type}s-{plugin}-cipher_suites"] ===== `cipher_suites` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `java.lang.String[TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256]@459cfcca` The list of ciphers suite to use, listed by priorities. @@ -96,7 +96,7 @@ The list of ciphers suite to use, listed by priorities. [id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"] ===== `client_inactivity_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Close Idle clients after X seconds of inactivity. @@ -104,7 +104,7 @@ Close Idle clients after X seconds of inactivity. [id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"0.0.0.0"` The IP address to listen on. @@ -112,7 +112,7 @@ The IP address to listen on. [id="{version}-plugins-{type}s-{plugin}-include_codec_tag"] ===== `include_codec_tag` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` @@ -121,7 +121,7 @@ The IP address to listen on. ===== `port` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. The port to listen on. @@ -129,7 +129,7 @@ The port to listen on. [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Events are by default sent in plain text. You can @@ -139,7 +139,7 @@ the `ssl_certificate` and `ssl_key` options. [id="{version}-plugins-{type}s-{plugin}-ssl_certificate"] ===== `ssl_certificate` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL certificate to use. @@ -147,7 +147,7 @@ SSL certificate to use. [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"] ===== `ssl_certificate_authorities` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Validate client certificates against these authorities. @@ -159,7 +159,7 @@ to `peer` or `force_peer` to enable the verification. [id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"] ===== `ssl_handshake_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` Time in milliseconds for an incomplete ssl handshake to timeout @@ -167,7 +167,7 @@ Time in milliseconds for an incomplete ssl handshake to timeout [id="{version}-plugins-{type}s-{plugin}-ssl_key"] ===== `ssl_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL key to use. @@ -177,7 +177,7 @@ for more information. 
[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] ===== `ssl_key_passphrase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. SSL key passphrase to use. @@ -201,7 +201,7 @@ This option needs to be used with `ssl_certificate_authorities` and a defined li [id="{version}-plugins-{type}s-{plugin}-tls_max_version"] ===== `tls_max_version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1.2` The maximum TLS version allowed for the encrypted connections. The value must be the one of the following: @@ -210,7 +210,7 @@ The maximum TLS version allowed for the encrypted connections. The value must be [id="{version}-plugins-{type}s-{plugin}-tls_min_version"] ===== `tls_min_version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The minimum TLS version allowed for the encrypted connections. The value must be one of the following: diff --git a/docs/versioned-plugins/inputs/beats-v5.0.2.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.2.asciidoc index 5d5fb7c11..57e795ede 100644 --- a/docs/versioned-plugins/inputs/beats-v5.0.2.asciidoc +++ b/docs/versioned-plugins/inputs/beats-v5.0.2.asciidoc @@ -46,13 +46,13 @@ output { NOTE: The Beats shipper automatically sets the `type` field on the event. You cannot override this setting in the Logstash config. If you specify -a setting for the <> config option in +a setting for the <<{version}-plugins-inputs-beats-type,`type`>> config option in Logstash, it is ignored. IMPORTANT: If you are shipping events that span multiple lines, you need to use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events before sending the event data to Logstash. You cannot use the -<> codec to handle multiline events. Doing so will +{logstash-ref}/plugins-codecs-multiline.html[Multiline codec plugin] codec to handle multiline events. Doing so will result in the failure to start Logstash. 
@@ -64,20 +64,20 @@ This plugin supports the following configuration options plus the <<{version}-pl
[cols="<,<,<",options="header",]
|=======================================================================
|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-ssl>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<<password,password>>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |<<string,string>>, one of `["none", "peer", "force_peer"]`|No
-| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
|=======================================================================

Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
@@ -88,7 +88,7 @@ input plugins.
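+As an aside, binding to a single interface and shortening the idle timeout
+combines the `host`, `port`, and `client_inactivity_timeout` options described
+below (the address and timeout here are hypothetical, not defaults):
+
+[source,ruby]
+----
+input {
+  beats {
+    # Listen only on an internal interface instead of 0.0.0.0.
+    host => "10.0.0.5"
+    port => 5044
+    # Drop connections after five minutes of inactivity (default is 60s).
+    client_inactivity_timeout => 300
+  }
+}
+----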
[id="{version}-plugins-{type}s-{plugin}-cipher_suites"] ===== `cipher_suites` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `java.lang.String[TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256]@459cfcca` The list of ciphers suite to use, listed by priorities. @@ -96,7 +96,7 @@ The list of ciphers suite to use, listed by priorities. [id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"] ===== `client_inactivity_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Close Idle clients after X seconds of inactivity. @@ -104,7 +104,7 @@ Close Idle clients after X seconds of inactivity. [id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"0.0.0.0"` The IP address to listen on. @@ -112,7 +112,7 @@ The IP address to listen on. [id="{version}-plugins-{type}s-{plugin}-include_codec_tag"] ===== `include_codec_tag` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` @@ -121,7 +121,7 @@ The IP address to listen on. ===== `port` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. The port to listen on. @@ -129,7 +129,7 @@ The port to listen on. [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Events are by default sent in plain text. You can @@ -139,7 +139,7 @@ the `ssl_certificate` and `ssl_key` options. [id="{version}-plugins-{type}s-{plugin}-ssl_certificate"] ===== `ssl_certificate` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL certificate to use. @@ -147,7 +147,7 @@ SSL certificate to use. [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"] ===== `ssl_certificate_authorities` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Validate client certificates against these authorities. @@ -159,7 +159,7 @@ to `peer` or `force_peer` to enable the verification. [id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"] ===== `ssl_handshake_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` Time in milliseconds for an incomplete ssl handshake to timeout @@ -167,7 +167,7 @@ Time in milliseconds for an incomplete ssl handshake to timeout [id="{version}-plugins-{type}s-{plugin}-ssl_key"] ===== `ssl_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL key to use. @@ -177,7 +177,7 @@ for more information. 
[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] ===== `ssl_key_passphrase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. SSL key passphrase to use. @@ -201,7 +201,7 @@ This option needs to be used with `ssl_certificate_authorities` and a defined li [id="{version}-plugins-{type}s-{plugin}-tls_max_version"] ===== `tls_max_version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1.2` The maximum TLS version allowed for the encrypted connections. The value must be the one of the following: @@ -210,7 +210,7 @@ The maximum TLS version allowed for the encrypted connections. The value must be [id="{version}-plugins-{type}s-{plugin}-tls_min_version"] ===== `tls_min_version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The minimum TLS version allowed for the encrypted connections. The value must be one of the following: diff --git a/docs/versioned-plugins/inputs/beats-v5.0.3.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.3.asciidoc index 31e16f685..db12db6a1 100644 --- a/docs/versioned-plugins/inputs/beats-v5.0.3.asciidoc +++ b/docs/versioned-plugins/inputs/beats-v5.0.3.asciidoc @@ -46,13 +46,13 @@ output { NOTE: The Beats shipper automatically sets the `type` field on the event. You cannot override this setting in the Logstash config. If you specify -a setting for the <> config option in +a setting for the <<{version}-plugins-inputs-beats-type,`type`>> config option in Logstash, it is ignored. IMPORTANT: If you are shipping events that span multiple lines, you need to use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events before sending the event data to Logstash. You cannot use the -<> codec to handle multiline events. Doing so will +{logstash-ref}/plugins-codecs-multiline.html[Multiline codec plugin] codec to handle multiline events. Doing so will result in the failure to start Logstash. 
@@ -64,20 +64,20 @@ This plugin supports the following configuration options plus the <<{version}-pl
[cols="<,<,<",options="header",]
|=======================================================================
|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-ssl>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<<password,password>>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |<<string,string>>, one of `["none", "peer", "force_peer"]`|No
-| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
|=======================================================================

Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
@@ -88,7 +88,7 @@ input plugins.
[id="{version}-plugins-{type}s-{plugin}-cipher_suites"] ===== `cipher_suites` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `java.lang.String[TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256]@459cfcca` The list of ciphers suite to use, listed by priorities. @@ -96,7 +96,7 @@ The list of ciphers suite to use, listed by priorities. [id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"] ===== `client_inactivity_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Close Idle clients after X seconds of inactivity. @@ -104,7 +104,7 @@ Close Idle clients after X seconds of inactivity. [id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"0.0.0.0"` The IP address to listen on. @@ -112,7 +112,7 @@ The IP address to listen on. [id="{version}-plugins-{type}s-{plugin}-include_codec_tag"] ===== `include_codec_tag` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` @@ -121,7 +121,7 @@ The IP address to listen on. ===== `port` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. The port to listen on. @@ -129,7 +129,7 @@ The port to listen on. [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Events are by default sent in plain text. You can @@ -139,7 +139,7 @@ the `ssl_certificate` and `ssl_key` options. [id="{version}-plugins-{type}s-{plugin}-ssl_certificate"] ===== `ssl_certificate` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL certificate to use. @@ -147,7 +147,7 @@ SSL certificate to use. [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"] ===== `ssl_certificate_authorities` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Validate client certificates against these authorities. @@ -159,7 +159,7 @@ to `peer` or `force_peer` to enable the verification. [id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"] ===== `ssl_handshake_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` Time in milliseconds for an incomplete ssl handshake to timeout @@ -167,7 +167,7 @@ Time in milliseconds for an incomplete ssl handshake to timeout [id="{version}-plugins-{type}s-{plugin}-ssl_key"] ===== `ssl_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL key to use. @@ -177,7 +177,7 @@ for more information. 
[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] ===== `ssl_key_passphrase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. SSL key passphrase to use. @@ -201,7 +201,7 @@ This option needs to be used with `ssl_certificate_authorities` and a defined li [id="{version}-plugins-{type}s-{plugin}-tls_max_version"] ===== `tls_max_version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1.2` The maximum TLS version allowed for the encrypted connections. The value must be the one of the following: @@ -210,7 +210,7 @@ The maximum TLS version allowed for the encrypted connections. The value must be [id="{version}-plugins-{type}s-{plugin}-tls_min_version"] ===== `tls_min_version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The minimum TLS version allowed for the encrypted connections. The value must be one of the following: diff --git a/docs/versioned-plugins/inputs/beats-v5.0.4.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.4.asciidoc index cd6366e9f..f7afc62eb 100644 --- a/docs/versioned-plugins/inputs/beats-v5.0.4.asciidoc +++ b/docs/versioned-plugins/inputs/beats-v5.0.4.asciidoc @@ -46,13 +46,13 @@ output { NOTE: The Beats shipper automatically sets the `type` field on the event. You cannot override this setting in the Logstash config. If you specify -a setting for the <> config option in +a setting for the <<{version}-plugins-inputs-beats-type,`type`>> config option in Logstash, it is ignored. IMPORTANT: If you are shipping events that span multiple lines, you need to use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events before sending the event data to Logstash. You cannot use the -<> codec to handle multiline events. Doing so will +{logstash-ref}/plugins-codecs-multiline.html[Multiline codec plugin] codec to handle multiline events. Doing so will result in the failure to start Logstash. 
@@ -64,20 +64,20 @@ This plugin supports the following configuration options plus the <<{version}-pl
[cols="<,<,<",options="header",]
|=======================================================================
|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-ssl>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<<password,password>>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |<<string,string>>, one of `["none", "peer", "force_peer"]`|No
-| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
|=======================================================================

Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
@@ -88,7 +88,7 @@ input plugins.
[id="{version}-plugins-{type}s-{plugin}-cipher_suites"] ===== `cipher_suites` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `java.lang.String[TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256]@459cfcca` The list of ciphers suite to use, listed by priorities. @@ -96,7 +96,7 @@ The list of ciphers suite to use, listed by priorities. [id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"] ===== `client_inactivity_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Close Idle clients after X seconds of inactivity. @@ -104,7 +104,7 @@ Close Idle clients after X seconds of inactivity. [id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"0.0.0.0"` The IP address to listen on. @@ -112,7 +112,7 @@ The IP address to listen on. [id="{version}-plugins-{type}s-{plugin}-include_codec_tag"] ===== `include_codec_tag` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` @@ -121,7 +121,7 @@ The IP address to listen on. ===== `port` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. The port to listen on. @@ -129,7 +129,7 @@ The port to listen on. [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Events are by default sent in plain text. You can @@ -139,7 +139,7 @@ the `ssl_certificate` and `ssl_key` options. [id="{version}-plugins-{type}s-{plugin}-ssl_certificate"] ===== `ssl_certificate` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL certificate to use. @@ -147,7 +147,7 @@ SSL certificate to use. [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"] ===== `ssl_certificate_authorities` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Validate client certificates against these authorities. @@ -159,7 +159,7 @@ to `peer` or `force_peer` to enable the verification. [id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"] ===== `ssl_handshake_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` Time in milliseconds for an incomplete ssl handshake to timeout @@ -167,7 +167,7 @@ Time in milliseconds for an incomplete ssl handshake to timeout [id="{version}-plugins-{type}s-{plugin}-ssl_key"] ===== `ssl_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL key to use. @@ -177,7 +177,7 @@ for more information. 
[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] ===== `ssl_key_passphrase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. SSL key passphrase to use. @@ -201,7 +201,7 @@ This option needs to be used with `ssl_certificate_authorities` and a defined li [id="{version}-plugins-{type}s-{plugin}-tls_max_version"] ===== `tls_max_version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1.2` The maximum TLS version allowed for the encrypted connections. The value must be the one of the following: @@ -210,7 +210,7 @@ The maximum TLS version allowed for the encrypted connections. The value must be [id="{version}-plugins-{type}s-{plugin}-tls_min_version"] ===== `tls_min_version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The minimum TLS version allowed for the encrypted connections. The value must be one of the following: diff --git a/docs/versioned-plugins/inputs/beats-v5.0.5.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.5.asciidoc index 37bbc2759..d3c6976e9 100644 --- a/docs/versioned-plugins/inputs/beats-v5.0.5.asciidoc +++ b/docs/versioned-plugins/inputs/beats-v5.0.5.asciidoc @@ -46,13 +46,13 @@ output { NOTE: The Beats shipper automatically sets the `type` field on the event. You cannot override this setting in the Logstash config. If you specify -a setting for the <> config option in +a setting for the <<{version}-plugins-inputs-beats-type,`type`>> config option in Logstash, it is ignored. IMPORTANT: If you are shipping events that span multiple lines, you need to use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events before sending the event data to Logstash. You cannot use the -<> codec to handle multiline events. Doing so will +{logstash-ref}/plugins-codecs-multiline.html[Multiline codec plugin] codec to handle multiline events. Doing so will result in the failure to start Logstash. 
@@ -64,20 +64,20 @@ This plugin supports the following configuration options plus the <<{version}-pl
[cols="<,<,<",options="header",]
|=======================================================================
|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-ssl>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<<password,password>>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |<<string,string>>, one of `["none", "peer", "force_peer"]`|No
-| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
|=======================================================================

Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
@@ -88,7 +88,7 @@ input plugins.
[id="{version}-plugins-{type}s-{plugin}-cipher_suites"] ===== `cipher_suites` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `java.lang.String[TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256]@459cfcca` The list of ciphers suite to use, listed by priorities. @@ -96,7 +96,7 @@ The list of ciphers suite to use, listed by priorities. [id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"] ===== `client_inactivity_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Close Idle clients after X seconds of inactivity. @@ -104,7 +104,7 @@ Close Idle clients after X seconds of inactivity. [id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"0.0.0.0"` The IP address to listen on. @@ -112,7 +112,7 @@ The IP address to listen on. [id="{version}-plugins-{type}s-{plugin}-include_codec_tag"] ===== `include_codec_tag` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` @@ -121,7 +121,7 @@ The IP address to listen on. ===== `port` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. The port to listen on. @@ -129,7 +129,7 @@ The port to listen on. [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Events are by default sent in plain text. You can @@ -139,7 +139,7 @@ the `ssl_certificate` and `ssl_key` options. [id="{version}-plugins-{type}s-{plugin}-ssl_certificate"] ===== `ssl_certificate` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL certificate to use. @@ -147,7 +147,7 @@ SSL certificate to use. [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"] ===== `ssl_certificate_authorities` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Validate client certificates against these authorities. @@ -159,7 +159,7 @@ to `peer` or `force_peer` to enable the verification. [id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"] ===== `ssl_handshake_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` Time in milliseconds for an incomplete ssl handshake to timeout @@ -167,7 +167,7 @@ Time in milliseconds for an incomplete ssl handshake to timeout [id="{version}-plugins-{type}s-{plugin}-ssl_key"] ===== `ssl_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL key to use. @@ -177,7 +177,7 @@ for more information. 
[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] ===== `ssl_key_passphrase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. SSL key passphrase to use. @@ -201,7 +201,7 @@ This option needs to be used with `ssl_certificate_authorities` and a defined li [id="{version}-plugins-{type}s-{plugin}-tls_max_version"] ===== `tls_max_version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1.2` The maximum TLS version allowed for the encrypted connections. The value must be the one of the following: @@ -210,7 +210,7 @@ The maximum TLS version allowed for the encrypted connections. The value must be [id="{version}-plugins-{type}s-{plugin}-tls_min_version"] ===== `tls_min_version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The minimum TLS version allowed for the encrypted connections. The value must be one of the following: diff --git a/docs/versioned-plugins/inputs/beats-v5.0.6.asciidoc b/docs/versioned-plugins/inputs/beats-v5.0.6.asciidoc index abb26230b..3686738b7 100644 --- a/docs/versioned-plugins/inputs/beats-v5.0.6.asciidoc +++ b/docs/versioned-plugins/inputs/beats-v5.0.6.asciidoc @@ -46,13 +46,13 @@ output { NOTE: The Beats shipper automatically sets the `type` field on the event. You cannot override this setting in the Logstash config. If you specify -a setting for the <> config option in +a setting for the <<{version}-plugins-inputs-beats-type,`type`>> config option in Logstash, it is ignored. IMPORTANT: If you are shipping events that span multiple lines, you need to use the https://www.elastic.co/guide/en/beats/filebeat/current/multiline-examples.html[configuration options available in Filebeat] to handle multiline events before sending the event data to Logstash. You cannot use the -<> codec to handle multiline events. Doing so will +{logstash-ref}/plugins-codecs-multiline.html[Multiline codec plugin] codec to handle multiline events. Doing so will result in the failure to start Logstash. 
@@ -64,20 +64,20 @@ This plugin supports the following configuration options plus the <<{version}-pl
[cols="<,<,<",options="header",]
|=======================================================================
|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|Yes
-| <<{version}-plugins-{type}s-{plugin}-ssl>> |<<boolean,boolean>>|No
+| <<{version}-plugins-{type}s-{plugin}-cipher_suites>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-client_inactivity_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-include_codec_tag>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
| <<{version}-plugins-{type}s-{plugin}-ssl_certificate>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |<<array,array>>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities>> |{logstash-ref}/configuration-file-structure.html#array[array]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
| <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<<password,password>>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |<<string,string>>, one of `["none", "peer", "force_peer"]`|No
-| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |<<number,number>>|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl_verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-tls_max_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-tls_min_version>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
|=======================================================================

Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
@@ -88,7 +88,7 @@ input plugins.
[id="{version}-plugins-{type}s-{plugin}-cipher_suites"] ===== `cipher_suites` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `java.lang.String[TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256]@459cfcca` The list of ciphers suite to use, listed by priorities. @@ -96,7 +96,7 @@ The list of ciphers suite to use, listed by priorities. [id="{version}-plugins-{type}s-{plugin}-client_inactivity_timeout"] ===== `client_inactivity_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Close Idle clients after X seconds of inactivity. @@ -104,7 +104,7 @@ Close Idle clients after X seconds of inactivity. [id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"0.0.0.0"` The IP address to listen on. @@ -112,7 +112,7 @@ The IP address to listen on. [id="{version}-plugins-{type}s-{plugin}-include_codec_tag"] ===== `include_codec_tag` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` @@ -121,7 +121,7 @@ The IP address to listen on. ===== `port` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. The port to listen on. @@ -129,7 +129,7 @@ The port to listen on. [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Events are by default sent in plain text. You can @@ -139,7 +139,7 @@ the `ssl_certificate` and `ssl_key` options. [id="{version}-plugins-{type}s-{plugin}-ssl_certificate"] ===== `ssl_certificate` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL certificate to use. @@ -147,7 +147,7 @@ SSL certificate to use. [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_authorities"] ===== `ssl_certificate_authorities` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Validate client certificates against these authorities. @@ -159,7 +159,7 @@ to `peer` or `force_peer` to enable the verification. [id="{version}-plugins-{type}s-{plugin}-ssl_handshake_timeout"] ===== `ssl_handshake_timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` Time in milliseconds for an incomplete ssl handshake to timeout @@ -167,7 +167,7 @@ Time in milliseconds for an incomplete ssl handshake to timeout [id="{version}-plugins-{type}s-{plugin}-ssl_key"] ===== `ssl_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL key to use. @@ -177,7 +177,7 @@ for more information. 
[id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] ===== `ssl_key_passphrase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. SSL key passphrase to use. @@ -201,7 +201,7 @@ This option needs to be used with `ssl_certificate_authorities` and a defined li [id="{version}-plugins-{type}s-{plugin}-tls_max_version"] ===== `tls_max_version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1.2` The maximum TLS version allowed for the encrypted connections. The value must be the one of the following: @@ -210,7 +210,7 @@ The maximum TLS version allowed for the encrypted connections. The value must be [id="{version}-plugins-{type}s-{plugin}-tls_min_version"] ===== `tls_min_version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The minimum TLS version allowed for the encrypted connections. The value must be one of the following: diff --git a/docs/versioned-plugins/inputs/http-v3.0.5.asciidoc b/docs/versioned-plugins/inputs/http-v3.0.5.asciidoc index 17f030c6f..d7d84181e 100644 --- a/docs/versioned-plugins/inputs/http-v3.0.5.asciidoc +++ b/docs/versioned-plugins/inputs/http-v3.0.5.asciidoc @@ -12,7 +12,7 @@ START - GENERATED VARIABLES, DO NOT EDIT! END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -[id="plugins-{type}-{plugin}"] +[id="{version}-plugins-{type}s-{plugin}"] === Http input plugin {version} @@ -49,17 +49,17 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-additional_codecs>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-additional_codecs>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-response_headers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-verify_mode>> |<>, one of `["none", "peer", "force_peer"]`|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-response_headers>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-threads>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| 
<<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -70,7 +70,7 @@ input plugins. [id="{version}-plugins-{type}s-{plugin}-additional_codecs"] ===== `additional_codecs` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * Default value is `{"application/json"=>"json"}` Apply specific codecs for specific content types. @@ -80,7 +80,7 @@ and no codec for the request's content-type is found [id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"0.0.0.0"` Codec used to decode the incoming data. @@ -91,7 +91,7 @@ The host or ip to bind [id="{version}-plugins-{type}s-{plugin}-keystore"] ===== `keystore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The JKS keystore to validate the client's certificates @@ -99,7 +99,7 @@ The JKS keystore to validate the client's certificates [id="{version}-plugins-{type}s-{plugin}-keystore_password"] ===== `keystore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the truststore password @@ -107,7 +107,7 @@ Set the truststore password [id="{version}-plugins-{type}s-{plugin}-password"] ===== `password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Password for basic authorization @@ -115,7 +115,7 @@ Password for basic authorization [id="{version}-plugins-{type}s-{plugin}-port"] ===== `port` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `8080` The TCP port to bind to @@ -123,7 +123,7 @@ The TCP port to bind to [id="{version}-plugins-{type}s-{plugin}-response_headers"] ===== `response_headers` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * Default value is `{"Content-Type"=>"text/plain"}` specify a custom set of response headers @@ -131,7 +131,7 @@ specify a custom set of response headers [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` SSL Configurations @@ -141,7 +141,7 @@ Enable SSL [id="{version}-plugins-{type}s-{plugin}-threads"] ===== `threads` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `4` Maximum number of threads to use @@ -149,7 +149,7 @@ Maximum number of threads to use [id="{version}-plugins-{type}s-{plugin}-user"] ===== `user` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. 
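+Pulling the basic-authorization options together, a sketch of an HTTP input
+protected by `user` and `password` (the credentials are placeholders and, in
+practice, should not be committed in plain text):
+
+[source,ruby]
+----
+input {
+  http {
+    port => 8080
+    # Requests must carry matching HTTP Basic auth credentials.
+    user => "logstash"
+    password => "s3cr3t"
+  }
+}
+----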
diff --git a/docs/versioned-plugins/inputs/http-v3.0.6.asciidoc b/docs/versioned-plugins/inputs/http-v3.0.6.asciidoc
index 4bd269320..bb40ccd95 100644
--- a/docs/versioned-plugins/inputs/http-v3.0.6.asciidoc
+++ b/docs/versioned-plugins/inputs/http-v3.0.6.asciidoc
@@ -49,17 +49,17 @@ This plugin supports the following configuration options plus the <<{version}-pl
[cols="<,<,<",options="header",]
|=======================================================================
|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-additional_codecs>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-additional_codecs>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<<password,password>>|No
-| <<{version}-plugins-{type}s-{plugin}-password>> |<<password,password>>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-response_headers>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-threads>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-user>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-verify_mode>> |<<string,string>>, one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-response_headers>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-threads>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No
|=======================================================================

Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
@@ -70,7 +70,7 @@ input plugins.
[id="{version}-plugins-{type}s-{plugin}-additional_codecs"]
===== `additional_codecs`

- * Value type is <<hash,hash>>
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
* Default value is `{"application/json"=>"json"}`

Apply specific codecs for specific content types.
@@ -80,7 +80,7 @@ and no codec for the request's content-type is found
[id="{version}-plugins-{type}s-{plugin}-host"]
===== `host`

- * Value type is <<string,string>>
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
* Default value is `"0.0.0.0"`

@@ -91,7 +91,7 @@ The host or ip to bind
[id="{version}-plugins-{type}s-{plugin}-keystore"]
===== `keystore`

- * Value type is <<path,path>>
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
* There is no default value for this setting.
The JKS keystore to validate the client's certificates
@@ -99,7 +99,7 @@ The JKS keystore to validate the client's certificates
[id="{version}-plugins-{type}s-{plugin}-keystore_password"]
===== `keystore_password`

- * Value type is <<password,password>>
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
* There is no default value for this setting.

Set the truststore password
@@ -107,7 +107,7 @@ Set the truststore password
[id="{version}-plugins-{type}s-{plugin}-password"]
===== `password`

- * Value type is <<password,password>>
+ * Value type is {logstash-ref}/configuration-file-structure.html#password[password]
* There is no default value for this setting.

Password for basic authorization
@@ -115,7 +115,7 @@ Password for basic authorization
[id="{version}-plugins-{type}s-{plugin}-port"]
===== `port`

- * Value type is <<number,number>>
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
* Default value is `8080`

The TCP port to bind to
@@ -123,7 +123,7 @@ The TCP port to bind to
[id="{version}-plugins-{type}s-{plugin}-response_headers"]
===== `response_headers`

- * Value type is <<hash,hash>>
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
* Default value is `{"Content-Type"=>"text/plain"}`

specify a custom set of response headers
@@ -131,7 +131,7 @@ specify a custom set of response headers
[id="{version}-plugins-{type}s-{plugin}-ssl"]
===== `ssl`

- * Value type is <<boolean,boolean>>
+ * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean]
* Default value is `false`

SSL Configurations
@@ -141,7 +141,7 @@ Enable SSL
[id="{version}-plugins-{type}s-{plugin}-threads"]
===== `threads`

- * Value type is <<number,number>>
+ * Value type is {logstash-ref}/configuration-file-structure.html#number[number]
* Default value is `4`

Maximum number of threads to use
@@ -149,7 +149,7 @@ Maximum number of threads to use
[id="{version}-plugins-{type}s-{plugin}-user"]
===== `user`

- * Value type is <<string,string>>
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
* There is no default value for this setting.
Username for basic authorization
diff --git a/docs/versioned-plugins/inputs/http-v3.0.7.asciidoc b/docs/versioned-plugins/inputs/http-v3.0.7.asciidoc
index a4526c066..cd4f7010b 100644
--- a/docs/versioned-plugins/inputs/http-v3.0.7.asciidoc
+++ b/docs/versioned-plugins/inputs/http-v3.0.7.asciidoc
@@ -49,17 +49,17 @@ This plugin supports the following configuration options plus the <<{version}-pl
[cols="<,<,<",options="header",]
|=======================================================================
|Setting |Input type|Required
-| <<{version}-plugins-{type}s-{plugin}-additional_codecs>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-host>> |<<string,string>>|No
+| <<{version}-plugins-{type}s-{plugin}-additional_codecs>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
| <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No
-| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<<password,password>>|No
-| <<{version}-plugins-{type}s-{plugin}-password>> |<<password,password>>|No
-| <<{version}-plugins-{type}s-{plugin}-port>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-response_headers>> |<<hash,hash>>|No
-| <<{version}-plugins-{type}s-{plugin}-ssl>> |<<boolean,boolean>>|No
-| <<{version}-plugins-{type}s-{plugin}-threads>> |<<number,number>>|No
-| <<{version}-plugins-{type}s-{plugin}-user>> |<<string,string>>|No
-| <<{version}-plugins-{type}s-{plugin}-verify_mode>> |<<string,string>>, one of `["none", "peer", "force_peer"]`|No
+| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No
+| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-response_headers>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No
+| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No
+| <<{version}-plugins-{type}s-{plugin}-threads>> |{logstash-ref}/configuration-file-structure.html#number[number]|No
+| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No
+| <<{version}-plugins-{type}s-{plugin}-verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No
|=======================================================================

Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all
@@ -70,7 +70,7 @@ input plugins.
[id="{version}-plugins-{type}s-{plugin}-additional_codecs"]
===== `additional_codecs`

- * Value type is <<hash,hash>>
+ * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash]
* Default value is `{"application/json"=>"json"}`

Apply specific codecs for specific content types.
@@ -80,7 +80,7 @@ and no codec for the request's content-type is found
[id="{version}-plugins-{type}s-{plugin}-host"]
===== `host`

- * Value type is <<string,string>>
+ * Value type is {logstash-ref}/configuration-file-structure.html#string[string]
* Default value is `"0.0.0.0"`

The host or ip to bind
@@ -88,7 +88,7 @@ The host or ip to bind
[id="{version}-plugins-{type}s-{plugin}-keystore"]
===== `keystore`

- * Value type is <<path,path>>
+ * Value type is {logstash-ref}/configuration-file-structure.html#path[path]
* There is no default value for this setting.
The JKS keystore to validate the client's certificates @@ -96,7 +96,7 @@ The JKS keystore to validate the client's certificates [id="{version}-plugins-{type}s-{plugin}-keystore_password"] ===== `keystore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the truststore password @@ -104,7 +104,7 @@ Set the truststore password [id="{version}-plugins-{type}s-{plugin}-password"] ===== `password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Password for basic authorization @@ -112,7 +112,7 @@ Password for basic authorization [id="{version}-plugins-{type}s-{plugin}-port"] ===== `port` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `8080` The TCP port to bind to @@ -120,7 +120,7 @@ The TCP port to bind to [id="{version}-plugins-{type}s-{plugin}-response_headers"] ===== `response_headers` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * Default value is `{"Content-Type"=>"text/plain"}` specify a custom set of response headers @@ -128,7 +128,7 @@ specify a custom set of response headers [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` SSL Configurations @@ -138,7 +138,7 @@ Enable SSL [id="{version}-plugins-{type}s-{plugin}-threads"] ===== `threads` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `4` Maximum number of threads to use @@ -146,7 +146,7 @@ Maximum number of threads to use [id="{version}-plugins-{type}s-{plugin}-user"] ===== `user` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. 
Username for basic authorization diff --git a/docs/versioned-plugins/inputs/http-v3.0.8.asciidoc b/docs/versioned-plugins/inputs/http-v3.0.8.asciidoc index 01d5d9f35..3224ded6a 100644 --- a/docs/versioned-plugins/inputs/http-v3.0.8.asciidoc +++ b/docs/versioned-plugins/inputs/http-v3.0.8.asciidoc @@ -62,17 +62,17 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-additional_codecs>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-additional_codecs>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-response_headers>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-threads>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-verify_mode>> |<>, one of `["none", "peer", "force_peer"]`|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-response_headers>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-threads>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-verify_mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["none", "peer", "force_peer"]`|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -83,7 +83,7 @@ input plugins. [id="{version}-plugins-{type}s-{plugin}-additional_codecs"] ===== `additional_codecs` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * Default value is `{"application/json"=>"json"}` Apply specific codecs for specific content types. @@ -93,7 +93,7 @@ and no codec for the request's content-type is found [id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"0.0.0.0"` The host or ip to bind @@ -101,7 +101,7 @@ The host or ip to bind [id="{version}-plugins-{type}s-{plugin}-keystore"] ===== `keystore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. 
The JKS keystore to validate the client's certificates @@ -109,7 +109,7 @@ The JKS keystore to validate the client's certificates [id="{version}-plugins-{type}s-{plugin}-keystore_password"] ===== `keystore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the truststore password @@ -117,7 +117,7 @@ Set the truststore password [id="{version}-plugins-{type}s-{plugin}-password"] ===== `password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Password for basic authorization @@ -125,7 +125,7 @@ Password for basic authorization [id="{version}-plugins-{type}s-{plugin}-port"] ===== `port` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `8080` The TCP port to bind to @@ -133,7 +133,7 @@ The TCP port to bind to [id="{version}-plugins-{type}s-{plugin}-response_headers"] ===== `response_headers` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * Default value is `{"Content-Type"=>"text/plain"}` specify a custom set of response headers @@ -141,7 +141,7 @@ specify a custom set of response headers [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` SSL Configurations @@ -151,7 +151,7 @@ Enable SSL [id="{version}-plugins-{type}s-{plugin}-threads"] ===== `threads` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `4` Maximum number of threads to use @@ -159,7 +159,7 @@ Maximum number of threads to use [id="{version}-plugins-{type}s-{plugin}-user"] ===== `user` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Username for basic authorization diff --git a/docs/versioned-plugins/inputs/s3-v3.1.5.asciidoc b/docs/versioned-plugins/inputs/s3-v3.1.5.asciidoc index 02ad0778a..5814470ab 100644 --- a/docs/versioned-plugins/inputs/s3-v3.1.5.asciidoc +++ b/docs/versioned-plugins/inputs/s3-v3.1.5.asciidoc @@ -12,7 +12,7 @@ START - GENERATED VARIABLES, DO NOT EDIT! END - GENERATED VARIABLES, DO NOT EDIT! 
/////////////////////////////////////////// -[id="plugins-{type}-{plugin}"] +[id="{version}-plugins-{type}s-{plugin}"] === S3 input plugin {version} @@ -33,22 +33,22 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-backup_add_prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-backup_to_bucket>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-backup_to_dir>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bucket>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-delete>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclude_pattern>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No -| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-backup_add_prefix>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_bucket>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_dir>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bucket>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes +| <<{version}-plugins-{type}s-{plugin}-delete>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-exclude_pattern>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-prefix>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-region>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> 
|{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |{logstash-ref}/configuration-file-structure.html#string[string]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -59,7 +59,7 @@ input plugins. [id="{version}-plugins-{type}s-{plugin}-access_key_id"] ===== `access_key_id` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: @@ -73,7 +73,7 @@ This plugin uses the AWS SDK and supports several ways to get credentials, which [id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] ===== `aws_credentials_file` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Path to YAML file containing a hash of AWS credentials. @@ -91,7 +91,7 @@ file should look like this: [id="{version}-plugins-{type}s-{plugin}-backup_add_prefix"] ===== `backup_add_prefix` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Append a prefix to the key (full path including file name in s3) after processing. @@ -101,7 +101,7 @@ choose a new 'folder' to place the files in [id="{version}-plugins-{type}s-{plugin}-backup_to_bucket"] ===== `backup_to_bucket` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Name of a S3 bucket to backup processed files to. @@ -109,7 +109,7 @@ Name of a S3 bucket to backup processed files to. [id="{version}-plugins-{type}s-{plugin}-backup_to_dir"] ===== `backup_to_dir` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Path of a local directory to backup processed files to. @@ -118,7 +118,7 @@ Path of a local directory to backup processed files to. ===== `bucket` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The name of the S3 bucket. @@ -126,7 +126,7 @@ The name of the S3 bucket. [id="{version}-plugins-{type}s-{plugin}-delete"] ===== `delete` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Whether to delete processed files from the original bucket. @@ -134,7 +134,7 @@ Whether to delete processed files from the original bucket. [id="{version}-plugins-{type}s-{plugin}-exclude_pattern"] ===== `exclude_pattern` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Ruby style regexp of keys to exclude from the bucket @@ -142,7 +142,7 @@ Ruby style regexp of keys to exclude from the bucket [id="{version}-plugins-{type}s-{plugin}-interval"] ===== `interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Interval to wait between to check the file list again after a run is finished. 
@@ -151,7 +151,7 @@ Value is in seconds. [id="{version}-plugins-{type}s-{plugin}-prefix"] ===== `prefix` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` If specified, the prefix of filenames in the bucket must match (not a regexp) @@ -159,7 +159,7 @@ If specified, the prefix of filenames in the bucket must match (not a regexp) [id="{version}-plugins-{type}s-{plugin}-proxy_uri"] ===== `proxy_uri` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. URI to proxy server if required @@ -175,7 +175,7 @@ The AWS Region [id="{version}-plugins-{type}s-{plugin}-secret_access_key"] ===== `secret_access_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The AWS Secret Access Key @@ -183,7 +183,7 @@ The AWS Secret Access Key [id="{version}-plugins-{type}s-{plugin}-session_token"] ===== `session_token` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The AWS Session token for temporary credential @@ -191,7 +191,7 @@ The AWS Session token for temporary credential [id="{version}-plugins-{type}s-{plugin}-sincedb_path"] ===== `sincedb_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Where to write the since database (keeps track of the date @@ -202,7 +202,7 @@ Should be a path with filename not just a directory. [id="{version}-plugins-{type}s-{plugin}-temporary_directory"] ===== `temporary_directory` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"/tmp/logstash"` Set the directory where logstash will store the tmp files before processing them. 
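The option tables repeated in these hunks are easier to absorb with a concrete pipeline. Below is a minimal sketch that exercises a few of the http and s3 input settings documented above; the port, keystore path, credentials, and bucket name are illustrative placeholders, not values taken from the plugins themselves.

[source,ruby]
----
input {
  # HTTP input: TLS with a JKS keystore plus basic auth, per the settings above.
  http {
    host              => "0.0.0.0"                 # default bind address
    port              => 8443                      # placeholder TLS port (default is 8080)
    ssl               => true                      # enable SSL
    keystore          => "/etc/logstash/http.jks"  # assumed keystore path
    keystore_password => "changeme"                # placeholder password
    user              => "logstash"                # basic auth username (placeholder)
    password          => "s3cret"                  # basic auth password (placeholder)
    additional_codecs => { "application/json" => "json" }  # the documented default mapping
  }

  # S3 input: poll a bucket on the default 60-second interval.
  s3 {
    bucket => "my-log-bucket"   # required setting; placeholder name
    region => "us-east-1"       # must be one of the regions listed in the table
  }
}
----

Credentials omitted from the `s3` block fall back to the AWS SDK lookup chain described under `access_key_id`.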
diff --git a/docs/versioned-plugins/inputs/s3-v3.1.6.asciidoc b/docs/versioned-plugins/inputs/s3-v3.1.6.asciidoc index d5823f473..e6e62de76 100644 --- a/docs/versioned-plugins/inputs/s3-v3.1.6.asciidoc +++ b/docs/versioned-plugins/inputs/s3-v3.1.6.asciidoc @@ -33,22 +33,22 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-backup_add_prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-backup_to_bucket>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-backup_to_dir>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bucket>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-delete>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclude_pattern>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No -| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-backup_add_prefix>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_bucket>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_dir>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bucket>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes +| <<{version}-plugins-{type}s-{plugin}-delete>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-exclude_pattern>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-prefix>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-region>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> 
|{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |{logstash-ref}/configuration-file-structure.html#string[string]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -59,7 +59,7 @@ input plugins. [id="{version}-plugins-{type}s-{plugin}-access_key_id"] ===== `access_key_id` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: @@ -73,7 +73,7 @@ This plugin uses the AWS SDK and supports several ways to get credentials, which [id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] ===== `aws_credentials_file` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Path to YAML file containing a hash of AWS credentials. @@ -91,7 +91,7 @@ file should look like this: [id="{version}-plugins-{type}s-{plugin}-backup_add_prefix"] ===== `backup_add_prefix` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Append a prefix to the key (full path including file name in s3) after processing. @@ -101,7 +101,7 @@ choose a new 'folder' to place the files in [id="{version}-plugins-{type}s-{plugin}-backup_to_bucket"] ===== `backup_to_bucket` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Name of a S3 bucket to backup processed files to. @@ -109,7 +109,7 @@ Name of a S3 bucket to backup processed files to. [id="{version}-plugins-{type}s-{plugin}-backup_to_dir"] ===== `backup_to_dir` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Path of a local directory to backup processed files to. @@ -118,7 +118,7 @@ Path of a local directory to backup processed files to. ===== `bucket` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The name of the S3 bucket. @@ -126,7 +126,7 @@ The name of the S3 bucket. [id="{version}-plugins-{type}s-{plugin}-delete"] ===== `delete` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Whether to delete processed files from the original bucket. @@ -134,7 +134,7 @@ Whether to delete processed files from the original bucket. 
[id="{version}-plugins-{type}s-{plugin}-exclude_pattern"] ===== `exclude_pattern` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Ruby style regexp of keys to exclude from the bucket @@ -142,7 +142,7 @@ Ruby style regexp of keys to exclude from the bucket [id="{version}-plugins-{type}s-{plugin}-interval"] ===== `interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Interval to wait between to check the file list again after a run is finished. @@ -151,7 +151,7 @@ Value is in seconds. [id="{version}-plugins-{type}s-{plugin}-prefix"] ===== `prefix` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` If specified, the prefix of filenames in the bucket must match (not a regexp) @@ -159,7 +159,7 @@ If specified, the prefix of filenames in the bucket must match (not a regexp) [id="{version}-plugins-{type}s-{plugin}-proxy_uri"] ===== `proxy_uri` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. URI to proxy server if required @@ -175,7 +175,7 @@ The AWS Region [id="{version}-plugins-{type}s-{plugin}-secret_access_key"] ===== `secret_access_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The AWS Secret Access Key @@ -183,7 +183,7 @@ The AWS Secret Access Key [id="{version}-plugins-{type}s-{plugin}-session_token"] ===== `session_token` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The AWS Session token for temporary credential @@ -191,7 +191,7 @@ The AWS Session token for temporary credential [id="{version}-plugins-{type}s-{plugin}-sincedb_path"] ===== `sincedb_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Where to write the since database (keeps track of the date @@ -202,7 +202,7 @@ Should be a path with filename not just a directory. [id="{version}-plugins-{type}s-{plugin}-temporary_directory"] ===== `temporary_directory` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"/tmp/logstash"` Set the directory where logstash will store the tmp files before processing them. 
diff --git a/docs/versioned-plugins/inputs/s3-v3.1.7.asciidoc b/docs/versioned-plugins/inputs/s3-v3.1.7.asciidoc index ecdc5f624..aeb66c47d 100644 --- a/docs/versioned-plugins/inputs/s3-v3.1.7.asciidoc +++ b/docs/versioned-plugins/inputs/s3-v3.1.7.asciidoc @@ -33,22 +33,22 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-backup_add_prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-backup_to_bucket>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-backup_to_dir>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bucket>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-delete>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclude_pattern>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No -| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-backup_add_prefix>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_bucket>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_dir>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bucket>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes +| <<{version}-plugins-{type}s-{plugin}-delete>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-exclude_pattern>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-prefix>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-region>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> 
|{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |{logstash-ref}/configuration-file-structure.html#string[string]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -59,7 +59,7 @@ input plugins. [id="{version}-plugins-{type}s-{plugin}-access_key_id"] ===== `access_key_id` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: @@ -73,7 +73,7 @@ This plugin uses the AWS SDK and supports several ways to get credentials, which [id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] ===== `aws_credentials_file` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Path to YAML file containing a hash of AWS credentials. @@ -91,7 +91,7 @@ file should look like this: [id="{version}-plugins-{type}s-{plugin}-backup_add_prefix"] ===== `backup_add_prefix` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Append a prefix to the key (full path including file name in s3) after processing. @@ -101,7 +101,7 @@ choose a new 'folder' to place the files in [id="{version}-plugins-{type}s-{plugin}-backup_to_bucket"] ===== `backup_to_bucket` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Name of a S3 bucket to backup processed files to. @@ -109,7 +109,7 @@ Name of a S3 bucket to backup processed files to. [id="{version}-plugins-{type}s-{plugin}-backup_to_dir"] ===== `backup_to_dir` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Path of a local directory to backup processed files to. @@ -118,7 +118,7 @@ Path of a local directory to backup processed files to. ===== `bucket` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The name of the S3 bucket. @@ -126,7 +126,7 @@ The name of the S3 bucket. [id="{version}-plugins-{type}s-{plugin}-delete"] ===== `delete` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Whether to delete processed files from the original bucket. @@ -134,7 +134,7 @@ Whether to delete processed files from the original bucket. 
[id="{version}-plugins-{type}s-{plugin}-exclude_pattern"] ===== `exclude_pattern` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Ruby style regexp of keys to exclude from the bucket @@ -142,7 +142,7 @@ Ruby style regexp of keys to exclude from the bucket [id="{version}-plugins-{type}s-{plugin}-interval"] ===== `interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Interval to wait between to check the file list again after a run is finished. @@ -151,7 +151,7 @@ Value is in seconds. [id="{version}-plugins-{type}s-{plugin}-prefix"] ===== `prefix` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` If specified, the prefix of filenames in the bucket must match (not a regexp) @@ -159,7 +159,7 @@ If specified, the prefix of filenames in the bucket must match (not a regexp) [id="{version}-plugins-{type}s-{plugin}-proxy_uri"] ===== `proxy_uri` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. URI to proxy server if required @@ -175,7 +175,7 @@ The AWS Region [id="{version}-plugins-{type}s-{plugin}-secret_access_key"] ===== `secret_access_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The AWS Secret Access Key @@ -183,7 +183,7 @@ The AWS Secret Access Key [id="{version}-plugins-{type}s-{plugin}-session_token"] ===== `session_token` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The AWS Session token for temporary credential @@ -191,7 +191,7 @@ The AWS Session token for temporary credential [id="{version}-plugins-{type}s-{plugin}-sincedb_path"] ===== `sincedb_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Where to write the since database (keeps track of the date @@ -202,7 +202,7 @@ Should be a path with filename not just a directory. [id="{version}-plugins-{type}s-{plugin}-temporary_directory"] ===== `temporary_directory` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"/tmp/logstash"` Set the directory where logstash will store the tmp files before processing them. 
diff --git a/docs/versioned-plugins/inputs/s3-v3.1.8.asciidoc b/docs/versioned-plugins/inputs/s3-v3.1.8.asciidoc index d251d6064..9a5d4f7ee 100644 --- a/docs/versioned-plugins/inputs/s3-v3.1.8.asciidoc +++ b/docs/versioned-plugins/inputs/s3-v3.1.8.asciidoc @@ -33,22 +33,22 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-backup_add_prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-backup_to_bucket>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-backup_to_dir>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bucket>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-delete>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclude_pattern>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No -| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-backup_add_prefix>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_bucket>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_dir>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bucket>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes +| <<{version}-plugins-{type}s-{plugin}-delete>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-exclude_pattern>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-prefix>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-region>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> 
|{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |{logstash-ref}/configuration-file-structure.html#string[string]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -59,7 +59,7 @@ input plugins. [id="{version}-plugins-{type}s-{plugin}-access_key_id"] ===== `access_key_id` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: @@ -73,7 +73,7 @@ This plugin uses the AWS SDK and supports several ways to get credentials, which [id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] ===== `aws_credentials_file` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Path to YAML file containing a hash of AWS credentials. @@ -91,7 +91,7 @@ file should look like this: [id="{version}-plugins-{type}s-{plugin}-backup_add_prefix"] ===== `backup_add_prefix` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Append a prefix to the key (full path including file name in s3) after processing. @@ -101,7 +101,7 @@ choose a new 'folder' to place the files in [id="{version}-plugins-{type}s-{plugin}-backup_to_bucket"] ===== `backup_to_bucket` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Name of a S3 bucket to backup processed files to. @@ -109,7 +109,7 @@ Name of a S3 bucket to backup processed files to. [id="{version}-plugins-{type}s-{plugin}-backup_to_dir"] ===== `backup_to_dir` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Path of a local directory to backup processed files to. @@ -118,7 +118,7 @@ Path of a local directory to backup processed files to. ===== `bucket` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The name of the S3 bucket. @@ -126,7 +126,7 @@ The name of the S3 bucket. [id="{version}-plugins-{type}s-{plugin}-delete"] ===== `delete` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Whether to delete processed files from the original bucket. @@ -134,7 +134,7 @@ Whether to delete processed files from the original bucket. 
[id="{version}-plugins-{type}s-{plugin}-exclude_pattern"] ===== `exclude_pattern` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Ruby style regexp of keys to exclude from the bucket @@ -142,7 +142,7 @@ Ruby style regexp of keys to exclude from the bucket [id="{version}-plugins-{type}s-{plugin}-interval"] ===== `interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Interval to wait between to check the file list again after a run is finished. @@ -151,7 +151,7 @@ Value is in seconds. [id="{version}-plugins-{type}s-{plugin}-prefix"] ===== `prefix` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` If specified, the prefix of filenames in the bucket must match (not a regexp) @@ -159,7 +159,7 @@ If specified, the prefix of filenames in the bucket must match (not a regexp) [id="{version}-plugins-{type}s-{plugin}-proxy_uri"] ===== `proxy_uri` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. URI to proxy server if required @@ -175,7 +175,7 @@ The AWS Region [id="{version}-plugins-{type}s-{plugin}-secret_access_key"] ===== `secret_access_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The AWS Secret Access Key @@ -183,7 +183,7 @@ The AWS Secret Access Key [id="{version}-plugins-{type}s-{plugin}-session_token"] ===== `session_token` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The AWS Session token for temporary credential @@ -191,7 +191,7 @@ The AWS Session token for temporary credential [id="{version}-plugins-{type}s-{plugin}-sincedb_path"] ===== `sincedb_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Where to write the since database (keeps track of the date @@ -202,7 +202,7 @@ Should be a path with filename not just a directory. [id="{version}-plugins-{type}s-{plugin}-temporary_directory"] ===== `temporary_directory` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"/tmp/logstash"` Set the directory where logstash will store the tmp files before processing them. 
diff --git a/docs/versioned-plugins/inputs/s3-v3.1.9.asciidoc b/docs/versioned-plugins/inputs/s3-v3.1.9.asciidoc index 7853a8d6f..2ceaaaf40 100644 --- a/docs/versioned-plugins/inputs/s3-v3.1.9.asciidoc +++ b/docs/versioned-plugins/inputs/s3-v3.1.9.asciidoc @@ -33,22 +33,22 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-backup_add_prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-backup_to_bucket>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-backup_to_dir>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bucket>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-delete>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclude_pattern>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No -| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-backup_add_prefix>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_bucket>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_dir>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bucket>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes +| <<{version}-plugins-{type}s-{plugin}-delete>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-exclude_pattern>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-prefix>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-region>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> 
|{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |{logstash-ref}/configuration-file-structure.html#string[string]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -59,7 +59,7 @@ input plugins. [id="{version}-plugins-{type}s-{plugin}-access_key_id"] ===== `access_key_id` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: @@ -73,7 +73,7 @@ This plugin uses the AWS SDK and supports several ways to get credentials, which [id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] ===== `aws_credentials_file` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Path to YAML file containing a hash of AWS credentials. @@ -91,7 +91,7 @@ file should look like this: [id="{version}-plugins-{type}s-{plugin}-backup_add_prefix"] ===== `backup_add_prefix` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Append a prefix to the key (full path including file name in s3) after processing. @@ -101,7 +101,7 @@ choose a new 'folder' to place the files in [id="{version}-plugins-{type}s-{plugin}-backup_to_bucket"] ===== `backup_to_bucket` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Name of a S3 bucket to backup processed files to. @@ -109,7 +109,7 @@ Name of a S3 bucket to backup processed files to. [id="{version}-plugins-{type}s-{plugin}-backup_to_dir"] ===== `backup_to_dir` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Path of a local directory to backup processed files to. @@ -118,7 +118,7 @@ Path of a local directory to backup processed files to. ===== `bucket` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The name of the S3 bucket. @@ -126,7 +126,7 @@ The name of the S3 bucket. [id="{version}-plugins-{type}s-{plugin}-delete"] ===== `delete` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Whether to delete processed files from the original bucket. @@ -134,7 +134,7 @@ Whether to delete processed files from the original bucket. 
[id="{version}-plugins-{type}s-{plugin}-exclude_pattern"] ===== `exclude_pattern` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Ruby style regexp of keys to exclude from the bucket @@ -142,7 +142,7 @@ Ruby style regexp of keys to exclude from the bucket [id="{version}-plugins-{type}s-{plugin}-interval"] ===== `interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Interval to wait between to check the file list again after a run is finished. @@ -151,7 +151,7 @@ Value is in seconds. [id="{version}-plugins-{type}s-{plugin}-prefix"] ===== `prefix` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` If specified, the prefix of filenames in the bucket must match (not a regexp) @@ -159,7 +159,7 @@ If specified, the prefix of filenames in the bucket must match (not a regexp) [id="{version}-plugins-{type}s-{plugin}-proxy_uri"] ===== `proxy_uri` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. URI to proxy server if required @@ -175,7 +175,7 @@ The AWS Region [id="{version}-plugins-{type}s-{plugin}-secret_access_key"] ===== `secret_access_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The AWS Secret Access Key @@ -183,7 +183,7 @@ The AWS Secret Access Key [id="{version}-plugins-{type}s-{plugin}-session_token"] ===== `session_token` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The AWS Session token for temporary credential @@ -191,7 +191,7 @@ The AWS Session token for temporary credential [id="{version}-plugins-{type}s-{plugin}-sincedb_path"] ===== `sincedb_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Where to write the since database (keeps track of the date @@ -203,7 +203,7 @@ If specified, this setting must be a filename path and not just a directory. [id="{version}-plugins-{type}s-{plugin}-temporary_directory"] ===== `temporary_directory` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"/tmp/logstash"` Set the directory where logstash will store the tmp files before processing them. 
diff --git a/docs/versioned-plugins/inputs/s3-v3.2.0.asciidoc b/docs/versioned-plugins/inputs/s3-v3.2.0.asciidoc index 3aca2d8c9..d2482e853 100644 --- a/docs/versioned-plugins/inputs/s3-v3.2.0.asciidoc +++ b/docs/versioned-plugins/inputs/s3-v3.2.0.asciidoc @@ -33,22 +33,22 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-backup_add_prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-backup_to_bucket>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-backup_to_dir>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bucket>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-delete>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-exclude_pattern>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-prefix>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-region>> |<>, one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No -| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-session_token>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-access_key_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-aws_credentials_file>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-backup_add_prefix>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_bucket>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-backup_to_dir>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bucket>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes +| <<{version}-plugins-{type}s-{plugin}-delete>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-exclude_pattern>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-prefix>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-proxy_uri>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-region>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["us-east-1", "us-east-2", "us-west-1", "us-west-2", "eu-central-1", "eu-west-1", "eu-west-2", "ap-southeast-1", "ap-southeast-2", "ap-northeast-1", "ap-northeast-2", "sa-east-1", "us-gov-west-1", "cn-north-1", "ap-south-1", "ca-central-1"]`|No +| <<{version}-plugins-{type}s-{plugin}-secret_access_key>> 
|{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-session_token>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-sincedb_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-temporary_directory>> |{logstash-ref}/configuration-file-structure.html#string[string]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -59,7 +59,7 @@ input plugins. [id="{version}-plugins-{type}s-{plugin}-access_key_id"] ===== `access_key_id` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. This plugin uses the AWS SDK and supports several ways to get credentials, which will be tried in this order: @@ -73,7 +73,7 @@ This plugin uses the AWS SDK and supports several ways to get credentials, which [id="{version}-plugins-{type}s-{plugin}-aws_credentials_file"] ===== `aws_credentials_file` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Path to a YAML file containing a hash of AWS credentials. @@ -91,7 +91,7 @@ file should look like this: [id="{version}-plugins-{type}s-{plugin}-backup_add_prefix"] ===== `backup_add_prefix` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Append a prefix to the key (the full path including the file name in S3) after processing. @@ -101,7 +101,7 @@ choose a new 'folder' to place the files in [id="{version}-plugins-{type}s-{plugin}-backup_to_bucket"] ===== `backup_to_bucket` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Name of an S3 bucket to back up processed files to. @@ -109,7 +109,7 @@ Name of an S3 bucket to back up processed files to. [id="{version}-plugins-{type}s-{plugin}-backup_to_dir"] ===== `backup_to_dir` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Path of a local directory to back up processed files to. @@ -118,7 +118,7 @@ Path of a local directory to back up processed files to. ===== `bucket` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The name of the S3 bucket. @@ -126,7 +126,7 @@ The name of the S3 bucket. [id="{version}-plugins-{type}s-{plugin}-delete"] ===== `delete` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Whether to delete processed files from the original bucket. @@ -134,7 +134,7 @@
[id="{version}-plugins-{type}s-{plugin}-exclude_pattern"] ===== `exclude_pattern` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Ruby style regexp of keys to exclude from the bucket @@ -142,7 +142,7 @@ Ruby style regexp of keys to exclude from the bucket [id="{version}-plugins-{type}s-{plugin}-interval"] ===== `interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Interval to wait between to check the file list again after a run is finished. @@ -151,7 +151,7 @@ Value is in seconds. [id="{version}-plugins-{type}s-{plugin}-prefix"] ===== `prefix` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` If specified, the prefix of filenames in the bucket must match (not a regexp) @@ -159,7 +159,7 @@ If specified, the prefix of filenames in the bucket must match (not a regexp) [id="{version}-plugins-{type}s-{plugin}-proxy_uri"] ===== `proxy_uri` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. URI to proxy server if required @@ -175,7 +175,7 @@ The AWS Region [id="{version}-plugins-{type}s-{plugin}-secret_access_key"] ===== `secret_access_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The AWS Secret Access Key @@ -183,7 +183,7 @@ The AWS Secret Access Key [id="{version}-plugins-{type}s-{plugin}-session_token"] ===== `session_token` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The AWS Session token for temporary credential @@ -191,7 +191,7 @@ The AWS Session token for temporary credential [id="{version}-plugins-{type}s-{plugin}-sincedb_path"] ===== `sincedb_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Where to write the since database (keeps track of the date @@ -203,7 +203,7 @@ If specified, this setting must be a filename path and not just a directory. [id="{version}-plugins-{type}s-{plugin}-temporary_directory"] ===== `temporary_directory` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"/tmp/logstash"` Set the directory where logstash will store the tmp files before processing them. diff --git a/docs/versioned-plugins/inputs/tcp-v4.1.2.asciidoc b/docs/versioned-plugins/inputs/tcp-v4.1.2.asciidoc index 3f504273f..21d7816aa 100644 --- a/docs/versioned-plugins/inputs/tcp-v4.1.2.asciidoc +++ b/docs/versioned-plugins/inputs/tcp-v4.1.2.asciidoc @@ -12,7 +12,7 @@ START - GENERATED VARIABLES, DO NOT EDIT! END - GENERATED VARIABLES, DO NOT EDIT! 
/////////////////////////////////////////// -[id="plugins-{type}-{plugin}"] +[id="{version}-plugins-{type}s-{plugin}"] === Tcp input plugin {version} @@ -77,16 +77,16 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes +| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |{logstash-ref}/configuration-file-structure.html#array[array]|No | <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -98,7 +98,7 @@ input plugins. ===== `data_timeout` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `-1` @@ -106,7 +106,7 @@ input plugins. [id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"0.0.0.0"` When mode is `server`, the address to listen on. @@ -125,7 +125,7 @@ Mode to operate in. `server` listens for client connections, ===== `port` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. When mode is `server`, the port to listen on. @@ -134,7 +134,7 @@ When mode is `client`, the port to connect to. 
[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"] ===== `proxy_protocol` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Proxy protocol support, only v1 is supported at this time @@ -144,7 +144,7 @@ http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt ===== `ssl_cacert` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. @@ -152,7 +152,7 @@ The SSL CA certificate, chainfile or CA path. The system CA path is automaticall [id="{version}-plugins-{type}s-{plugin}-ssl_cert"] ===== `ssl_cert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL certificate path @@ -160,7 +160,7 @@ SSL certificate path [id="{version}-plugins-{type}s-{plugin}-ssl_enable"] ===== `ssl_enable` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable SSL (must be set for other `ssl_` options to take effect). @@ -168,7 +168,7 @@ Enable SSL (must be set for other `ssl_` options to take effect). [id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"] ===== `ssl_extra_chain_certs` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` An Array of extra X509 certificates to be added to the certificate chain. @@ -177,7 +177,7 @@ Useful when the CA chain is not necessary in the system store. [id="{version}-plugins-{type}s-{plugin}-ssl_key"] ===== `ssl_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL key path @@ -185,7 +185,7 @@ SSL key path [id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] ===== `ssl_key_passphrase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * Default value is `nil` SSL key passphrase @@ -193,7 +193,7 @@ SSL key passphrase [id="{version}-plugins-{type}s-{plugin}-ssl_verify"] ===== `ssl_verify` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Verify the identity of the other end of the SSL connection against the CA. 
diff --git a/docs/versioned-plugins/inputs/tcp-v4.2.2.asciidoc b/docs/versioned-plugins/inputs/tcp-v4.2.2.asciidoc index d31d5b8f1..c5729d0e4 100644 --- a/docs/versioned-plugins/inputs/tcp-v4.2.2.asciidoc +++ b/docs/versioned-plugins/inputs/tcp-v4.2.2.asciidoc @@ -77,16 +77,16 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes +| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |{logstash-ref}/configuration-file-structure.html#array[array]|No | <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -98,7 +98,7 @@ input plugins. ===== `data_timeout` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `-1` @@ -106,7 +106,7 @@ input plugins. [id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"0.0.0.0"` When mode is `server`, the address to listen on. @@ -125,7 +125,7 @@ Mode to operate in. `server` listens for client connections, ===== `port` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. When mode is `server`, the port to listen on. @@ -134,7 +134,7 @@ When mode is `client`, the port to connect to. 
[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"] ===== `proxy_protocol` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Proxy protocol support, only v1 is supported at this time @@ -144,7 +144,7 @@ http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt ===== `ssl_cacert` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. @@ -152,7 +152,7 @@ The SSL CA certificate, chainfile or CA path. The system CA path is automaticall [id="{version}-plugins-{type}s-{plugin}-ssl_cert"] ===== `ssl_cert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL certificate path @@ -160,7 +160,7 @@ SSL certificate path [id="{version}-plugins-{type}s-{plugin}-ssl_enable"] ===== `ssl_enable` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable SSL (must be set for other `ssl_` options to take effect). @@ -168,7 +168,7 @@ Enable SSL (must be set for other `ssl_` options to take effect). [id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"] ===== `ssl_extra_chain_certs` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` An Array of extra X509 certificates to be added to the certificate chain. @@ -177,7 +177,7 @@ Useful when the CA chain is not necessary in the system store. [id="{version}-plugins-{type}s-{plugin}-ssl_key"] ===== `ssl_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL key path @@ -185,7 +185,7 @@ SSL key path [id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] ===== `ssl_key_passphrase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * Default value is `nil` SSL key passphrase @@ -193,7 +193,7 @@ SSL key passphrase [id="{version}-plugins-{type}s-{plugin}-ssl_verify"] ===== `ssl_verify` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Verify the identity of the other end of the SSL connection against the CA. 
diff --git a/docs/versioned-plugins/inputs/tcp-v4.2.3.asciidoc b/docs/versioned-plugins/inputs/tcp-v4.2.3.asciidoc index 3e204aee5..22e027905 100644 --- a/docs/versioned-plugins/inputs/tcp-v4.2.3.asciidoc +++ b/docs/versioned-plugins/inputs/tcp-v4.2.3.asciidoc @@ -77,16 +77,16 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes +| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |{logstash-ref}/configuration-file-structure.html#array[array]|No | <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -98,7 +98,7 @@ input plugins. ===== `data_timeout` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `-1` @@ -106,7 +106,7 @@ input plugins. [id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"0.0.0.0"` When mode is `server`, the address to listen on. @@ -125,7 +125,7 @@ Mode to operate in. `server` listens for client connections, ===== `port` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. When mode is `server`, the port to listen on. @@ -134,7 +134,7 @@ When mode is `client`, the port to connect to. 
[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"] ===== `proxy_protocol` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Proxy protocol support, only v1 is supported at this time @@ -144,7 +144,7 @@ http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt ===== `ssl_cacert` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. @@ -152,7 +152,7 @@ The SSL CA certificate, chainfile or CA path. The system CA path is automaticall [id="{version}-plugins-{type}s-{plugin}-ssl_cert"] ===== `ssl_cert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL certificate path @@ -160,7 +160,7 @@ SSL certificate path [id="{version}-plugins-{type}s-{plugin}-ssl_enable"] ===== `ssl_enable` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable SSL (must be set for other `ssl_` options to take effect). @@ -168,7 +168,7 @@ Enable SSL (must be set for other `ssl_` options to take effect). [id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"] ===== `ssl_extra_chain_certs` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` An Array of extra X509 certificates to be added to the certificate chain. @@ -177,7 +177,7 @@ Useful when the CA chain is not necessary in the system store. [id="{version}-plugins-{type}s-{plugin}-ssl_key"] ===== `ssl_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL key path @@ -185,7 +185,7 @@ SSL key path [id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] ===== `ssl_key_passphrase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * Default value is `nil` SSL key passphrase @@ -193,7 +193,7 @@ SSL key passphrase [id="{version}-plugins-{type}s-{plugin}-ssl_verify"] ===== `ssl_verify` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Verify the identity of the other end of the SSL connection against the CA. 
diff --git a/docs/versioned-plugins/inputs/tcp-v4.2.4.asciidoc b/docs/versioned-plugins/inputs/tcp-v4.2.4.asciidoc index 91a9a7b55..76d5809da 100644 --- a/docs/versioned-plugins/inputs/tcp-v4.2.4.asciidoc +++ b/docs/versioned-plugins/inputs/tcp-v4.2.4.asciidoc @@ -77,16 +77,16 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes +| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |{logstash-ref}/configuration-file-structure.html#array[array]|No | <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -98,7 +98,7 @@ input plugins. ===== `data_timeout` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `-1` @@ -106,7 +106,7 @@ input plugins. [id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"0.0.0.0"` When mode is `server`, the address to listen on. @@ -125,7 +125,7 @@ Mode to operate in. `server` listens for client connections, ===== `port` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. When mode is `server`, the port to listen on. @@ -134,7 +134,7 @@ When mode is `client`, the port to connect to. 
[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"] ===== `proxy_protocol` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Proxy protocol support, only v1 is supported at this time @@ -144,7 +144,7 @@ http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt ===== `ssl_cacert` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The SSL CA certificate, chainfile or CA path. The system CA path is automatically included. @@ -152,7 +152,7 @@ The SSL CA certificate, chainfile or CA path. The system CA path is automaticall [id="{version}-plugins-{type}s-{plugin}-ssl_cert"] ===== `ssl_cert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL certificate path @@ -160,7 +160,7 @@ SSL certificate path [id="{version}-plugins-{type}s-{plugin}-ssl_enable"] ===== `ssl_enable` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable SSL (must be set for other `ssl_` options to take effect). @@ -168,7 +168,7 @@ Enable SSL (must be set for other `ssl_` options to take effect). [id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"] ===== `ssl_extra_chain_certs` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` An Array of extra X509 certificates to be added to the certificate chain. @@ -177,7 +177,7 @@ Useful when the CA chain is not necessary in the system store. [id="{version}-plugins-{type}s-{plugin}-ssl_key"] ===== `ssl_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL key path @@ -185,7 +185,7 @@ SSL key path [id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] ===== `ssl_key_passphrase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * Default value is `nil` SSL key passphrase @@ -193,7 +193,7 @@ SSL key passphrase [id="{version}-plugins-{type}s-{plugin}-ssl_verify"] ===== `ssl_verify` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Verify the identity of the other end of the SSL connection against the CA. 
diff --git a/docs/versioned-plugins/inputs/tcp-v5.0.0.asciidoc b/docs/versioned-plugins/inputs/tcp-v5.0.0.asciidoc index 9876455c5..a0746ae4d 100644 --- a/docs/versioned-plugins/inputs/tcp-v5.0.0.asciidoc +++ b/docs/versioned-plugins/inputs/tcp-v5.0.0.asciidoc @@ -77,16 +77,16 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes +| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |{logstash-ref}/configuration-file-structure.html#array[array]|No | <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -97,7 +97,7 @@ input plugins. [id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"0.0.0.0"` When mode is `server`, the address to listen on. @@ -116,7 +116,7 @@ Mode to operate in. `server` listens for client connections, ===== `port` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. When mode is `server`, the port to listen on. @@ -125,7 +125,7 @@ When mode is `client`, the port to connect to. 
[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"] ===== `proxy_protocol` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Proxy protocol support, only v1 is supported at this time @@ -134,7 +134,7 @@ http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt [id="{version}-plugins-{type}s-{plugin}-ssl_cert"] ===== `ssl_cert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL certificate path @@ -142,7 +142,7 @@ SSL certificate path [id="{version}-plugins-{type}s-{plugin}-ssl_enable"] ===== `ssl_enable` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable SSL (must be set for other `ssl_` options to take effect). @@ -150,7 +150,7 @@ Enable SSL (must be set for other `ssl_` options to take effect). [id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"] ===== `ssl_extra_chain_certs` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` An Array of extra X509 certificates to be added to the certificate chain. @@ -159,7 +159,7 @@ Useful when the CA chain is not necessary in the system store. [id="{version}-plugins-{type}s-{plugin}-ssl_key"] ===== `ssl_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL key path @@ -167,7 +167,7 @@ SSL key path [id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] ===== `ssl_key_passphrase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * Default value is `nil` SSL key passphrase @@ -175,7 +175,7 @@ SSL key passphrase [id="{version}-plugins-{type}s-{plugin}-ssl_verify"] ===== `ssl_verify` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Verify the identity of the other end of the SSL connection against the CA. 
diff --git a/docs/versioned-plugins/inputs/tcp-v5.0.1.asciidoc b/docs/versioned-plugins/inputs/tcp-v5.0.1.asciidoc index dbccd54b2..ed31c2842 100644 --- a/docs/versioned-plugins/inputs/tcp-v5.0.1.asciidoc +++ b/docs/versioned-plugins/inputs/tcp-v5.0.1.asciidoc @@ -77,16 +77,16 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes +| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |{logstash-ref}/configuration-file-structure.html#array[array]|No | <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -97,7 +97,7 @@ input plugins. [id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"0.0.0.0"` When mode is `server`, the address to listen on. @@ -116,7 +116,7 @@ Mode to operate in. `server` listens for client connections, ===== `port` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. When mode is `server`, the port to listen on. @@ -125,7 +125,7 @@ When mode is `client`, the port to connect to. 
[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"] ===== `proxy_protocol` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Proxy protocol support, only v1 is supported at this time @@ -134,7 +134,7 @@ http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt [id="{version}-plugins-{type}s-{plugin}-ssl_cert"] ===== `ssl_cert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL certificate path @@ -142,7 +142,7 @@ SSL certificate path [id="{version}-plugins-{type}s-{plugin}-ssl_enable"] ===== `ssl_enable` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable SSL (must be set for other `ssl_` options to take effect). @@ -150,7 +150,7 @@ Enable SSL (must be set for other `ssl_` options to take effect). [id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"] ===== `ssl_extra_chain_certs` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` An Array of extra X509 certificates to be added to the certificate chain. @@ -159,7 +159,7 @@ Useful when the CA chain is not necessary in the system store. [id="{version}-plugins-{type}s-{plugin}-ssl_key"] ===== `ssl_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL key path @@ -167,7 +167,7 @@ SSL key path [id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] ===== `ssl_key_passphrase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * Default value is `nil` SSL key passphrase @@ -175,7 +175,7 @@ SSL key passphrase [id="{version}-plugins-{type}s-{plugin}-ssl_verify"] ===== `ssl_verify` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Verify the identity of the other end of the SSL connection against the CA. 
diff --git a/docs/versioned-plugins/inputs/tcp-v5.0.2.asciidoc b/docs/versioned-plugins/inputs/tcp-v5.0.2.asciidoc index 7ec7440c3..f73a5d5cc 100644 --- a/docs/versioned-plugins/inputs/tcp-v5.0.2.asciidoc +++ b/docs/versioned-plugins/inputs/tcp-v5.0.2.asciidoc @@ -77,16 +77,16 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes +| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |{logstash-ref}/configuration-file-structure.html#array[array]|No | <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -97,7 +97,7 @@ input plugins. [id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"0.0.0.0"` When mode is `server`, the address to listen on. @@ -116,7 +116,7 @@ Mode to operate in. `server` listens for client connections, ===== `port` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. When mode is `server`, the port to listen on. @@ -125,7 +125,7 @@ When mode is `client`, the port to connect to. 
[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"] ===== `proxy_protocol` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Proxy protocol support, only v1 is supported at this time @@ -134,7 +134,7 @@ http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt [id="{version}-plugins-{type}s-{plugin}-ssl_cert"] ===== `ssl_cert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL certificate path @@ -142,7 +142,7 @@ SSL certificate path [id="{version}-plugins-{type}s-{plugin}-ssl_enable"] ===== `ssl_enable` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable SSL (must be set for other `ssl_` options to take effect). @@ -150,7 +150,7 @@ Enable SSL (must be set for other `ssl_` options to take effect). [id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"] ===== `ssl_extra_chain_certs` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` An Array of extra X509 certificates to be added to the certificate chain. @@ -159,7 +159,7 @@ Useful when the CA chain is not necessary in the system store. [id="{version}-plugins-{type}s-{plugin}-ssl_key"] ===== `ssl_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL key path @@ -167,7 +167,7 @@ SSL key path [id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] ===== `ssl_key_passphrase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * Default value is `nil` SSL key passphrase @@ -175,7 +175,7 @@ SSL key passphrase [id="{version}-plugins-{type}s-{plugin}-ssl_verify"] ===== `ssl_verify` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Verify the identity of the other end of the SSL connection against the CA. 
diff --git a/docs/versioned-plugins/inputs/tcp-v5.0.3.asciidoc b/docs/versioned-plugins/inputs/tcp-v5.0.3.asciidoc index 9ae075697..f7a996644 100644 --- a/docs/versioned-plugins/inputs/tcp-v5.0.3.asciidoc +++ b/docs/versioned-plugins/inputs/tcp-v5.0.3.asciidoc @@ -77,16 +77,16 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-mode>> |<>, one of `["server", "client"]`|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-mode>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["server", "client"]`|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|Yes +| <<{version}-plugins-{type}s-{plugin}-proxy_protocol>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-ssl_cert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_enable>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs>> |{logstash-ref}/configuration-file-structure.html#array[array]|No | <<{version}-plugins-{type}s-{plugin}-ssl_key>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-ssl_key_passphrase>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_verify>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -97,7 +97,7 @@ input plugins. [id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"0.0.0.0"` When mode is `server`, the address to listen on. @@ -116,7 +116,7 @@ Mode to operate in. `server` listens for client connections, ===== `port` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. When mode is `server`, the port to listen on. @@ -125,7 +125,7 @@ When mode is `client`, the port to connect to. 
[id="{version}-plugins-{type}s-{plugin}-proxy_protocol"] ===== `proxy_protocol` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Proxy protocol support, only v1 is supported at this time @@ -134,7 +134,7 @@ http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt [id="{version}-plugins-{type}s-{plugin}-ssl_cert"] ===== `ssl_cert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL certificate path @@ -142,7 +142,7 @@ SSL certificate path [id="{version}-plugins-{type}s-{plugin}-ssl_enable"] ===== `ssl_enable` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable SSL (must be set for other `ssl_` options to take effect). @@ -150,7 +150,7 @@ Enable SSL (must be set for other `ssl_` options to take effect). [id="{version}-plugins-{type}s-{plugin}-ssl_extra_chain_certs"] ===== `ssl_extra_chain_certs` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` An Array of extra X509 certificates to be added to the certificate chain. @@ -159,7 +159,7 @@ Useful when the CA chain is not necessary in the system store. [id="{version}-plugins-{type}s-{plugin}-ssl_key"] ===== `ssl_key` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. SSL key path @@ -167,7 +167,7 @@ SSL key path [id="{version}-plugins-{type}s-{plugin}-ssl_key_passphrase"] ===== `ssl_key_passphrase` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * Default value is `nil` SSL key passphrase @@ -175,7 +175,7 @@ SSL key passphrase [id="{version}-plugins-{type}s-{plugin}-ssl_verify"] ===== `ssl_verify` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Verify the identity of the other end of the SSL connection against the CA. diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.3.2.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.3.2.asciidoc index c4445c489..63c7a398f 100644 --- a/docs/versioned-plugins/outputs/elasticsearch-v7.3.2.asciidoc +++ b/docs/versioned-plugins/outputs/elasticsearch-v7.3.2.asciidoc @@ -7,12 +7,12 @@ START - GENERATED VARIABLES, DO NOT EDIT! :version: v7.3.2 :release_date: 2017-05-26 :changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v7.3.2/CHANGELOG.md -:include_path: ../../../logstash/docs/include +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -[id="plugins-{type}-{plugin}"] +[id="{version}-plugins-{type}s-{plugin}"] === Elasticsearch @@ -97,62 +97,62 @@ setting in their Logstash config file. [id="{version}-plugins-{type}s-{plugin}-options"] ==== Elasticsearch Output Configuration Options -This plugin supports the following configuration options plus the <> described later. +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. 
[cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No -| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No -| 
<<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No -| 
<<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No | <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No |======================================================================= -Also see <> for a list of options supported by all +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all output plugins.   @@ -160,7 +160,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-action"] ===== `action` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"index"` Protocol agnostic (i.e. non-http, non-java specific) configs go here @@ -181,7 +181,7 @@ For more details on actions, check out the http://www.elastic.co/guide/en/elasti [id="{version}-plugins-{type}s-{plugin}-bulk_path"] ===== `bulk_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to perform the _bulk requests to @@ -190,7 +190,7 @@ this defaults to a concatenation of the path parameter and "_bulk" [id="{version}-plugins-{type}s-{plugin}-cacert"] ===== `cacert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The .cer or .pem file to validate the server's certificate @@ -198,7 +198,7 @@ The .cer or .pem file to validate the server's certificate [id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"] ===== `doc_as_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable `doc_as_upsert` for update mode. 
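For illustration, here is a minimal update-mode sketch combining `action`, `document_id`, and `doc_as_upsert`; the endpoint and the `[@metadata][doc_id]` field are hypothetical, not taken from this patch:

[source,ruby]
----
output {
  elasticsearch {
    hosts         => ["http://localhost:9200"]  # hypothetical endpoint
    action        => "update"                   # update instead of the default "index"
    document_id   => "%{[@metadata][doc_id]}"   # assumes each event carries this field
    doc_as_upsert => true                       # create the document when the ID is new
  }
}
----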
@@ -207,7 +207,7 @@ Create a new document with source if `document_id` doesn't exist in Elasticsearc [id="{version}-plugins-{type}s-{plugin}-document_id"] ===== `document_id` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document ID for the index. Useful for overwriting existing entries in @@ -216,7 +216,7 @@ Elasticsearch with the same ID. [id="{version}-plugins-{type}s-{plugin}-document_type"] ===== `document_type` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document type to write events to. Generally you should try to write only @@ -227,7 +227,7 @@ otherwise the document type will be assigned the value of 'logs' [id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"] ===== `failure_type_logging_whitelist` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Set the Elasticsearch errors in the whitelist that you don't want to log. @@ -238,7 +238,7 @@ which are `document_already_exists_exception`. ===== `flush_size` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. @@ -246,7 +246,7 @@ which are `document_already_exists_exception`. [id="{version}-plugins-{type}s-{plugin}-healthcheck_path"] ===== `healthcheck_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path where a HEAD request is sent when a backend is marked down @@ -257,7 +257,7 @@ If you have custom firewall rules you may need to change this [id="{version}-plugins-{type}s-{plugin}-hosts"] ===== `hosts` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * Default value is `[//127.0.0.1]` Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. @@ -275,7 +275,7 @@ Any special characters present in the URLs here MUST be URL escaped! This means [id="{version}-plugins-{type}s-{plugin}-http_compression"] ===== `http_compression` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond @@ -284,7 +284,7 @@ Enable gzip compression on requests. Note that response compression is on by def ===== `idle_flush_time` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` @@ -292,7 +292,7 @@ Enable gzip compression on requests. Note that response compression is on by def [id="{version}-plugins-{type}s-{plugin}-index"] ===== `index` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash-%{+YYYY.MM.dd}"` The index to write events to. This can be dynamic using the `%{foo}` syntax. 
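As an example of the dynamic `index` syntax just described, a daily-index sketch (the endpoint is hypothetical):

[source,ruby]
----
output {
  elasticsearch {
    hosts => ["http://localhost:9200"]  # hypothetical endpoint
    # One index per day, named from the event timestamp in Joda format,
    # e.g. logstash-2017.11.30; a field reference such as %{type} also works.
    index => "logstash-%{+YYYY.MM.dd}"
  }
}
----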
@@ -306,7 +306,7 @@ Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/for [id="{version}-plugins-{type}s-{plugin}-keystore"] ===== `keystore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The keystore used to present a certificate to the server. @@ -315,7 +315,7 @@ It can be either .jks or .p12 [id="{version}-plugins-{type}s-{plugin}-keystore_password"] ===== `keystore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the keystore password @@ -323,7 +323,7 @@ Set the keystore password [id="{version}-plugins-{type}s-{plugin}-manage_template"] ===== `manage_template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` From Logstash 1.3 onwards, a template is applied to Elasticsearch during @@ -342,7 +342,7 @@ API to apply your templates manually. [id="{version}-plugins-{type}s-{plugin}-parameters"] ===== `parameters` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Pass a set of key value pairs as the URL query string. This query string is added @@ -352,7 +352,7 @@ urls that already have query strings, the one specified here will be appended. [id="{version}-plugins-{type}s-{plugin}-parent"] ===== `parent` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` For child documents, ID of the associated parent. @@ -361,7 +361,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-password"] ===== `password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Password to authenticate to a secure Elasticsearch cluster @@ -369,7 +369,7 @@ Password to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-path"] ===== `path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps @@ -380,7 +380,7 @@ not also set this field. That will raise an error at startup [id="{version}-plugins-{type}s-{plugin}-pipeline"] ===== `pipeline` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration @@ -389,7 +389,7 @@ here like `pipeline => "%{INGEST_PIPELINE}"` [id="{version}-plugins-{type}s-{plugin}-pool_max"] ===== `pool_max` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1000` While the output tries to reuse connections efficiently we have a maximum. @@ -400,7 +400,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] ===== `pool_max_per_route` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `100` While the output tries to reuse connections efficiently we have a maximum per endpoint. 
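A sketch of tuning the connection-pool caps described above; the hosts and values are illustrative only:

[source,ruby]
----
output {
  elasticsearch {
    hosts              => ["http://es1:9200", "http://es2:9200"]  # hypothetical hosts
    pool_max           => 1000  # total connections across all endpoints
    pool_max_per_route => 100   # connections per endpoint; excess requests wait
  }
}
----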
@@ -411,7 +411,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-proxy"] ===== `proxy` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * There is no default value for this setting. Set the address of a forward HTTP proxy. @@ -421,7 +421,7 @@ arguments of the URI type to prevent leaking credentials. [id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] ===== `resurrect_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How frequently, in seconds, to wait between resurrection attempts. @@ -431,7 +431,7 @@ to see if they have come back to life [id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] ===== `retry_initial_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval` @@ -439,7 +439,7 @@ Set initial interval in seconds between bulk retries. Doubled on each retry up t [id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] ===== `retry_max_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `64` Set max interval in seconds between bulk retries. @@ -447,7 +447,7 @@ Set max interval in seconds between bulk retries. [id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] ===== `retry_on_conflict` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The number of times Elasticsearch should internally retry an update/upserted document @@ -457,7 +457,7 @@ for more info [id="{version}-plugins-{type}s-{plugin}-routing"] ===== `routing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. A routing override to be applied to all processed events. @@ -466,7 +466,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-script"] ===== `script` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set script name for scripted update mode @@ -474,7 +474,7 @@ Set script name for scripted update mode [id="{version}-plugins-{type}s-{plugin}-script_lang"] ===== `script_lang` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"painless"` Set the language of the used script. 
If not set, this defaults to painless in ES 5.0 @@ -493,7 +493,7 @@ Define the type of script referenced by "script" variable [id="{version}-plugins-{type}s-{plugin}-script_var_name"] ===== `script_var_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"event"` Set variable name passed to script (scripted update) @@ -501,7 +501,7 @@ Set variable name passed to script (scripted update) [id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] ===== `scripted_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` if enabled, script is in charge of creating non-existent document (scripted update) @@ -509,7 +509,7 @@ if enabled, script is in charge of creating non-existent document (scripted upda [id="{version}-plugins-{type}s-{plugin}-sniffing"] ===== `sniffing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. @@ -521,7 +521,7 @@ manually enter multiple Elasticsearch hosts using the `hosts` parameter. [id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] ===== `sniffing_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How long to wait, in seconds, between sniffing attempts @@ -529,7 +529,7 @@ How long to wait, in seconds, between sniffing attempts [id="{version}-plugins-{type}s-{plugin}-sniffing_path"] ===== `sniffing_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to be used for the sniffing requests @@ -540,7 +540,7 @@ do not use full URL here, only paths, e.g. "/sniff/_nodes/http" [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * There is no default value for this setting. Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme @@ -550,7 +550,7 @@ If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS U [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] ===== `ssl_certificate_verification` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Option to validate the server's certificate. Disabling this severely compromises security. @@ -560,7 +560,7 @@ https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf [id="{version}-plugins-{type}s-{plugin}-template"] ===== `template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. You can set the path to your own template here, if you so desire. @@ -569,7 +569,7 @@ If not set, the included template will be used. [id="{version}-plugins-{type}s-{plugin}-template_name"] ===== `template_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash"` This configuration option defines how the template is named inside Elasticsearch. @@ -583,7 +583,7 @@ where `OldTemplateName` is whatever the former setting was. 
[id="{version}-plugins-{type}s-{plugin}-template_overwrite"] ===== `template_overwrite` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` The template_overwrite option will always overwrite the indicated template @@ -600,7 +600,7 @@ the "logstash" template (i.e. removing all customized settings) [id="{version}-plugins-{type}s-{plugin}-timeout"] ===== `timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Set the timeout, in seconds, for network operations and requests sent Elasticsearch. If @@ -609,7 +609,7 @@ a timeout occurs, the request will be retried. [id="{version}-plugins-{type}s-{plugin}-truststore"] ===== `truststore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The JKS truststore to validate the server's certificate. @@ -618,7 +618,7 @@ Use either `:truststore` or `:cacert` [id="{version}-plugins-{type}s-{plugin}-truststore_password"] ===== `truststore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the truststore password @@ -626,7 +626,7 @@ Set the truststore password [id="{version}-plugins-{type}s-{plugin}-upsert"] ===== `upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set upsert content for update mode.s @@ -635,7 +635,7 @@ Create a new document with this parameter as json string if `document_id` doesn' [id="{version}-plugins-{type}s-{plugin}-user"] ===== `user` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Username to authenticate to a secure Elasticsearch cluster @@ -643,7 +643,7 @@ Username to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] ===== `validate_after_inactivity` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. @@ -658,7 +658,7 @@ See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache [id="{version}-plugins-{type}s-{plugin}-version"] ===== `version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here. @@ -675,5 +675,5 @@ See https://www.elastic.co/blog/elasticsearch-versioning-support. See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types - +[id="{version}-plugins-{type}s-{plugin}-common-options"] include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.3.3.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.3.3.asciidoc index 381cdacb1..f080eb811 100644 --- a/docs/versioned-plugins/outputs/elasticsearch-v7.3.3.asciidoc +++ b/docs/versioned-plugins/outputs/elasticsearch-v7.3.3.asciidoc @@ -7,12 +7,12 @@ START - GENERATED VARIABLES, DO NOT EDIT! 
:version: v7.3.3 :release_date: 2017-06-05 :changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v7.3.3/CHANGELOG.md -:include_path: ../../../logstash/docs/include +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -[id="plugins-{type}-{plugin}"] +[id="{version}-plugins-{type}s-{plugin}"] === Elasticsearch @@ -97,62 +97,62 @@ setting in their Logstash config file. [id="{version}-plugins-{type}s-{plugin}-options"] ==== Elasticsearch Output Configuration Options -This plugin supports the following configuration options plus the <> described later. +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| 
<<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No -| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| 
<<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No | <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No |======================================================================= -Also see <> for a list of options supported by all +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all output plugins.   @@ -160,7 +160,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-action"] ===== `action` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"index"` Protocol agnostic (i.e. non-http, non-java specific) configs go here @@ -181,7 +181,7 @@ For more details on actions, check out the http://www.elastic.co/guide/en/elasti [id="{version}-plugins-{type}s-{plugin}-bulk_path"] ===== `bulk_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. 
HTTP Path to perform the _bulk requests to @@ -190,7 +190,7 @@ this defaults to a concatenation of the path parameter and "_bulk" [id="{version}-plugins-{type}s-{plugin}-cacert"] ===== `cacert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The .cer or .pem file to validate the server's certificate @@ -198,7 +198,7 @@ The .cer or .pem file to validate the server's certificate [id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"] ===== `doc_as_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable `doc_as_upsert` for update mode. @@ -207,7 +207,7 @@ Create a new document with source if `document_id` doesn't exist in Elasticsearc [id="{version}-plugins-{type}s-{plugin}-document_id"] ===== `document_id` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document ID for the index. Useful for overwriting existing entries in @@ -216,7 +216,7 @@ Elasticsearch with the same ID. [id="{version}-plugins-{type}s-{plugin}-document_type"] ===== `document_type` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document type to write events to. Generally you should try to write only @@ -227,7 +227,7 @@ otherwise the document type will be assigned the value of 'logs' [id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"] ===== `failure_type_logging_whitelist` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Set the Elasticsearch errors in the whitelist that you don't want to log. @@ -238,7 +238,7 @@ which are `document_already_exists_exception`. ===== `flush_size` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. @@ -246,7 +246,7 @@ which are `document_already_exists_exception`. [id="{version}-plugins-{type}s-{plugin}-healthcheck_path"] ===== `healthcheck_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path where a HEAD request is sent when a backend is marked down @@ -257,7 +257,7 @@ If you have custom firewall rules you may need to change this [id="{version}-plugins-{type}s-{plugin}-hosts"] ===== `hosts` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * Default value is `[//127.0.0.1]` Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. @@ -275,7 +275,7 @@ Any special characters present in the URLs here MUST be URL escaped! This means [id="{version}-plugins-{type}s-{plugin}-http_compression"] ===== `http_compression` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond @@ -284,7 +284,7 @@ Enable gzip compression on requests. 
Note that response compression is on by def ===== `idle_flush_time` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` @@ -292,7 +292,7 @@ Enable gzip compression on requests. Note that response compression is on by def [id="{version}-plugins-{type}s-{plugin}-index"] ===== `index` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash-%{+YYYY.MM.dd}"` The index to write events to. This can be dynamic using the `%{foo}` syntax. @@ -306,7 +306,7 @@ Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/for [id="{version}-plugins-{type}s-{plugin}-keystore"] ===== `keystore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The keystore used to present a certificate to the server. @@ -315,7 +315,7 @@ It can be either .jks or .p12 [id="{version}-plugins-{type}s-{plugin}-keystore_password"] ===== `keystore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the keystore password @@ -323,7 +323,7 @@ Set the keystore password [id="{version}-plugins-{type}s-{plugin}-manage_template"] ===== `manage_template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` From Logstash 1.3 onwards, a template is applied to Elasticsearch during @@ -342,7 +342,7 @@ API to apply your templates manually. [id="{version}-plugins-{type}s-{plugin}-parameters"] ===== `parameters` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Pass a set of key value pairs as the URL query string. This query string is added @@ -352,7 +352,7 @@ urls that already have query strings, the one specified here will be appended. [id="{version}-plugins-{type}s-{plugin}-parent"] ===== `parent` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` For child documents, ID of the associated parent. @@ -361,7 +361,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-password"] ===== `password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Password to authenticate to a secure Elasticsearch cluster @@ -369,7 +369,7 @@ Password to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-path"] ===== `path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps @@ -380,7 +380,7 @@ not also set this field. That will raise an error at startup [id="{version}-plugins-{type}s-{plugin}-pipeline"] ===== `pipeline` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Set which ingest pipeline you wish to execute for an event. 
You can also use event dependent configuration @@ -389,7 +389,7 @@ here like `pipeline => "%{INGEST_PIPELINE}"` [id="{version}-plugins-{type}s-{plugin}-pool_max"] ===== `pool_max` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1000` While the output tries to reuse connections efficiently we have a maximum. @@ -400,7 +400,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] ===== `pool_max_per_route` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `100` While the output tries to reuse connections efficiently we have a maximum per endpoint. @@ -411,7 +411,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-proxy"] ===== `proxy` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * There is no default value for this setting. Set the address of a forward HTTP proxy. @@ -421,7 +421,7 @@ arguments of the URI type to prevent leaking credentials. [id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] ===== `resurrect_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How frequently, in seconds, to wait between resurrection attempts. @@ -431,7 +431,7 @@ to see if they have come back to life [id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] ===== `retry_initial_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval` @@ -439,7 +439,7 @@ Set initial interval in seconds between bulk retries. Doubled on each retry up t [id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] ===== `retry_max_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `64` Set max interval in seconds between bulk retries. @@ -447,7 +447,7 @@ Set max interval in seconds between bulk retries. [id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] ===== `retry_on_conflict` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The number of times Elasticsearch should internally retry an update/upserted document @@ -457,7 +457,7 @@ for more info [id="{version}-plugins-{type}s-{plugin}-routing"] ===== `routing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. A routing override to be applied to all processed events. @@ -466,7 +466,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-script"] ===== `script` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set script name for scripted update mode @@ -474,7 +474,7 @@ Set script name for scripted update mode [id="{version}-plugins-{type}s-{plugin}-script_lang"] ===== `script_lang` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"painless"` Set the language of the used script. 
If not set, this defaults to painless in ES 5.0 @@ -493,7 +493,7 @@ Define the type of script referenced by "script" variable [id="{version}-plugins-{type}s-{plugin}-script_var_name"] ===== `script_var_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"event"` Set variable name passed to script (scripted update) @@ -501,7 +501,7 @@ Set variable name passed to script (scripted update) [id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] ===== `scripted_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` if enabled, script is in charge of creating non-existent document (scripted update) @@ -509,7 +509,7 @@ if enabled, script is in charge of creating non-existent document (scripted upda [id="{version}-plugins-{type}s-{plugin}-sniffing"] ===== `sniffing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. @@ -521,7 +521,7 @@ manually enter multiple Elasticsearch hosts using the `hosts` parameter. [id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] ===== `sniffing_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How long to wait, in seconds, between sniffing attempts @@ -529,7 +529,7 @@ How long to wait, in seconds, between sniffing attempts [id="{version}-plugins-{type}s-{plugin}-sniffing_path"] ===== `sniffing_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to be used for the sniffing requests @@ -540,7 +540,7 @@ do not use full URL here, only paths, e.g. "/sniff/_nodes/http" [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * There is no default value for this setting. Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme @@ -550,7 +550,7 @@ If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS U [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] ===== `ssl_certificate_verification` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Option to validate the server's certificate. Disabling this severely compromises security. @@ -560,7 +560,7 @@ https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf [id="{version}-plugins-{type}s-{plugin}-template"] ===== `template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. You can set the path to your own template here, if you so desire. @@ -569,7 +569,7 @@ If not set, the included template will be used. [id="{version}-plugins-{type}s-{plugin}-template_name"] ===== `template_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash"` This configuration option defines how the template is named inside Elasticsearch. @@ -583,7 +583,7 @@ where `OldTemplateName` is whatever the former setting was. 
[id="{version}-plugins-{type}s-{plugin}-template_overwrite"] ===== `template_overwrite` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` The template_overwrite option will always overwrite the indicated template @@ -600,7 +600,7 @@ the "logstash" template (i.e. removing all customized settings) [id="{version}-plugins-{type}s-{plugin}-timeout"] ===== `timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Set the timeout, in seconds, for network operations and requests sent Elasticsearch. If @@ -609,7 +609,7 @@ a timeout occurs, the request will be retried. [id="{version}-plugins-{type}s-{plugin}-truststore"] ===== `truststore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The JKS truststore to validate the server's certificate. @@ -618,7 +618,7 @@ Use either `:truststore` or `:cacert` [id="{version}-plugins-{type}s-{plugin}-truststore_password"] ===== `truststore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the truststore password @@ -626,7 +626,7 @@ Set the truststore password [id="{version}-plugins-{type}s-{plugin}-upsert"] ===== `upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set upsert content for update mode.s @@ -635,7 +635,7 @@ Create a new document with this parameter as json string if `document_id` doesn' [id="{version}-plugins-{type}s-{plugin}-user"] ===== `user` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Username to authenticate to a secure Elasticsearch cluster @@ -643,7 +643,7 @@ Username to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] ===== `validate_after_inactivity` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. @@ -658,7 +658,7 @@ See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache [id="{version}-plugins-{type}s-{plugin}-version"] ===== `version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here. @@ -675,5 +675,5 @@ See https://www.elastic.co/blog/elasticsearch-versioning-support. See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types - +[id="{version}-plugins-{type}s-{plugin}-common-options"] include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.3.4.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.3.4.asciidoc index a0fd41326..26f6e40a8 100644 --- a/docs/versioned-plugins/outputs/elasticsearch-v7.3.4.asciidoc +++ b/docs/versioned-plugins/outputs/elasticsearch-v7.3.4.asciidoc @@ -7,12 +7,12 @@ START - GENERATED VARIABLES, DO NOT EDIT! 
:version: v7.3.4 :release_date: 2017-06-06 :changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v7.3.4/CHANGELOG.md -:include_path: ../../../logstash/docs/include +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -[id="plugins-{type}-{plugin}"] +[id="{version}-plugins-{type}s-{plugin}"] === Elasticsearch @@ -97,62 +97,62 @@ setting in their Logstash config file. [id="{version}-plugins-{type}s-{plugin}-options"] ==== Elasticsearch Output Configuration Options -This plugin supports the following configuration options plus the <> described later. +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| 
<<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No -| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| 
<<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No | <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No |======================================================================= -Also see <> for a list of options supported by all +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all output plugins.   @@ -160,7 +160,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-action"] ===== `action` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"index"` Protocol agnostic (i.e. non-http, non-java specific) configs go here @@ -181,7 +181,7 @@ For more details on actions, check out the http://www.elastic.co/guide/en/elasti [id="{version}-plugins-{type}s-{plugin}-bulk_path"] ===== `bulk_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. 
HTTP Path to perform the _bulk requests to @@ -190,7 +190,7 @@ this defaults to a concatenation of the path parameter and "_bulk" [id="{version}-plugins-{type}s-{plugin}-cacert"] ===== `cacert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The .cer or .pem file to validate the server's certificate @@ -198,7 +198,7 @@ The .cer or .pem file to validate the server's certificate [id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"] ===== `doc_as_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable `doc_as_upsert` for update mode. @@ -207,7 +207,7 @@ Create a new document with source if `document_id` doesn't exist in Elasticsearc [id="{version}-plugins-{type}s-{plugin}-document_id"] ===== `document_id` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document ID for the index. Useful for overwriting existing entries in @@ -216,7 +216,7 @@ Elasticsearch with the same ID. [id="{version}-plugins-{type}s-{plugin}-document_type"] ===== `document_type` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document type to write events to. Generally you should try to write only @@ -227,7 +227,7 @@ otherwise the document type will be assigned the value of 'logs' [id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"] ===== `failure_type_logging_whitelist` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Set the Elasticsearch errors in the whitelist that you don't want to log. @@ -238,7 +238,7 @@ which are `document_already_exists_exception`. ===== `flush_size` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. @@ -246,7 +246,7 @@ which are `document_already_exists_exception`. [id="{version}-plugins-{type}s-{plugin}-healthcheck_path"] ===== `healthcheck_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path where a HEAD request is sent when a backend is marked down @@ -257,7 +257,7 @@ If you have custom firewall rules you may need to change this [id="{version}-plugins-{type}s-{plugin}-hosts"] ===== `hosts` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * Default value is `[//127.0.0.1]` Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. @@ -275,7 +275,7 @@ Any special characters present in the URLs here MUST be URL escaped! This means [id="{version}-plugins-{type}s-{plugin}-http_compression"] ===== `http_compression` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond @@ -284,7 +284,7 @@ Enable gzip compression on requests. 
Note that response compression is on by def ===== `idle_flush_time` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` @@ -292,7 +292,7 @@ Enable gzip compression on requests. Note that response compression is on by def [id="{version}-plugins-{type}s-{plugin}-index"] ===== `index` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash-%{+YYYY.MM.dd}"` The index to write events to. This can be dynamic using the `%{foo}` syntax. @@ -306,7 +306,7 @@ Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/for [id="{version}-plugins-{type}s-{plugin}-keystore"] ===== `keystore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The keystore used to present a certificate to the server. @@ -315,7 +315,7 @@ It can be either .jks or .p12 [id="{version}-plugins-{type}s-{plugin}-keystore_password"] ===== `keystore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the keystore password @@ -323,7 +323,7 @@ Set the keystore password [id="{version}-plugins-{type}s-{plugin}-manage_template"] ===== `manage_template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` From Logstash 1.3 onwards, a template is applied to Elasticsearch during @@ -342,7 +342,7 @@ API to apply your templates manually. [id="{version}-plugins-{type}s-{plugin}-parameters"] ===== `parameters` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Pass a set of key value pairs as the URL query string. This query string is added @@ -352,7 +352,7 @@ urls that already have query strings, the one specified here will be appended. [id="{version}-plugins-{type}s-{plugin}-parent"] ===== `parent` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` For child documents, ID of the associated parent. @@ -361,7 +361,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-password"] ===== `password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Password to authenticate to a secure Elasticsearch cluster @@ -369,7 +369,7 @@ Password to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-path"] ===== `path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps @@ -380,7 +380,7 @@ not also set this field. That will raise an error at startup [id="{version}-plugins-{type}s-{plugin}-pipeline"] ===== `pipeline` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Set which ingest pipeline you wish to execute for an event. 
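As a minimal sketch of this setting (the host and the pipeline name `apache-ingest` are illustrative placeholders, not plugin defaults), a fixed ingest pipeline can be selected like this:

[source,ruby]
----
output {
  elasticsearch {
    hosts    => ["localhost:9200"]
    # Name of an ingest pipeline that must already exist in Elasticsearch
    # (illustrative placeholder).
    pipeline => "apache-ingest"
  }
}
----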
You can also use event dependent configuration @@ -389,7 +389,7 @@ here like `pipeline => "%{INGEST_PIPELINE}"` [id="{version}-plugins-{type}s-{plugin}-pool_max"] ===== `pool_max` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1000` While the output tries to reuse connections efficiently we have a maximum. @@ -400,7 +400,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] ===== `pool_max_per_route` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `100` While the output tries to reuse connections efficiently we have a maximum per endpoint. @@ -411,7 +411,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-proxy"] ===== `proxy` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * There is no default value for this setting. Set the address of a forward HTTP proxy. @@ -421,7 +421,7 @@ arguments of the URI type to prevent leaking credentials. [id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] ===== `resurrect_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How long, in seconds, to wait between resurrection attempts. @@ -431,7 +431,7 @@ to see if they have come back to life [id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] ===== `retry_initial_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval` @@ -439,7 +439,7 @@ Set initial interval in seconds between bulk retries. Doubled on each retry up t [id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] ===== `retry_max_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `64` Set max interval in seconds between bulk retries. @@ -447,7 +447,7 @@ Set max interval in seconds between bulk retries. [id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] ===== `retry_on_conflict` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The number of times Elasticsearch should internally retry an update or upserted document @@ -457,7 +457,7 @@ for more info [id="{version}-plugins-{type}s-{plugin}-routing"] ===== `routing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. A routing override to be applied to all processed events. @@ -466,7 +466,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-script"] ===== `script` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set script name for scripted update mode @@ -474,7 +474,7 @@ Set script name for scripted update mode [id="{version}-plugins-{type}s-{plugin}-script_lang"] ===== `script_lang` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"painless"` Set the language of the used script.
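To make the scripted-update settings above concrete, here is a hedged sketch; the stored script name `increment-visits` and the `%{visitor_id}` event field are hypothetical:

[source,ruby]
----
output {
  elasticsearch {
    hosts       => ["localhost:9200"]
    action      => "update"
    document_id => "%{visitor_id}"     # hypothetical event field holding the target ID
    script      => "increment-visits"  # hypothetical stored (indexed) script name
    script_type => "indexed"
    script_lang => "painless"
  }
}
----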
If not set, this defaults to painless in ES 5.0 @@ -493,7 +493,7 @@ Define the type of script referenced by "script" variable [id="{version}-plugins-{type}s-{plugin}-script_var_name"] ===== `script_var_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"event"` Set variable name passed to script (scripted update) @@ -501,7 +501,7 @@ Set variable name passed to script (scripted update) [id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] ===== `scripted_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` if enabled, script is in charge of creating non-existent document (scripted update) @@ -509,7 +509,7 @@ if enabled, script is in charge of creating non-existent document (scripted upda [id="{version}-plugins-{type}s-{plugin}-sniffing"] ===== `sniffing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. @@ -521,7 +521,7 @@ manually enter multiple Elasticsearch hosts using the `hosts` parameter. [id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] ===== `sniffing_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How long to wait, in seconds, between sniffing attempts @@ -529,7 +529,7 @@ How long to wait, in seconds, between sniffing attempts [id="{version}-plugins-{type}s-{plugin}-sniffing_path"] ===== `sniffing_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to be used for the sniffing requests @@ -540,7 +540,7 @@ do not use full URL here, only paths, e.g. "/sniff/_nodes/http" [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * There is no default value for this setting. Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme @@ -550,7 +550,7 @@ If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS U [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] ===== `ssl_certificate_verification` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Option to validate the server's certificate. Disabling this severely compromises security. @@ -560,7 +560,7 @@ https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf [id="{version}-plugins-{type}s-{plugin}-template"] ===== `template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. You can set the path to your own template here, if you so desire. @@ -569,7 +569,7 @@ If not set, the included template will be used. [id="{version}-plugins-{type}s-{plugin}-template_name"] ===== `template_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash"` This configuration option defines how the template is named inside Elasticsearch. @@ -583,7 +583,7 @@ where `OldTemplateName` is whatever the former setting was. 
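As a hedged sketch of the template options covered here (the filesystem path and template name are illustrative), with the `template_overwrite` flag that is described next:

[source,ruby]
----
output {
  elasticsearch {
    hosts              => ["localhost:9200"]
    manage_template    => true
    template           => "/etc/logstash/templates/custom.json"  # illustrative path
    template_name      => "custom-logstash"                      # illustrative name
    template_overwrite => true
  }
}
----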
[id="{version}-plugins-{type}s-{plugin}-template_overwrite"] ===== `template_overwrite` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` The template_overwrite option will always overwrite the indicated template @@ -600,7 +600,7 @@ the "logstash" template (i.e. removing all customized settings) [id="{version}-plugins-{type}s-{plugin}-timeout"] ===== `timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If @@ -609,7 +609,7 @@ a timeout occurs, the request will be retried. [id="{version}-plugins-{type}s-{plugin}-truststore"] ===== `truststore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The JKS truststore to validate the server's certificate. @@ -618,7 +618,7 @@ Use either `:truststore` or `:cacert` [id="{version}-plugins-{type}s-{plugin}-truststore_password"] ===== `truststore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the truststore password @@ -626,7 +626,7 @@ Set the truststore password [id="{version}-plugins-{type}s-{plugin}-upsert"] ===== `upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set upsert content for update mode. @@ -635,7 +635,7 @@ Create a new document with this parameter as json string if `document_id` doesn' [id="{version}-plugins-{type}s-{plugin}-user"] ===== `user` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Username to authenticate to a secure Elasticsearch cluster @@ -643,7 +643,7 @@ Username to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] ===== `validate_after_inactivity` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` How long to wait before checking whether the connection is stale prior to executing a request on a connection that uses keepalive. @@ -658,7 +658,7 @@ See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache [id="{version}-plugins-{type}s-{plugin}-version"] ===== `version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here. @@ -675,5 +675,5 @@ See https://www.elastic.co/blog/elasticsearch-versioning-support. See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types - +[id="{version}-plugins-{type}s-{plugin}-common-options"] include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.3.5.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.3.5.asciidoc index 7a76e1b95..1c9deab35 100644 --- a/docs/versioned-plugins/outputs/elasticsearch-v7.3.5.asciidoc +++ b/docs/versioned-plugins/outputs/elasticsearch-v7.3.5.asciidoc @@ -7,12 +7,12 @@ START - GENERATED VARIABLES, DO NOT EDIT!
:version: v7.3.5 :release_date: 2017-06-09 :changelog_url: https://github.com/logstash-plugins/logstash-output-elasticsearch/blob/v7.3.5/CHANGELOG.md -:include_path: ../../../logstash/docs/include +:include_path: ../../../../logstash/docs/include /////////////////////////////////////////// END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -[id="plugins-{type}-{plugin}"] +[id="{version}-plugins-{type}s-{plugin}"] === Elasticsearch @@ -97,62 +97,62 @@ setting in their Logstash config file. [id="{version}-plugins-{type}s-{plugin}-options"] ==== Elasticsearch Output Configuration Options -This plugin supports the following configuration options plus the <> described later. +This plugin supports the following configuration options plus the <<{version}-plugins-{type}s-{plugin}-common-options>> described later. [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| 
<<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No -| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| 
<<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No | <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No |======================================================================= -Also see <> for a list of options supported by all +Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all output plugins.   @@ -160,7 +160,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-action"] ===== `action` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"index"` Protocol agnostic (i.e. non-http, non-java specific) configs go here @@ -181,7 +181,7 @@ For more details on actions, check out the http://www.elastic.co/guide/en/elasti [id="{version}-plugins-{type}s-{plugin}-bulk_path"] ===== `bulk_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. 
HTTP Path to perform the _bulk requests to @@ -190,7 +190,7 @@ this defaults to a concatenation of the path parameter and "_bulk" [id="{version}-plugins-{type}s-{plugin}-cacert"] ===== `cacert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The .cer or .pem file to validate the server's certificate @@ -198,7 +198,7 @@ The .cer or .pem file to validate the server's certificate [id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"] ===== `doc_as_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable `doc_as_upsert` for update mode. @@ -207,7 +207,7 @@ Create a new document with source if `document_id` doesn't exist in Elasticsearc [id="{version}-plugins-{type}s-{plugin}-document_id"] ===== `document_id` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document ID for the index. Useful for overwriting existing entries in @@ -216,7 +216,7 @@ Elasticsearch with the same ID. [id="{version}-plugins-{type}s-{plugin}-document_type"] ===== `document_type` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document type to write events to. Generally you should try to write only @@ -227,7 +227,7 @@ otherwise the document type will be assigned the value of 'logs' [id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"] ===== `failure_type_logging_whitelist` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Set the Elasticsearch errors in the whitelist that you don't want to log. @@ -238,7 +238,7 @@ which are `document_already_exists_exception`. ===== `flush_size` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. @@ -246,7 +246,7 @@ which are `document_already_exists_exception`. [id="{version}-plugins-{type}s-{plugin}-healthcheck_path"] ===== `healthcheck_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path where a HEAD request is sent when a backend is marked down @@ -257,7 +257,7 @@ If you have custom firewall rules you may need to change this [id="{version}-plugins-{type}s-{plugin}-hosts"] ===== `hosts` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * Default value is `[//127.0.0.1]` Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. @@ -275,7 +275,7 @@ Any special characters present in the URLs here MUST be URL escaped! This means [id="{version}-plugins-{type}s-{plugin}-http_compression"] ===== `http_compression` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond @@ -284,7 +284,7 @@ Enable gzip compression on requests. 
Note that response compression is on by def ===== `idle_flush_time` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` @@ -292,7 +292,7 @@ Enable gzip compression on requests. Note that response compression is on by def [id="{version}-plugins-{type}s-{plugin}-index"] ===== `index` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash-%{+YYYY.MM.dd}"` The index to write events to. This can be dynamic using the `%{foo}` syntax. @@ -306,7 +306,7 @@ Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/for [id="{version}-plugins-{type}s-{plugin}-keystore"] ===== `keystore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The keystore used to present a certificate to the server. @@ -315,7 +315,7 @@ It can be either .jks or .p12 [id="{version}-plugins-{type}s-{plugin}-keystore_password"] ===== `keystore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the keystore password @@ -323,7 +323,7 @@ Set the keystore password [id="{version}-plugins-{type}s-{plugin}-manage_template"] ===== `manage_template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` From Logstash 1.3 onwards, a template is applied to Elasticsearch during @@ -342,7 +342,7 @@ API to apply your templates manually. [id="{version}-plugins-{type}s-{plugin}-parameters"] ===== `parameters` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Pass a set of key value pairs as the URL query string. This query string is added @@ -352,7 +352,7 @@ urls that already have query strings, the one specified here will be appended. [id="{version}-plugins-{type}s-{plugin}-parent"] ===== `parent` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` For child documents, ID of the associated parent. @@ -361,7 +361,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-password"] ===== `password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Password to authenticate to a secure Elasticsearch cluster @@ -369,7 +369,7 @@ Password to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-path"] ===== `path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps @@ -380,7 +380,7 @@ not also set this field. That will raise an error at startup [id="{version}-plugins-{type}s-{plugin}-pipeline"] ===== `pipeline` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Set which ingest pipeline you wish to execute for an event. 
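Pulling the security-related settings above together, a hedged sketch for writing to a secured cluster over TLS; the credentials, CA path, and pipeline name are placeholders:

[source,ruby]
----
output {
  elasticsearch {
    hosts    => ["https://es.example.org:9200"]
    user     => "logstash_writer"             # placeholder credentials
    password => "changeme"
    ssl      => true
    cacert   => "/etc/logstash/certs/ca.pem"  # illustrative CA certificate path
    pipeline => "ingest-default"              # placeholder pipeline name
  }
}
----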
You can also use event dependent configuration @@ -389,7 +389,7 @@ here like `pipeline => "%{INGEST_PIPELINE}"` [id="{version}-plugins-{type}s-{plugin}-pool_max"] ===== `pool_max` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1000` While the output tries to reuse connections efficiently we have a maximum. @@ -400,7 +400,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] ===== `pool_max_per_route` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `100` While the output tries to reuse connections efficiently we have a maximum per endpoint. @@ -411,7 +411,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-proxy"] ===== `proxy` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * There is no default value for this setting. Set the address of a forward HTTP proxy. @@ -421,7 +421,7 @@ arguments of the URI type to prevent leaking credentials. [id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] ===== `resurrect_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How long, in seconds, to wait between resurrection attempts. @@ -431,7 +431,7 @@ to see if they have come back to life [id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] ===== `retry_initial_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval` @@ -439,7 +439,7 @@ Set initial interval in seconds between bulk retries. Doubled on each retry up t [id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] ===== `retry_max_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `64` Set max interval in seconds between bulk retries. @@ -447,7 +447,7 @@ Set max interval in seconds between bulk retries. [id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] ===== `retry_on_conflict` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The number of times Elasticsearch should internally retry an update or upserted document @@ -457,7 +457,7 @@ for more info [id="{version}-plugins-{type}s-{plugin}-routing"] ===== `routing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. A routing override to be applied to all processed events. @@ -466,7 +466,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-script"] ===== `script` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set script name for scripted update mode @@ -474,7 +474,7 @@ Set script name for scripted update mode [id="{version}-plugins-{type}s-{plugin}-script_lang"] ===== `script_lang` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"painless"` Set the language of the used script.
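A hedged sketch of tuning the retry behavior described above; the values are illustrative, not recommendations:

[source,ruby]
----
output {
  elasticsearch {
    hosts                  => ["localhost:9200"]
    retry_initial_interval => 2   # first retry after 2 seconds, doubled each attempt
    retry_max_interval     => 64  # ceiling for the doubling backoff, in seconds
    retry_on_conflict      => 3   # illustrative bump from the default of 1
  }
}
----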
If not set, this defaults to painless in ES 5.0 @@ -493,7 +493,7 @@ Define the type of script referenced by "script" variable [id="{version}-plugins-{type}s-{plugin}-script_var_name"] ===== `script_var_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"event"` Set variable name passed to script (scripted update) @@ -501,7 +501,7 @@ Set variable name passed to script (scripted update) [id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] ===== `scripted_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` if enabled, script is in charge of creating non-existent document (scripted update) @@ -509,7 +509,7 @@ if enabled, script is in charge of creating non-existent document (scripted upda [id="{version}-plugins-{type}s-{plugin}-sniffing"] ===== `sniffing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. @@ -521,7 +521,7 @@ manually enter multiple Elasticsearch hosts using the `hosts` parameter. [id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] ===== `sniffing_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How long to wait, in seconds, between sniffing attempts @@ -529,7 +529,7 @@ How long to wait, in seconds, between sniffing attempts [id="{version}-plugins-{type}s-{plugin}-sniffing_path"] ===== `sniffing_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to be used for the sniffing requests @@ -540,7 +540,7 @@ do not use full URL here, only paths, e.g. "/sniff/_nodes/http" [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * There is no default value for this setting. Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme @@ -550,7 +550,7 @@ If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS U [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] ===== `ssl_certificate_verification` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Option to validate the server's certificate. Disabling this severely compromises security. @@ -560,7 +560,7 @@ https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf [id="{version}-plugins-{type}s-{plugin}-template"] ===== `template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. You can set the path to your own template here, if you so desire. @@ -569,7 +569,7 @@ If not set, the included template will be used. [id="{version}-plugins-{type}s-{plugin}-template_name"] ===== `template_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash"` This configuration option defines how the template is named inside Elasticsearch. @@ -583,7 +583,7 @@ where `OldTemplateName` is whatever the former setting was. 
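As a hedged sketch of the sniffing settings above (the seed host is illustrative):

[source,ruby]
----
output {
  elasticsearch {
    hosts          => ["es-seed.example.org:9200"]  # illustrative seed node
    sniffing       => true  # discover the remaining cluster nodes from the seed
    sniffing_delay => 10    # seconds between sniffing attempts
  }
}
----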
[id="{version}-plugins-{type}s-{plugin}-template_overwrite"] ===== `template_overwrite` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` The template_overwrite option will always overwrite the indicated template @@ -600,7 +600,7 @@ the "logstash" template (i.e. removing all customized settings) [id="{version}-plugins-{type}s-{plugin}-timeout"] ===== `timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If @@ -609,7 +609,7 @@ a timeout occurs, the request will be retried. [id="{version}-plugins-{type}s-{plugin}-truststore"] ===== `truststore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The JKS truststore to validate the server's certificate. @@ -618,7 +618,7 @@ Use either `:truststore` or `:cacert` [id="{version}-plugins-{type}s-{plugin}-truststore_password"] ===== `truststore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the truststore password @@ -626,7 +626,7 @@ Set the truststore password [id="{version}-plugins-{type}s-{plugin}-upsert"] ===== `upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set upsert content for update mode. @@ -635,7 +635,7 @@ Create a new document with this parameter as json string if `document_id` doesn' [id="{version}-plugins-{type}s-{plugin}-user"] ===== `user` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Username to authenticate to a secure Elasticsearch cluster @@ -643,7 +643,7 @@ Username to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] ===== `validate_after_inactivity` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` How long to wait before checking whether the connection is stale prior to executing a request on a connection that uses keepalive. @@ -658,7 +658,7 @@ See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache [id="{version}-plugins-{type}s-{plugin}-version"] ===== `version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here. @@ -675,5 +675,5 @@ See https://www.elastic.co/blog/elasticsearch-versioning-support. See also https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html#_version_types - +[id="{version}-plugins-{type}s-{plugin}-common-options"] include::{include_path}/{type}.asciidoc[] diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.3.6.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.3.6.asciidoc index d42111b16..f62ae43cf 100644 --- a/docs/versioned-plugins/outputs/elasticsearch-v7.3.6.asciidoc +++ b/docs/versioned-plugins/outputs/elasticsearch-v7.3.6.asciidoc @@ -12,7 +12,7 @@ START - GENERATED VARIABLES, DO NOT EDIT! END - GENERATED VARIABLES, DO NOT EDIT!
/////////////////////////////////////////// -[id="plugins-{type}-{plugin}"] +[id="{version}-plugins-{type}s-{plugin}"] === Elasticsearch output plugin {version} @@ -102,54 +102,54 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No 
-| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> 
|{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No | <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -160,7 +160,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-action"] ===== `action` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"index"` Protocol agnostic (i.e. non-http, non-java specific) configs go here @@ -181,7 +181,7 @@ For more details on actions, check out the http://www.elastic.co/guide/en/elasti [id="{version}-plugins-{type}s-{plugin}-bulk_path"] ===== `bulk_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to perform the _bulk requests to @@ -190,7 +190,7 @@ this defaults to a concatenation of the path parameter and "_bulk" [id="{version}-plugins-{type}s-{plugin}-cacert"] ===== `cacert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. 
The .cer or .pem file to validate the server's certificate @@ -198,7 +198,7 @@ The .cer or .pem file to validate the server's certificate [id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"] ===== `doc_as_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable `doc_as_upsert` for update mode. @@ -207,7 +207,7 @@ Create a new document with source if `document_id` doesn't exist in Elasticsearc [id="{version}-plugins-{type}s-{plugin}-document_id"] ===== `document_id` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document ID for the index. Useful for overwriting existing entries in @@ -216,7 +216,7 @@ Elasticsearch with the same ID. [id="{version}-plugins-{type}s-{plugin}-document_type"] ===== `document_type` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document type to write events to. Generally you should try to write only @@ -227,7 +227,7 @@ otherwise the document type will be assigned the value of 'logs' [id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"] ===== `failure_type_logging_whitelist` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Set the Elasticsearch errors in the whitelist that you don't want to log. @@ -238,7 +238,7 @@ which are `document_already_exists_exception`. ===== `flush_size` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. @@ -246,7 +246,7 @@ which are `document_already_exists_exception`. [id="{version}-plugins-{type}s-{plugin}-healthcheck_path"] ===== `healthcheck_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path where a HEAD request is sent when a backend is marked down @@ -257,7 +257,7 @@ If you have custom firewall rules you may need to change this [id="{version}-plugins-{type}s-{plugin}-hosts"] ===== `hosts` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * Default value is `[//127.0.0.1]` Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. @@ -275,7 +275,7 @@ Any special characters present in the URLs here MUST be URL escaped! This means [id="{version}-plugins-{type}s-{plugin}-http_compression"] ===== `http_compression` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond @@ -284,7 +284,7 @@ Enable gzip compression on requests. Note that response compression is on by def ===== `idle_flush_time` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` @@ -292,7 +292,7 @@ Enable gzip compression on requests. 
Note that response compression is on by def [id="{version}-plugins-{type}s-{plugin}-index"] ===== `index` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash-%{+YYYY.MM.dd}"` The index to write events to. This can be dynamic using the `%{foo}` syntax. @@ -306,7 +306,7 @@ Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/for [id="{version}-plugins-{type}s-{plugin}-keystore"] ===== `keystore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The keystore used to present a certificate to the server. @@ -315,7 +315,7 @@ It can be either .jks or .p12 [id="{version}-plugins-{type}s-{plugin}-keystore_password"] ===== `keystore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the keystore password @@ -323,7 +323,7 @@ Set the keystore password [id="{version}-plugins-{type}s-{plugin}-manage_template"] ===== `manage_template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` From Logstash 1.3 onwards, a template is applied to Elasticsearch during @@ -342,7 +342,7 @@ API to apply your templates manually. [id="{version}-plugins-{type}s-{plugin}-parameters"] ===== `parameters` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Pass a set of key value pairs as the URL query string. This query string is added @@ -352,7 +352,7 @@ urls that already have query strings, the one specified here will be appended. [id="{version}-plugins-{type}s-{plugin}-parent"] ===== `parent` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` For child documents, ID of the associated parent. @@ -361,7 +361,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-password"] ===== `password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Password to authenticate to a secure Elasticsearch cluster @@ -369,7 +369,7 @@ Password to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-path"] ===== `path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps @@ -380,7 +380,7 @@ not also set this field. That will raise an error at startup [id="{version}-plugins-{type}s-{plugin}-pipeline"] ===== `pipeline` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration @@ -389,7 +389,7 @@ here like `pipeline => "%{INGEST_PIPELINE}"` [id="{version}-plugins-{type}s-{plugin}-pool_max"] ===== `pool_max` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1000` While the output tries to reuse connections efficiently we have a maximum. 
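To make the indexing options above concrete, here is a minimal sketch (the field names are hypothetical, not from the plugin docs) that writes to a daily index, routes events through an ingest pipeline, and derives the document ID from an event field:

[source,ruby]
----
output {
  elasticsearch {
    hosts       => ["127.0.0.1:9200"]
    index       => "logstash-%{+YYYY.MM.dd}"   # daily indices via the Joda date syntax
    pipeline    => "%{INGEST_PIPELINE}"        # event-dependent ingest pipeline
    document_id => "%{fingerprint}"            # hypothetical field holding a precomputed ID
  }
}
----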
@@ -400,7 +400,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] ===== `pool_max_per_route` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `100` While the output tries to reuse connections efficiently we have a maximum per endpoint. @@ -411,7 +411,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-proxy"] ===== `proxy` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * There is no default value for this setting. Set the address of a forward HTTP proxy. @@ -421,7 +421,7 @@ arguments of the URI type to prevent leaking credentials. [id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] ===== `resurrect_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How frequently, in seconds, to wait between resurrection attempts. @@ -431,7 +431,7 @@ to see if they have come back to life [id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] ===== `retry_initial_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval` @@ -439,7 +439,7 @@ Set initial interval in seconds between bulk retries. Doubled on each retry up t [id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] ===== `retry_max_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `64` Set max interval in seconds between bulk retries. @@ -447,7 +447,7 @@ Set max interval in seconds between bulk retries. [id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] ===== `retry_on_conflict` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The number of times Elasticsearch should internally retry an update/upserted document @@ -457,7 +457,7 @@ for more info [id="{version}-plugins-{type}s-{plugin}-routing"] ===== `routing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. A routing override to be applied to all processed events. @@ -466,7 +466,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-script"] ===== `script` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set script name for scripted update mode @@ -474,7 +474,7 @@ Set script name for scripted update mode [id="{version}-plugins-{type}s-{plugin}-script_lang"] ===== `script_lang` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"painless"` Set the language of the used script. 
If not set, this defaults to painless in ES 5.0 @@ -493,7 +493,7 @@ Define the type of script referenced by "script" variable [id="{version}-plugins-{type}s-{plugin}-script_var_name"] ===== `script_var_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"event"` Set variable name passed to script (scripted update) @@ -501,7 +501,7 @@ Set variable name passed to script (scripted update) [id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] ===== `scripted_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` if enabled, script is in charge of creating non-existent document (scripted update) @@ -509,7 +509,7 @@ if enabled, script is in charge of creating non-existent document (scripted upda [id="{version}-plugins-{type}s-{plugin}-sniffing"] ===== `sniffing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. @@ -521,7 +521,7 @@ manually enter multiple Elasticsearch hosts using the `hosts` parameter. [id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] ===== `sniffing_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How long to wait, in seconds, between sniffing attempts @@ -529,7 +529,7 @@ How long to wait, in seconds, between sniffing attempts [id="{version}-plugins-{type}s-{plugin}-sniffing_path"] ===== `sniffing_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to be used for the sniffing requests @@ -540,7 +540,7 @@ do not use full URL here, only paths, e.g. "/sniff/_nodes/http" [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * There is no default value for this setting. Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme @@ -550,7 +550,7 @@ If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS U [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] ===== `ssl_certificate_verification` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Option to validate the server's certificate. Disabling this severely compromises security. @@ -560,7 +560,7 @@ https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf [id="{version}-plugins-{type}s-{plugin}-template"] ===== `template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. You can set the path to your own template here, if you so desire. @@ -569,7 +569,7 @@ If not set, the included template will be used. [id="{version}-plugins-{type}s-{plugin}-template_name"] ===== `template_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash"` This configuration option defines how the template is named inside Elasticsearch. @@ -583,7 +583,7 @@ where `OldTemplateName` is whatever the former setting was. 
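A short sketch of the sniffing behavior described above, with placeholder host and timing values; the seed host is only a starting point, and nodes discovered by sniffing are added to the list:

[source,ruby]
----
output {
  elasticsearch {
    hosts          => ["http://seed.example.org:9200"]  # seed node; sniffed nodes are added to this list
    sniffing       => true
    sniffing_delay => 30                     # seconds between sniffing attempts
    sniffing_path  => "/sniff/_nodes/http"   # a path only, never a full URL
  }
}
----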
[id="{version}-plugins-{type}s-{plugin}-template_overwrite"] ===== `template_overwrite` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` The template_overwrite option will always overwrite the indicated template @@ -600,7 +600,7 @@ the "logstash" template (i.e. removing all customized settings) [id="{version}-plugins-{type}s-{plugin}-timeout"] ===== `timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Set the timeout, in seconds, for network operations and requests sent Elasticsearch. If @@ -609,7 +609,7 @@ a timeout occurs, the request will be retried. [id="{version}-plugins-{type}s-{plugin}-truststore"] ===== `truststore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The JKS truststore to validate the server's certificate. @@ -618,7 +618,7 @@ Use either `:truststore` or `:cacert` [id="{version}-plugins-{type}s-{plugin}-truststore_password"] ===== `truststore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the truststore password @@ -626,7 +626,7 @@ Set the truststore password [id="{version}-plugins-{type}s-{plugin}-upsert"] ===== `upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set upsert content for update mode.s @@ -635,7 +635,7 @@ Create a new document with this parameter as json string if `document_id` doesn' [id="{version}-plugins-{type}s-{plugin}-user"] ===== `user` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Username to authenticate to a secure Elasticsearch cluster @@ -643,7 +643,7 @@ Username to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] ===== `validate_after_inactivity` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. @@ -658,7 +658,7 @@ See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache [id="{version}-plugins-{type}s-{plugin}-version"] ===== `version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here. 
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.3.7.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.3.7.asciidoc index b04856a5f..f30c7938a 100644 --- a/docs/versioned-plugins/outputs/elasticsearch-v7.3.7.asciidoc +++ b/docs/versioned-plugins/outputs/elasticsearch-v7.3.7.asciidoc @@ -102,54 +102,54 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No -| 
<<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No -| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> 
|{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No | <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -160,7 +160,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-action"] ===== `action` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"index"` Protocol agnostic (i.e. non-http, non-java specific) configs go here @@ -181,7 +181,7 @@ For more details on actions, check out the http://www.elastic.co/guide/en/elasti [id="{version}-plugins-{type}s-{plugin}-bulk_path"] ===== `bulk_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to perform the _bulk requests to @@ -190,7 +190,7 @@ this defaults to a concatenation of the path parameter and "_bulk" [id="{version}-plugins-{type}s-{plugin}-cacert"] ===== `cacert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. 
The .cer or .pem file to validate the server's certificate @@ -198,7 +198,7 @@ The .cer or .pem file to validate the server's certificate [id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"] ===== `doc_as_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable `doc_as_upsert` for update mode. @@ -207,7 +207,7 @@ Create a new document with source if `document_id` doesn't exist in Elasticsearc [id="{version}-plugins-{type}s-{plugin}-document_id"] ===== `document_id` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document ID for the index. Useful for overwriting existing entries in @@ -216,7 +216,7 @@ Elasticsearch with the same ID. [id="{version}-plugins-{type}s-{plugin}-document_type"] ===== `document_type` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document type to write events to. Generally you should try to write only @@ -227,7 +227,7 @@ otherwise the document type will be assigned the value of 'logs' [id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"] ===== `failure_type_logging_whitelist` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Set the Elasticsearch errors in the whitelist that you don't want to log. @@ -238,7 +238,7 @@ which are `document_already_exists_exception`. ===== `flush_size` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. @@ -246,7 +246,7 @@ which are `document_already_exists_exception`. [id="{version}-plugins-{type}s-{plugin}-healthcheck_path"] ===== `healthcheck_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path where a HEAD request is sent when a backend is marked down @@ -257,7 +257,7 @@ If you have custom firewall rules you may need to change this [id="{version}-plugins-{type}s-{plugin}-hosts"] ===== `hosts` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * Default value is `[//127.0.0.1]` Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. @@ -275,7 +275,7 @@ Any special characters present in the URLs here MUST be URL escaped! This means [id="{version}-plugins-{type}s-{plugin}-http_compression"] ===== `http_compression` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond @@ -284,7 +284,7 @@ Enable gzip compression on requests. Note that response compression is on by def ===== `idle_flush_time` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` @@ -292,7 +292,7 @@ Enable gzip compression on requests. 
Note that response compression is on by def [id="{version}-plugins-{type}s-{plugin}-index"] ===== `index` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash-%{+YYYY.MM.dd}"` The index to write events to. This can be dynamic using the `%{foo}` syntax. @@ -306,7 +306,7 @@ Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/for [id="{version}-plugins-{type}s-{plugin}-keystore"] ===== `keystore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The keystore used to present a certificate to the server. @@ -315,7 +315,7 @@ It can be either .jks or .p12 [id="{version}-plugins-{type}s-{plugin}-keystore_password"] ===== `keystore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the keystore password @@ -323,7 +323,7 @@ Set the keystore password [id="{version}-plugins-{type}s-{plugin}-manage_template"] ===== `manage_template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` From Logstash 1.3 onwards, a template is applied to Elasticsearch during @@ -342,7 +342,7 @@ API to apply your templates manually. [id="{version}-plugins-{type}s-{plugin}-parameters"] ===== `parameters` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Pass a set of key value pairs as the URL query string. This query string is added @@ -352,7 +352,7 @@ urls that already have query strings, the one specified here will be appended. [id="{version}-plugins-{type}s-{plugin}-parent"] ===== `parent` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` For child documents, ID of the associated parent. @@ -361,7 +361,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-password"] ===== `password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Password to authenticate to a secure Elasticsearch cluster @@ -369,7 +369,7 @@ Password to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-path"] ===== `path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps @@ -380,7 +380,7 @@ not also set this field. That will raise an error at startup [id="{version}-plugins-{type}s-{plugin}-pipeline"] ===== `pipeline` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration @@ -389,7 +389,7 @@ here like `pipeline => "%{INGEST_PIPELINE}"` [id="{version}-plugins-{type}s-{plugin}-pool_max"] ===== `pool_max` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1000` While the output tries to reuse connections efficiently we have a maximum. 
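As an illustration of the `path` and `parameters` options described above (the gateway host and URL prefix are assumptions), an Elasticsearch cluster served behind a proxy that remaps the root URL could be reached with:

[source,ruby]
----
output {
  elasticsearch {
    hosts      => ["http://gateway.example.org:8080"]
    path       => "/es"                        # the proxy remaps the ES root URL to /es; do not repeat it in hosts
    parameters => { "profile" => "logstash" }  # appended to the query string of every request
  }
}
----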
@@ -400,7 +400,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] ===== `pool_max_per_route` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `100` While the output tries to reuse connections efficiently we have a maximum per endpoint. @@ -411,7 +411,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-proxy"] ===== `proxy` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * There is no default value for this setting. Set the address of a forward HTTP proxy. @@ -421,7 +421,7 @@ arguments of the URI type to prevent leaking credentials. [id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] ===== `resurrect_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How frequently, in seconds, to wait between resurrection attempts. @@ -431,7 +431,7 @@ to see if they have come back to life [id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] ===== `retry_initial_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval` @@ -439,7 +439,7 @@ Set initial interval in seconds between bulk retries. Doubled on each retry up t [id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] ===== `retry_max_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `64` Set max interval in seconds between bulk retries. @@ -447,7 +447,7 @@ Set max interval in seconds between bulk retries. [id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] ===== `retry_on_conflict` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The number of times Elasticsearch should internally retry an update/upserted document @@ -457,7 +457,7 @@ for more info [id="{version}-plugins-{type}s-{plugin}-routing"] ===== `routing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. A routing override to be applied to all processed events. @@ -466,7 +466,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-script"] ===== `script` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set script name for scripted update mode @@ -474,7 +474,7 @@ Set script name for scripted update mode [id="{version}-plugins-{type}s-{plugin}-script_lang"] ===== `script_lang` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"painless"` Set the language of the used script. 
If not set, this defaults to painless in ES 5.0 @@ -493,7 +493,7 @@ Define the type of script referenced by "script" variable [id="{version}-plugins-{type}s-{plugin}-script_var_name"] ===== `script_var_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"event"` Set variable name passed to script (scripted update) @@ -501,7 +501,7 @@ Set variable name passed to script (scripted update) [id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] ===== `scripted_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` if enabled, script is in charge of creating non-existent document (scripted update) @@ -509,7 +509,7 @@ if enabled, script is in charge of creating non-existent document (scripted upda [id="{version}-plugins-{type}s-{plugin}-sniffing"] ===== `sniffing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. @@ -521,7 +521,7 @@ manually enter multiple Elasticsearch hosts using the `hosts` parameter. [id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] ===== `sniffing_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How long to wait, in seconds, between sniffing attempts @@ -529,7 +529,7 @@ How long to wait, in seconds, between sniffing attempts [id="{version}-plugins-{type}s-{plugin}-sniffing_path"] ===== `sniffing_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to be used for the sniffing requests @@ -540,7 +540,7 @@ do not use full URL here, only paths, e.g. "/sniff/_nodes/http" [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * There is no default value for this setting. Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme @@ -550,7 +550,7 @@ If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS U [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] ===== `ssl_certificate_verification` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Option to validate the server's certificate. Disabling this severely compromises security. @@ -560,7 +560,7 @@ https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf [id="{version}-plugins-{type}s-{plugin}-template"] ===== `template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. You can set the path to your own template here, if you so desire. @@ -569,7 +569,7 @@ If not set, the included template will be used. [id="{version}-plugins-{type}s-{plugin}-template_name"] ===== `template_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash"` This configuration option defines how the template is named inside Elasticsearch. @@ -583,7 +583,7 @@ where `OldTemplateName` is whatever the former setting was. 
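Putting the scripting options together, one illustrative configuration (the script name and ID field are assumptions, not from the docs) runs a stored Painless script for each matching document:

[source,ruby]
----
output {
  elasticsearch {
    hosts           => ["127.0.0.1:9200"]
    action          => "update"
    document_id     => "%{id}"              # hypothetical event field
    script          => "increment-counter"  # assumes a script stored in the cluster under this name
    script_type     => "indexed"
    script_lang     => "painless"
    script_var_name => "event"              # the script reads its input from params.event
    scripted_upsert => true                 # the script also handles documents that do not exist yet
  }
}
----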
[id="{version}-plugins-{type}s-{plugin}-template_overwrite"] ===== `template_overwrite` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` The template_overwrite option will always overwrite the indicated template @@ -600,7 +600,7 @@ the "logstash" template (i.e. removing all customized settings) [id="{version}-plugins-{type}s-{plugin}-timeout"] ===== `timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Set the timeout, in seconds, for network operations and requests sent Elasticsearch. If @@ -609,7 +609,7 @@ a timeout occurs, the request will be retried. [id="{version}-plugins-{type}s-{plugin}-truststore"] ===== `truststore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The JKS truststore to validate the server's certificate. @@ -618,7 +618,7 @@ Use either `:truststore` or `:cacert` [id="{version}-plugins-{type}s-{plugin}-truststore_password"] ===== `truststore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the truststore password @@ -626,7 +626,7 @@ Set the truststore password [id="{version}-plugins-{type}s-{plugin}-upsert"] ===== `upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set upsert content for update mode.s @@ -635,7 +635,7 @@ Create a new document with this parameter as json string if `document_id` doesn' [id="{version}-plugins-{type}s-{plugin}-user"] ===== `user` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Username to authenticate to a secure Elasticsearch cluster @@ -643,7 +643,7 @@ Username to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] ===== `validate_after_inactivity` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. @@ -658,7 +658,7 @@ See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache [id="{version}-plugins-{type}s-{plugin}-version"] ===== `version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here. 
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.3.8.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.3.8.asciidoc index 57dbcc3fd..6056213a4 100644 --- a/docs/versioned-plugins/outputs/elasticsearch-v7.3.8.asciidoc +++ b/docs/versioned-plugins/outputs/elasticsearch-v7.3.8.asciidoc @@ -102,54 +102,54 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No -| 
<<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No -| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> 
|{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No | <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -160,7 +160,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-action"] ===== `action` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"index"` Protocol agnostic (i.e. non-http, non-java specific) configs go here @@ -181,7 +181,7 @@ For more details on actions, check out the http://www.elastic.co/guide/en/elasti [id="{version}-plugins-{type}s-{plugin}-bulk_path"] ===== `bulk_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to perform the _bulk requests to @@ -190,7 +190,7 @@ this defaults to a concatenation of the path parameter and "_bulk" [id="{version}-plugins-{type}s-{plugin}-cacert"] ===== `cacert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. 
The .cer or .pem file to validate the server's certificate @@ -198,7 +198,7 @@ The .cer or .pem file to validate the server's certificate [id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"] ===== `doc_as_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable `doc_as_upsert` for update mode. @@ -207,7 +207,7 @@ Create a new document with source if `document_id` doesn't exist in Elasticsearc [id="{version}-plugins-{type}s-{plugin}-document_id"] ===== `document_id` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document ID for the index. Useful for overwriting existing entries in @@ -216,7 +216,7 @@ Elasticsearch with the same ID. [id="{version}-plugins-{type}s-{plugin}-document_type"] ===== `document_type` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document type to write events to. Generally you should try to write only @@ -227,7 +227,7 @@ otherwise the document type will be assigned the value of 'logs' [id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"] ===== `failure_type_logging_whitelist` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Set the Elasticsearch errors in the whitelist that you don't want to log. @@ -238,7 +238,7 @@ which are `document_already_exists_exception`. ===== `flush_size` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. @@ -246,7 +246,7 @@ which are `document_already_exists_exception`. [id="{version}-plugins-{type}s-{plugin}-healthcheck_path"] ===== `healthcheck_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path where a HEAD request is sent when a backend is marked down @@ -257,7 +257,7 @@ If you have custom firewall rules you may need to change this [id="{version}-plugins-{type}s-{plugin}-hosts"] ===== `hosts` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * Default value is `[//127.0.0.1]` Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. @@ -275,7 +275,7 @@ Any special characters present in the URLs here MUST be URL escaped! This means [id="{version}-plugins-{type}s-{plugin}-http_compression"] ===== `http_compression` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond @@ -284,7 +284,7 @@ Enable gzip compression on requests. Note that response compression is on by def ===== `idle_flush_time` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` @@ -292,7 +292,7 @@ Enable gzip compression on requests. 
Note that response compression is on by def [id="{version}-plugins-{type}s-{plugin}-index"] ===== `index` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash-%{+YYYY.MM.dd}"` The index to write events to. This can be dynamic using the `%{foo}` syntax. @@ -306,7 +306,7 @@ Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/for [id="{version}-plugins-{type}s-{plugin}-keystore"] ===== `keystore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The keystore used to present a certificate to the server. @@ -315,7 +315,7 @@ It can be either .jks or .p12 [id="{version}-plugins-{type}s-{plugin}-keystore_password"] ===== `keystore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the keystore password @@ -323,7 +323,7 @@ Set the keystore password [id="{version}-plugins-{type}s-{plugin}-manage_template"] ===== `manage_template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` From Logstash 1.3 onwards, a template is applied to Elasticsearch during @@ -342,7 +342,7 @@ API to apply your templates manually. [id="{version}-plugins-{type}s-{plugin}-parameters"] ===== `parameters` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Pass a set of key value pairs as the URL query string. This query string is added @@ -352,7 +352,7 @@ urls that already have query strings, the one specified here will be appended. [id="{version}-plugins-{type}s-{plugin}-parent"] ===== `parent` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` For child documents, ID of the associated parent. @@ -361,7 +361,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-password"] ===== `password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Password to authenticate to a secure Elasticsearch cluster @@ -369,7 +369,7 @@ Password to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-path"] ===== `path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps @@ -380,7 +380,7 @@ not also set this field. That will raise an error at startup [id="{version}-plugins-{type}s-{plugin}-pipeline"] ===== `pipeline` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration @@ -389,7 +389,7 @@ here like `pipeline => "%{INGEST_PIPELINE}"` [id="{version}-plugins-{type}s-{plugin}-pool_max"] ===== `pool_max` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1000` While the output tries to reuse connections efficiently we have a maximum. 
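For the keystore settings above, a minimal sketch (paths and passwords are placeholders) that presents a client certificate to the cluster:

[source,ruby]
----
output {
  elasticsearch {
    hosts             => ["https://es.example.org:9200"]
    ssl               => true
    keystore          => "/etc/logstash/client.p12"  # client certificate, .jks or .p12
    keystore_password => "changeme"
  }
}
----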
@@ -400,7 +400,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] ===== `pool_max_per_route` - * Value type is <<number,number>> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `100` While the output tries to reuse connections efficiently we have a maximum per endpoint. @@ -411,7 +411,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-proxy"] ===== `proxy` - * Value type is <<uri,uri>> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * There is no default value for this setting. Set the address of a forward HTTP proxy. @@ -421,7 +421,7 @@ arguments of the URI type to prevent leaking credentials. [id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] ===== `resurrect_delay` - * Value type is <<number,number>> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How frequently, in seconds, to wait between resurrection attempts. @@ -431,7 +431,7 @@ to see if they have come back to life [id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] ===== `retry_initial_interval` - * Value type is <<number,number>> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval` @@ -439,7 +439,7 @@ Set initial interval in seconds between bulk retries. Doubled on each retry up t [id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] ===== `retry_max_interval` - * Value type is <<number,number>> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `64` Set max interval in seconds between bulk retries. @@ -447,7 +447,7 @@ Set max interval in seconds between bulk retries. [id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] ===== `retry_on_conflict` - * Value type is <<number,number>> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The number of times Elasticsearch should internally retry an update/upserted document @@ -457,7 +457,7 @@ for more info [id="{version}-plugins-{type}s-{plugin}-routing"] ===== `routing` - * Value type is <<string,string>> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. A routing override to be applied to all processed events. @@ -466,7 +466,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-script"] ===== `script` - * Value type is <<string,string>> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set script name for scripted update mode @@ -474,7 +474,7 @@ Set script name for scripted update mode [id="{version}-plugins-{type}s-{plugin}-script_lang"] ===== `script_lang` - * Value type is <<string,string>> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"painless"` Set the language of the used script.
If not set, this defaults to painless in ES 5.0 @@ -493,7 +493,7 @@ Define the type of script referenced by "script" variable [id="{version}-plugins-{type}s-{plugin}-script_var_name"] ===== `script_var_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"event"` Set variable name passed to script (scripted update) @@ -501,7 +501,7 @@ Set variable name passed to script (scripted update) [id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] ===== `scripted_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` if enabled, script is in charge of creating non-existent document (scripted update) @@ -509,7 +509,7 @@ if enabled, script is in charge of creating non-existent document (scripted upda [id="{version}-plugins-{type}s-{plugin}-sniffing"] ===== `sniffing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. @@ -521,7 +521,7 @@ manually enter multiple Elasticsearch hosts using the `hosts` parameter. [id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] ===== `sniffing_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How long to wait, in seconds, between sniffing attempts @@ -529,7 +529,7 @@ How long to wait, in seconds, between sniffing attempts [id="{version}-plugins-{type}s-{plugin}-sniffing_path"] ===== `sniffing_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to be used for the sniffing requests @@ -540,7 +540,7 @@ do not use full URL here, only paths, e.g. "/sniff/_nodes/http" [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * There is no default value for this setting. Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme @@ -550,7 +550,7 @@ If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS U [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] ===== `ssl_certificate_verification` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Option to validate the server's certificate. Disabling this severely compromises security. @@ -560,7 +560,7 @@ https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf [id="{version}-plugins-{type}s-{plugin}-template"] ===== `template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. You can set the path to your own template here, if you so desire. @@ -569,7 +569,7 @@ If not set, the included template will be used. [id="{version}-plugins-{type}s-{plugin}-template_name"] ===== `template_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash"` This configuration option defines how the template is named inside Elasticsearch. @@ -583,7 +583,7 @@ where `OldTemplateName` is whatever the former setting was. 
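Taken together, the script-related options above support a scripted-update flow. A hedged sketch, not the canonical usage: the Painless one-liner and the `visits` and `user_id` fields are invented for illustration, and the event is exposed to the script under the name set by `script_var_name`:

[source,ruby]
----
output {
  elasticsearch {
    hosts           => ["localhost:9200"]
    action          => "update"
    document_id     => "%{user_id}"       # assumed event field
    script_type     => "inline"
    script_lang     => "painless"
    script_var_name => "event"
    script          => "ctx._source.visits = (ctx._source.visits ?: 0) + 1"
    scripted_upsert => true               # let the script create missing documents
  }
}
----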
[id="{version}-plugins-{type}s-{plugin}-template_overwrite"] ===== `template_overwrite` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` The template_overwrite option will always overwrite the indicated template @@ -600,7 +600,7 @@ the "logstash" template (i.e. removing all customized settings) [id="{version}-plugins-{type}s-{plugin}-timeout"] ===== `timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Set the timeout, in seconds, for network operations and requests sent Elasticsearch. If @@ -609,7 +609,7 @@ a timeout occurs, the request will be retried. [id="{version}-plugins-{type}s-{plugin}-truststore"] ===== `truststore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The JKS truststore to validate the server's certificate. @@ -618,7 +618,7 @@ Use either `:truststore` or `:cacert` [id="{version}-plugins-{type}s-{plugin}-truststore_password"] ===== `truststore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the truststore password @@ -626,7 +626,7 @@ Set the truststore password [id="{version}-plugins-{type}s-{plugin}-upsert"] ===== `upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set upsert content for update mode.s @@ -635,7 +635,7 @@ Create a new document with this parameter as json string if `document_id` doesn' [id="{version}-plugins-{type}s-{plugin}-user"] ===== `user` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Username to authenticate to a secure Elasticsearch cluster @@ -643,7 +643,7 @@ Username to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] ===== `validate_after_inactivity` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. @@ -658,7 +658,7 @@ See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache [id="{version}-plugins-{type}s-{plugin}-version"] ===== `version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here. 
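Since this file documents the security options together, one possible combination, with placeholder credentials and paths, might look like the sketch below. Per the notes above, `cacert` and `truststore` are alternatives, so only one is set:

[source,ruby]
----
output {
  elasticsearch {
    hosts    => ["https://es.example.com:9200"]
    ssl      => true
    cacert   => "/etc/logstash/certs/ca.pem"   # PEM CA used to verify the server
    user     => "logstash_writer"              # placeholder credentials
    password => "changeme"
  }
}
----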
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.4.0.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.4.0.asciidoc index 6e574e482..a6e79ece0 100644 --- a/docs/versioned-plugins/outputs/elasticsearch-v7.4.0.asciidoc +++ b/docs/versioned-plugins/outputs/elasticsearch-v7.4.0.asciidoc @@ -103,54 +103,54 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No -| 
<<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No -| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> 
|{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No | <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -161,7 +161,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-action"] ===== `action` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"index"` Protocol agnostic (i.e. non-http, non-java specific) configs go here @@ -182,7 +182,7 @@ For more details on actions, check out the http://www.elastic.co/guide/en/elasti [id="{version}-plugins-{type}s-{plugin}-bulk_path"] ===== `bulk_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to perform the _bulk requests to @@ -191,7 +191,7 @@ this defaults to a concatenation of the path parameter and "_bulk" [id="{version}-plugins-{type}s-{plugin}-cacert"] ===== `cacert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. 
The .cer or .pem file to validate the server's certificate @@ -199,7 +199,7 @@ The .cer or .pem file to validate the server's certificate [id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"] ===== `doc_as_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable `doc_as_upsert` for update mode. @@ -208,7 +208,7 @@ Create a new document with source if `document_id` doesn't exist in Elasticsearc [id="{version}-plugins-{type}s-{plugin}-document_id"] ===== `document_id` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document ID for the index. Useful for overwriting existing entries in @@ -217,7 +217,7 @@ Elasticsearch with the same ID. [id="{version}-plugins-{type}s-{plugin}-document_type"] ===== `document_type` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document type to write events to. Generally you should try to write only @@ -228,7 +228,7 @@ otherwise the document type will be assigned the value of 'logs' [id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"] ===== `failure_type_logging_whitelist` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Set the Elasticsearch errors in the whitelist that you don't want to log. @@ -239,7 +239,7 @@ which are `document_already_exists_exception`. ===== `flush_size` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. @@ -247,7 +247,7 @@ which are `document_already_exists_exception`. [id="{version}-plugins-{type}s-{plugin}-healthcheck_path"] ===== `healthcheck_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path where a HEAD request is sent when a backend is marked down @@ -258,7 +258,7 @@ If you have custom firewall rules you may need to change this [id="{version}-plugins-{type}s-{plugin}-hosts"] ===== `hosts` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * Default value is `[//127.0.0.1]` Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. @@ -276,7 +276,7 @@ Any special characters present in the URLs here MUST be URL escaped! This means [id="{version}-plugins-{type}s-{plugin}-http_compression"] ===== `http_compression` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond @@ -285,7 +285,7 @@ Enable gzip compression on requests. Note that response compression is on by def ===== `idle_flush_time` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` @@ -293,7 +293,7 @@ Enable gzip compression on requests. 
Note that response compression is on by def [id="{version}-plugins-{type}s-{plugin}-index"] ===== `index` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash-%{+YYYY.MM.dd}"` The index to write events to. This can be dynamic using the `%{foo}` syntax. @@ -307,7 +307,7 @@ Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/for [id="{version}-plugins-{type}s-{plugin}-keystore"] ===== `keystore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The keystore used to present a certificate to the server. @@ -316,7 +316,7 @@ It can be either .jks or .p12 [id="{version}-plugins-{type}s-{plugin}-keystore_password"] ===== `keystore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the keystore password @@ -324,7 +324,7 @@ Set the keystore password [id="{version}-plugins-{type}s-{plugin}-manage_template"] ===== `manage_template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` From Logstash 1.3 onwards, a template is applied to Elasticsearch during @@ -343,7 +343,7 @@ API to apply your templates manually. [id="{version}-plugins-{type}s-{plugin}-parameters"] ===== `parameters` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Pass a set of key value pairs as the URL query string. This query string is added @@ -353,7 +353,7 @@ urls that already have query strings, the one specified here will be appended. [id="{version}-plugins-{type}s-{plugin}-parent"] ===== `parent` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` For child documents, ID of the associated parent. @@ -362,7 +362,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-password"] ===== `password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Password to authenticate to a secure Elasticsearch cluster @@ -370,7 +370,7 @@ Password to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-path"] ===== `path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps @@ -381,7 +381,7 @@ not also set this field. That will raise an error at startup [id="{version}-plugins-{type}s-{plugin}-pipeline"] ===== `pipeline` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration @@ -390,7 +390,7 @@ here like `pipeline => "%{INGEST_PIPELINE}"` [id="{version}-plugins-{type}s-{plugin}-pool_max"] ===== `pool_max` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1000` While the output tries to reuse connections efficiently we have a maximum. 
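As a concrete illustration of the template options documented in this file, a sketch that ships a custom mapping template instead of the bundled one; the template path and name are placeholders:

[source,ruby]
----
output {
  elasticsearch {
    hosts              => ["localhost:9200"]
    index              => "myapp-%{+YYYY.MM.dd}"
    manage_template    => true
    template           => "/etc/logstash/templates/myapp.json"  # custom template file
    template_name      => "myapp"
    template_overwrite => true   # replace the installed template on startup
  }
}
----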
@@ -401,7 +401,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] ===== `pool_max_per_route` - * Value type is <<number,number>> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `100` While the output tries to reuse connections efficiently we have a maximum per endpoint. @@ -412,7 +412,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-proxy"] ===== `proxy` - * Value type is <<uri,uri>> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * There is no default value for this setting. Set the address of a forward HTTP proxy. @@ -422,7 +422,7 @@ arguments of the URI type to prevent leaking credentials. [id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] ===== `resurrect_delay` - * Value type is <<number,number>> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How frequently, in seconds, to wait between resurrection attempts. @@ -432,7 +432,7 @@ to see if they have come back to life [id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] ===== `retry_initial_interval` - * Value type is <<number,number>> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval` @@ -440,7 +440,7 @@ Set initial interval in seconds between bulk retries. Doubled on each retry up t [id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] ===== `retry_max_interval` - * Value type is <<number,number>> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `64` Set max interval in seconds between bulk retries. @@ -448,7 +448,7 @@ Set max interval in seconds between bulk retries. [id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] ===== `retry_on_conflict` - * Value type is <<number,number>> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The number of times Elasticsearch should internally retry an update/upserted document @@ -458,7 +458,7 @@ for more info [id="{version}-plugins-{type}s-{plugin}-routing"] ===== `routing` - * Value type is <<string,string>> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. A routing override to be applied to all processed events. @@ -467,7 +467,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-script"] ===== `script` - * Value type is <<string,string>> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set script name for scripted update mode @@ -475,7 +475,7 @@ Set script name for scripted update mode [id="{version}-plugins-{type}s-{plugin}-script_lang"] ===== `script_lang` - * Value type is <<string,string>> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"painless"` Set the language of the used script.
If not set, this defaults to painless in ES 5.0 @@ -494,7 +494,7 @@ Define the type of script referenced by "script" variable [id="{version}-plugins-{type}s-{plugin}-script_var_name"] ===== `script_var_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"event"` Set variable name passed to script (scripted update) @@ -502,7 +502,7 @@ Set variable name passed to script (scripted update) [id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] ===== `scripted_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` if enabled, script is in charge of creating non-existent document (scripted update) @@ -510,7 +510,7 @@ if enabled, script is in charge of creating non-existent document (scripted upda [id="{version}-plugins-{type}s-{plugin}-sniffing"] ===== `sniffing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. @@ -522,7 +522,7 @@ manually enter multiple Elasticsearch hosts using the `hosts` parameter. [id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] ===== `sniffing_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How long to wait, in seconds, between sniffing attempts @@ -530,7 +530,7 @@ How long to wait, in seconds, between sniffing attempts [id="{version}-plugins-{type}s-{plugin}-sniffing_path"] ===== `sniffing_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to be used for the sniffing requests @@ -541,7 +541,7 @@ do not use full URL here, only paths, e.g. "/sniff/_nodes/http" [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * There is no default value for this setting. Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme @@ -551,7 +551,7 @@ If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS U [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] ===== `ssl_certificate_verification` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Option to validate the server's certificate. Disabling this severely compromises security. @@ -561,7 +561,7 @@ https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf [id="{version}-plugins-{type}s-{plugin}-template"] ===== `template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. You can set the path to your own template here, if you so desire. @@ -570,7 +570,7 @@ If not set, the included template will be used. [id="{version}-plugins-{type}s-{plugin}-template_name"] ===== `template_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash"` This configuration option defines how the template is named inside Elasticsearch. @@ -584,7 +584,7 @@ where `OldTemplateName` is whatever the former setting was. 
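The retry and timeout options earlier in this file combine into a backoff policy: failed bulk requests are retried starting at `retry_initial_interval` seconds, doubling up to `retry_max_interval`. A sketch with deliberately conservative placeholder values:

[source,ruby]
----
output {
  elasticsearch {
    hosts                  => ["localhost:9200"]
    timeout                => 120   # allow slow bulk requests before retrying
    retry_initial_interval => 5     # first retry after 5 seconds
    retry_max_interval     => 300   # backing off to at most 5 minutes
    retry_on_conflict      => 3     # let ES retry conflicting updates internally
  }
}
----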
[id="{version}-plugins-{type}s-{plugin}-template_overwrite"] ===== `template_overwrite` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` The template_overwrite option will always overwrite the indicated template @@ -601,7 +601,7 @@ the "logstash" template (i.e. removing all customized settings) [id="{version}-plugins-{type}s-{plugin}-timeout"] ===== `timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Set the timeout, in seconds, for network operations and requests sent Elasticsearch. If @@ -610,7 +610,7 @@ a timeout occurs, the request will be retried. [id="{version}-plugins-{type}s-{plugin}-truststore"] ===== `truststore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The JKS truststore to validate the server's certificate. @@ -619,7 +619,7 @@ Use either `:truststore` or `:cacert` [id="{version}-plugins-{type}s-{plugin}-truststore_password"] ===== `truststore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the truststore password @@ -627,7 +627,7 @@ Set the truststore password [id="{version}-plugins-{type}s-{plugin}-upsert"] ===== `upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set upsert content for update mode.s @@ -636,7 +636,7 @@ Create a new document with this parameter as json string if `document_id` doesn' [id="{version}-plugins-{type}s-{plugin}-user"] ===== `user` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Username to authenticate to a secure Elasticsearch cluster @@ -644,7 +644,7 @@ Username to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] ===== `validate_after_inactivity` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. @@ -659,7 +659,7 @@ See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache [id="{version}-plugins-{type}s-{plugin}-version"] ===== `version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here. 
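A resilience-oriented sketch pulling together the host-management options documented above (`sniffing`, `resurrect_delay`, `healthcheck_path`); the hosts and intervals are placeholders:

[source,ruby]
----
output {
  elasticsearch {
    hosts            => ["http://es1.example.com:9200", "http://es2.example.com:9200"]
    sniffing         => true   # discover the rest of the cluster from these seeds
    sniffing_delay   => 10     # re-sniff every 10 seconds
    resurrect_delay  => 5      # probe marked-down backends every 5 seconds
    healthcheck_path => "/"    # target of the HEAD request sent to downed backends
  }
}
----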
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.4.1.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.4.1.asciidoc index 46279a757..d56e0a3db 100644 --- a/docs/versioned-plugins/outputs/elasticsearch-v7.4.1.asciidoc +++ b/docs/versioned-plugins/outputs/elasticsearch-v7.4.1.asciidoc @@ -103,54 +103,54 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No -| 
<<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No -| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> 
|{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No | <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -161,7 +161,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-action"] ===== `action` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"index"` Protocol agnostic (i.e. non-http, non-java specific) configs go here @@ -182,7 +182,7 @@ For more details on actions, check out the http://www.elastic.co/guide/en/elasti [id="{version}-plugins-{type}s-{plugin}-bulk_path"] ===== `bulk_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to perform the _bulk requests to @@ -191,7 +191,7 @@ this defaults to a concatenation of the path parameter and "_bulk" [id="{version}-plugins-{type}s-{plugin}-cacert"] ===== `cacert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. 
The .cer or .pem file to validate the server's certificate @@ -199,7 +199,7 @@ The .cer or .pem file to validate the server's certificate [id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"] ===== `doc_as_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable `doc_as_upsert` for update mode. @@ -208,7 +208,7 @@ Create a new document with source if `document_id` doesn't exist in Elasticsearc [id="{version}-plugins-{type}s-{plugin}-document_id"] ===== `document_id` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document ID for the index. Useful for overwriting existing entries in @@ -217,7 +217,7 @@ Elasticsearch with the same ID. [id="{version}-plugins-{type}s-{plugin}-document_type"] ===== `document_type` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document type to write events to. Generally you should try to write only @@ -228,7 +228,7 @@ otherwise the document type will be assigned the value of 'logs' [id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"] ===== `failure_type_logging_whitelist` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Set the Elasticsearch errors in the whitelist that you don't want to log. @@ -239,7 +239,7 @@ which are `document_already_exists_exception`. ===== `flush_size` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. @@ -247,7 +247,7 @@ which are `document_already_exists_exception`. [id="{version}-plugins-{type}s-{plugin}-healthcheck_path"] ===== `healthcheck_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path where a HEAD request is sent when a backend is marked down @@ -258,7 +258,7 @@ If you have custom firewall rules you may need to change this [id="{version}-plugins-{type}s-{plugin}-hosts"] ===== `hosts` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * Default value is `[//127.0.0.1]` Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. @@ -276,7 +276,7 @@ Any special characters present in the URLs here MUST be URL escaped! This means [id="{version}-plugins-{type}s-{plugin}-http_compression"] ===== `http_compression` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond @@ -285,7 +285,7 @@ Enable gzip compression on requests. Note that response compression is on by def ===== `idle_flush_time` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` @@ -293,7 +293,7 @@ Enable gzip compression on requests. 
Note that response compression is on by def [id="{version}-plugins-{type}s-{plugin}-index"] ===== `index` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash-%{+YYYY.MM.dd}"` The index to write events to. This can be dynamic using the `%{foo}` syntax. @@ -307,7 +307,7 @@ Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/for [id="{version}-plugins-{type}s-{plugin}-keystore"] ===== `keystore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The keystore used to present a certificate to the server. @@ -316,7 +316,7 @@ It can be either .jks or .p12 [id="{version}-plugins-{type}s-{plugin}-keystore_password"] ===== `keystore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the keystore password @@ -324,7 +324,7 @@ Set the keystore password [id="{version}-plugins-{type}s-{plugin}-manage_template"] ===== `manage_template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` From Logstash 1.3 onwards, a template is applied to Elasticsearch during @@ -343,7 +343,7 @@ API to apply your templates manually. [id="{version}-plugins-{type}s-{plugin}-parameters"] ===== `parameters` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Pass a set of key value pairs as the URL query string. This query string is added @@ -353,7 +353,7 @@ urls that already have query strings, the one specified here will be appended. [id="{version}-plugins-{type}s-{plugin}-parent"] ===== `parent` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` For child documents, ID of the associated parent. @@ -362,7 +362,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-password"] ===== `password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Password to authenticate to a secure Elasticsearch cluster @@ -370,7 +370,7 @@ Password to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-path"] ===== `path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps @@ -381,7 +381,7 @@ not also set this field. That will raise an error at startup [id="{version}-plugins-{type}s-{plugin}-pipeline"] ===== `pipeline` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration @@ -390,7 +390,7 @@ here like `pipeline => "%{INGEST_PIPELINE}"` [id="{version}-plugins-{type}s-{plugin}-pool_max"] ===== `pool_max` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1000` While the output tries to reuse connections efficiently we have a maximum. 
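For deployments that must traverse a forward proxy, the `proxy` and `parameters` options shown in this file can be combined; the proxy URI and the query-string pair below are invented for illustration:

[source,ruby]
----
output {
  elasticsearch {
    hosts      => ["http://internal-es:9200"]
    proxy      => "http://proxy.example.com:3128"   # URI form only, per the note above
    parameters => { "routing_hint" => "logs" }      # appended to every request URL
  }
}
----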
@@ -401,7 +401,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] ===== `pool_max_per_route` - * Value type is <<number,number>> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `100` While the output tries to reuse connections efficiently we have a maximum per endpoint. @@ -412,7 +412,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-proxy"] ===== `proxy` - * Value type is <<uri,uri>> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * There is no default value for this setting. Set the address of a forward HTTP proxy. @@ -422,7 +422,7 @@ arguments of the URI type to prevent leaking credentials. [id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] ===== `resurrect_delay` - * Value type is <<number,number>> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How frequently, in seconds, to wait between resurrection attempts. @@ -432,7 +432,7 @@ to see if they have come back to life [id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] ===== `retry_initial_interval` - * Value type is <<number,number>> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval` @@ -440,7 +440,7 @@ Set initial interval in seconds between bulk retries. Doubled on each retry up t [id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] ===== `retry_max_interval` - * Value type is <<number,number>> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `64` Set max interval in seconds between bulk retries. @@ -448,7 +448,7 @@ Set max interval in seconds between bulk retries. [id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] ===== `retry_on_conflict` - * Value type is <<number,number>> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The number of times Elasticsearch should internally retry an update/upserted document @@ -458,7 +458,7 @@ for more info [id="{version}-plugins-{type}s-{plugin}-routing"] ===== `routing` - * Value type is <<string,string>> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. A routing override to be applied to all processed events. @@ -467,7 +467,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-script"] ===== `script` - * Value type is <<string,string>> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set script name for scripted update mode @@ -475,7 +475,7 @@ Set script name for scripted update mode [id="{version}-plugins-{type}s-{plugin}-script_lang"] ===== `script_lang` - * Value type is <<string,string>> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"painless"` Set the language of the used script.
If not set, this defaults to painless in ES 5.0 @@ -494,7 +494,7 @@ Define the type of script referenced by "script" variable [id="{version}-plugins-{type}s-{plugin}-script_var_name"] ===== `script_var_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"event"` Set variable name passed to script (scripted update) @@ -502,7 +502,7 @@ Set variable name passed to script (scripted update) [id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] ===== `scripted_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` if enabled, script is in charge of creating non-existent document (scripted update) @@ -510,7 +510,7 @@ if enabled, script is in charge of creating non-existent document (scripted upda [id="{version}-plugins-{type}s-{plugin}-sniffing"] ===== `sniffing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. @@ -522,7 +522,7 @@ manually enter multiple Elasticsearch hosts using the `hosts` parameter. [id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] ===== `sniffing_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How long to wait, in seconds, between sniffing attempts @@ -530,7 +530,7 @@ How long to wait, in seconds, between sniffing attempts [id="{version}-plugins-{type}s-{plugin}-sniffing_path"] ===== `sniffing_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to be used for the sniffing requests @@ -541,7 +541,7 @@ do not use full URL here, only paths, e.g. "/sniff/_nodes/http" [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * There is no default value for this setting. Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme @@ -551,7 +551,7 @@ If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS U [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] ===== `ssl_certificate_verification` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Option to validate the server's certificate. Disabling this severely compromises security. @@ -561,7 +561,7 @@ https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf [id="{version}-plugins-{type}s-{plugin}-template"] ===== `template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. You can set the path to your own template here, if you so desire. @@ -570,7 +570,7 @@ If not set, the included template will be used. [id="{version}-plugins-{type}s-{plugin}-template_name"] ===== `template_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash"` This configuration option defines how the template is named inside Elasticsearch. @@ -584,7 +584,7 @@ where `OldTemplateName` is whatever the former setting was. 
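The `pipeline` and `routing` options in this file both accept event-dependent sprintf values, so ingest routing can be decided upstream. A sketch assuming a filter has populated `[@metadata][pipeline]` and `user_id`:

[source,ruby]
----
output {
  elasticsearch {
    hosts    => ["localhost:9200"]
    pipeline => "%{[@metadata][pipeline]}"   # ingest pipeline chosen per event
    routing  => "%{user_id}"                 # shard routing override
  }
}
----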
[id="{version}-plugins-{type}s-{plugin}-template_overwrite"] ===== `template_overwrite` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` The template_overwrite option will always overwrite the indicated template @@ -601,7 +601,7 @@ the "logstash" template (i.e. removing all customized settings) [id="{version}-plugins-{type}s-{plugin}-timeout"] ===== `timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Set the timeout, in seconds, for network operations and requests sent Elasticsearch. If @@ -610,7 +610,7 @@ a timeout occurs, the request will be retried. [id="{version}-plugins-{type}s-{plugin}-truststore"] ===== `truststore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The JKS truststore to validate the server's certificate. @@ -619,7 +619,7 @@ Use either `:truststore` or `:cacert` [id="{version}-plugins-{type}s-{plugin}-truststore_password"] ===== `truststore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the truststore password @@ -627,7 +627,7 @@ Set the truststore password [id="{version}-plugins-{type}s-{plugin}-upsert"] ===== `upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set upsert content for update mode.s @@ -636,7 +636,7 @@ Create a new document with this parameter as json string if `document_id` doesn' [id="{version}-plugins-{type}s-{plugin}-user"] ===== `user` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Username to authenticate to a secure Elasticsearch cluster @@ -644,7 +644,7 @@ Username to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] ===== `validate_after_inactivity` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. @@ -659,7 +659,7 @@ See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache [id="{version}-plugins-{type}s-{plugin}-version"] ===== `version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here. diff --git a/docs/versioned-plugins/outputs/elasticsearch-v7.4.2.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v7.4.2.asciidoc index 2d38f1601..915f33b3b 100644 --- a/docs/versioned-plugins/outputs/elasticsearch-v7.4.2.asciidoc +++ b/docs/versioned-plugins/outputs/elasticsearch-v7.4.2.asciidoc @@ -67,13 +67,13 @@ HTTP requests to the bulk API are expected to return a 200 response code. All ot The following document errors are handled as follows: -* 400 and 404 errors are sent to the dead letter queue (DLQ), if enabled. If a DLQ is not enabled, a log message will be emitted, and the event will be dropped. See <> for more info. +* 400 and 404 errors are sent to the dead letter queue (DLQ), if enabled. 
If a DLQ is not enabled, a log message will be emitted, and the event will be dropped. See <<{version}-dlq-policy>> for more info. * 409 errors (conflict) are logged as a warning and dropped. Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions. It is more performant for Elasticsearch to retry these exceptions than this plugin. -[[dlq-policy]] +[id="{version}-dlq-policy"] ==== DLQ Policy Mapping (404) errors from Elasticsearch can lead to data loss. Unfortunately @@ -83,7 +83,7 @@ original events causing the mapping errors are stored in a file that can be processed at a later time. Often times, the offending field can be removed and re-indexed to Elasticsearch. If the DLQ is not enabled, and a mapping error happens, the problem is logged as a warning, and the event is dropped. See -<> for more information about processing events in the DLQ. +{logstash-ref}/dead-letter-queues.html[dead-letter-queues] for more information about processing events in the DLQ. ==== Batch Sizes @@ -120,54 +120,54 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parent>> 
|<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No -| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], 
one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No | <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -178,7 +178,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-action"] ===== `action` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"index"` Protocol agnostic (i.e. 
non-http, non-java specific) configs go here @@ -199,7 +199,7 @@ For more details on actions, check out the http://www.elastic.co/guide/en/elasti [id="{version}-plugins-{type}s-{plugin}-bulk_path"] ===== `bulk_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to perform the _bulk requests to @@ -208,7 +208,7 @@ this defaults to a concatenation of the path parameter and "_bulk" [id="{version}-plugins-{type}s-{plugin}-cacert"] ===== `cacert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The .cer or .pem file to validate the server's certificate @@ -216,7 +216,7 @@ The .cer or .pem file to validate the server's certificate [id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"] ===== `doc_as_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable `doc_as_upsert` for update mode. @@ -225,7 +225,7 @@ Create a new document with source if `document_id` doesn't exist in Elasticsearc [id="{version}-plugins-{type}s-{plugin}-document_id"] ===== `document_id` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document ID for the index. Useful for overwriting existing entries in @@ -234,7 +234,7 @@ Elasticsearch with the same ID. [id="{version}-plugins-{type}s-{plugin}-document_type"] ===== `document_type` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document type to write events to. Generally you should try to write only @@ -245,7 +245,7 @@ otherwise the document type will be assigned the value of 'logs' [id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"] ===== `failure_type_logging_whitelist` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Set the Elasticsearch errors in the whitelist that you don't want to log. @@ -256,7 +256,7 @@ which are `document_already_exists_exception`. ===== `flush_size` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * There is no default value for this setting. @@ -264,7 +264,7 @@ which are `document_already_exists_exception`. [id="{version}-plugins-{type}s-{plugin}-healthcheck_path"] ===== `healthcheck_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path where a HEAD request is sent when a backend is marked down @@ -275,7 +275,7 @@ If you have custom firewall rules you may need to change this [id="{version}-plugins-{type}s-{plugin}-hosts"] ===== `hosts` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * Default value is `[//127.0.0.1]` Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. @@ -293,7 +293,7 @@ Any special characters present in the URLs here MUST be URL escaped! 
This means [id="{version}-plugins-{type}s-{plugin}-http_compression"] ===== `http_compression` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond @@ -302,7 +302,7 @@ Enable gzip compression on requests. Note that response compression is on by def ===== `idle_flush_time` (DEPRECATED) * DEPRECATED WARNING: This configuration item is deprecated and may not be available in future versions. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` @@ -310,7 +310,7 @@ Enable gzip compression on requests. Note that response compression is on by def [id="{version}-plugins-{type}s-{plugin}-index"] ===== `index` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash-%{+YYYY.MM.dd}"` The index to write events to. This can be dynamic using the `%{foo}` syntax. @@ -324,7 +324,7 @@ Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/for [id="{version}-plugins-{type}s-{plugin}-keystore"] ===== `keystore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The keystore used to present a certificate to the server. @@ -333,7 +333,7 @@ It can be either .jks or .p12 [id="{version}-plugins-{type}s-{plugin}-keystore_password"] ===== `keystore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the keystore password @@ -341,7 +341,7 @@ Set the keystore password [id="{version}-plugins-{type}s-{plugin}-manage_template"] ===== `manage_template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` From Logstash 1.3 onwards, a template is applied to Elasticsearch during @@ -360,7 +360,7 @@ API to apply your templates manually. [id="{version}-plugins-{type}s-{plugin}-parameters"] ===== `parameters` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Pass a set of key value pairs as the URL query string. This query string is added @@ -370,7 +370,7 @@ urls that already have query strings, the one specified here will be appended. [id="{version}-plugins-{type}s-{plugin}-parent"] ===== `parent` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` For child documents, ID of the associated parent. @@ -379,7 +379,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-password"] ===== `password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Password to authenticate to a secure Elasticsearch cluster @@ -387,7 +387,7 @@ Password to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-path"] ===== `path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path at which the Elasticsearch server lives. 
Use this if you must run Elasticsearch behind a proxy that remaps @@ -398,7 +398,7 @@ not also set this field. That will raise an error at startup [id="{version}-plugins-{type}s-{plugin}-pipeline"] ===== `pipeline` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration @@ -407,7 +407,7 @@ here like `pipeline => "%{INGEST_PIPELINE}"` [id="{version}-plugins-{type}s-{plugin}-pool_max"] ===== `pool_max` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1000` While the output tries to reuse connections efficiently we have a maximum. @@ -418,7 +418,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] ===== `pool_max_per_route` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `100` While the output tries to reuse connections efficiently we have a maximum per endpoint. @@ -429,7 +429,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-proxy"] ===== `proxy` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * There is no default value for this setting. Set the address of a forward HTTP proxy. @@ -439,7 +439,7 @@ arguments of the URI type to prevent leaking credentials. [id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] ===== `resurrect_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How frequently, in seconds, to wait between resurrection attempts. @@ -449,7 +449,7 @@ to see if they have come back to life [id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] ===== `retry_initial_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval` @@ -457,7 +457,7 @@ Set initial interval in seconds between bulk retries. Doubled on each retry up t [id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] ===== `retry_max_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `64` Set max interval in seconds between bulk retries. @@ -465,7 +465,7 @@ Set max interval in seconds between bulk retries. [id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] ===== `retry_on_conflict` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The number of times Elasticsearch should internally retry an update/upserted document @@ -475,7 +475,7 @@ for more info [id="{version}-plugins-{type}s-{plugin}-routing"] ===== `routing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. A routing override to be applied to all processed events. @@ -484,7 +484,7 @@ This can be dynamic using the `%{foo}` syntax. 
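The pooling and retry settings above interact; the sketch below makes the documented defaults explicit and adds a forward proxy. The host and proxy URLs are illustrative assumptions, and the values shown are the documented defaults rather than tuning advice:

[source,ruby]
----
output {
  elasticsearch {
    hosts => ["http://localhost:9200"]      # assumed single node
    proxy => "http://proxy.internal:3128"   # hypothetical forward HTTP proxy
    pool_max           => 1000              # documented default: total connections
    pool_max_per_route => 100               # documented default: connections per endpoint
    resurrect_delay    => 5                 # probe downed endpoints every 5 seconds
    retry_initial_interval => 2             # first bulk retry after 2 seconds...
    retry_max_interval     => 64            # ...doubling up to 64 seconds between retries
  }
}
----

The doubling backoff means a failing bulk request is retried after 2s, 4s, 8s, and so on, capped at `retry_max_interval`.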
[id="{version}-plugins-{type}s-{plugin}-script"] ===== `script` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set script name for scripted update mode @@ -492,7 +492,7 @@ Set script name for scripted update mode [id="{version}-plugins-{type}s-{plugin}-script_lang"] ===== `script_lang` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"painless"` Set the language of the used script. If not set, this defaults to painless in ES 5.0 @@ -511,7 +511,7 @@ Define the type of script referenced by "script" variable [id="{version}-plugins-{type}s-{plugin}-script_var_name"] ===== `script_var_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"event"` Set variable name passed to script (scripted update) @@ -519,7 +519,7 @@ Set variable name passed to script (scripted update) [id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] ===== `scripted_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` if enabled, script is in charge of creating non-existent document (scripted update) @@ -527,7 +527,7 @@ if enabled, script is in charge of creating non-existent document (scripted upda [id="{version}-plugins-{type}s-{plugin}-sniffing"] ===== `sniffing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. @@ -539,7 +539,7 @@ manually enter multiple Elasticsearch hosts using the `hosts` parameter. [id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] ===== `sniffing_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How long to wait, in seconds, between sniffing attempts @@ -547,7 +547,7 @@ How long to wait, in seconds, between sniffing attempts [id="{version}-plugins-{type}s-{plugin}-sniffing_path"] ===== `sniffing_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to be used for the sniffing requests @@ -558,7 +558,7 @@ do not use full URL here, only paths, e.g. "/sniff/_nodes/http" [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * There is no default value for this setting. Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme @@ -568,7 +568,7 @@ If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS U [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] ===== `ssl_certificate_verification` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Option to validate the server's certificate. Disabling this severely compromises security. @@ -578,7 +578,7 @@ https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf [id="{version}-plugins-{type}s-{plugin}-template"] ===== `template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. 
You can set the path to your own template here, if you so desire. @@ -587,7 +587,7 @@ If not set, the included template will be used. [id="{version}-plugins-{type}s-{plugin}-template_name"] ===== `template_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash"` This configuration option defines how the template is named inside Elasticsearch. @@ -601,7 +601,7 @@ where `OldTemplateName` is whatever the former setting was. [id="{version}-plugins-{type}s-{plugin}-template_overwrite"] ===== `template_overwrite` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` The template_overwrite option will always overwrite the indicated template @@ -618,7 +618,7 @@ the "logstash" template (i.e. removing all customized settings) [id="{version}-plugins-{type}s-{plugin}-timeout"] ===== `timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If @@ -627,7 +627,7 @@ a timeout occurs, the request will be retried. [id="{version}-plugins-{type}s-{plugin}-truststore"] ===== `truststore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The JKS truststore to validate the server's certificate. @@ -636,7 +636,7 @@ Use either `:truststore` or `:cacert` [id="{version}-plugins-{type}s-{plugin}-truststore_password"] ===== `truststore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the truststore password @@ -644,7 +644,7 @@ Set the truststore password [id="{version}-plugins-{type}s-{plugin}-upsert"] ===== `upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set upsert content for update mode. @@ -653,7 +653,7 @@ Create a new document with this parameter as json string if `document_id` doesn' [id="{version}-plugins-{type}s-{plugin}-user"] ===== `user` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Username to authenticate to a secure Elasticsearch cluster @@ -661,7 +661,7 @@ Username to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] ===== `validate_after_inactivity` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. @@ -676,7 +676,7 @@ See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache [id="{version}-plugins-{type}s-{plugin}-version"] ===== `version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
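To round out this version's update-related options, `upsert`, `version`, and `version_type` compose as follows. This is a sketch only; the `id` field, the JSON payload, and the host URL are illustrative assumptions, and `version_type => "external"` is one of the allowed values listed in the table above:

[source,ruby]
----
output {
  elasticsearch {
    hosts        => ["http://localhost:9200"]  # assumed local node
    action       => "update"
    document_id  => "%{id}"                    # assumes events carry an `id` field
    upsert       => '{"status": "new"}'        # illustrative JSON used when the ID is missing
    version      => "%{my_version}"            # sprintf syntax, as documented above
    version_type => "external"                 # one of the allowed values
  }
}
----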
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v8.0.0.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v8.0.0.asciidoc index 4330bbf8b..ac3746529 100644 --- a/docs/versioned-plugins/outputs/elasticsearch-v8.0.0.asciidoc +++ b/docs/versioned-plugins/outputs/elasticsearch-v8.0.0.asciidoc @@ -102,54 +102,54 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No -| 
<<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No -| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> 
|{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No | <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -160,7 +160,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-action"] ===== `action` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"index"` Protocol agnostic (i.e. non-http, non-java specific) configs go here @@ -181,7 +181,7 @@ For more details on actions, check out the http://www.elastic.co/guide/en/elasti [id="{version}-plugins-{type}s-{plugin}-bulk_path"] ===== `bulk_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to perform the _bulk requests to @@ -190,7 +190,7 @@ this defaults to a concatenation of the path parameter and "_bulk" [id="{version}-plugins-{type}s-{plugin}-cacert"] ===== `cacert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. 
The .cer or .pem file to validate the server's certificate @@ -198,7 +198,7 @@ The .cer or .pem file to validate the server's certificate [id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"] ===== `doc_as_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable `doc_as_upsert` for update mode. @@ -207,7 +207,7 @@ Create a new document with source if `document_id` doesn't exist in Elasticsearc [id="{version}-plugins-{type}s-{plugin}-document_id"] ===== `document_id` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document ID for the index. Useful for overwriting existing entries in @@ -216,7 +216,7 @@ Elasticsearch with the same ID. [id="{version}-plugins-{type}s-{plugin}-document_type"] ===== `document_type` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document type to write events to. Generally you should try to write only @@ -227,7 +227,7 @@ otherwise the document type will be assigned the value of 'logs' [id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"] ===== `failure_type_logging_whitelist` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Set the Elasticsearch errors in the whitelist that you don't want to log. @@ -237,7 +237,7 @@ which are `document_already_exists_exception`. [id="{version}-plugins-{type}s-{plugin}-healthcheck_path"] ===== `healthcheck_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path where a HEAD request is sent when a backend is marked down @@ -248,7 +248,7 @@ If you have custom firewall rules you may need to change this [id="{version}-plugins-{type}s-{plugin}-hosts"] ===== `hosts` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * Default value is `[//127.0.0.1]` Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. @@ -266,7 +266,7 @@ Any special characters present in the URLs here MUST be URL escaped! This means [id="{version}-plugins-{type}s-{plugin}-http_compression"] ===== `http_compression` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond @@ -274,7 +274,7 @@ Enable gzip compression on requests. Note that response compression is on by def [id="{version}-plugins-{type}s-{plugin}-index"] ===== `index` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash-%{+YYYY.MM.dd}"` The index to write events to. This can be dynamic using the `%{foo}` syntax. @@ -288,7 +288,7 @@ Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/for [id="{version}-plugins-{type}s-{plugin}-keystore"] ===== `keystore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The keystore used to present a certificate to the server. 
@@ -297,7 +297,7 @@ It can be either .jks or .p12 [id="{version}-plugins-{type}s-{plugin}-keystore_password"] ===== `keystore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the keystore password @@ -305,7 +305,7 @@ Set the keystore password [id="{version}-plugins-{type}s-{plugin}-manage_template"] ===== `manage_template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` From Logstash 1.3 onwards, a template is applied to Elasticsearch during @@ -324,7 +324,7 @@ API to apply your templates manually. [id="{version}-plugins-{type}s-{plugin}-parameters"] ===== `parameters` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Pass a set of key value pairs as the URL query string. This query string is added @@ -334,7 +334,7 @@ urls that already have query strings, the one specified here will be appended. [id="{version}-plugins-{type}s-{plugin}-parent"] ===== `parent` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` For child documents, ID of the associated parent. @@ -343,7 +343,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-password"] ===== `password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Password to authenticate to a secure Elasticsearch cluster @@ -351,7 +351,7 @@ Password to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-path"] ===== `path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps @@ -362,7 +362,7 @@ not also set this field. That will raise an error at startup [id="{version}-plugins-{type}s-{plugin}-pipeline"] ===== `pipeline` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration @@ -371,7 +371,7 @@ here like `pipeline => "%{INGEST_PIPELINE}"` [id="{version}-plugins-{type}s-{plugin}-pool_max"] ===== `pool_max` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1000` While the output tries to reuse connections efficiently we have a maximum. @@ -382,7 +382,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] ===== `pool_max_per_route` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `100` While the output tries to reuse connections efficiently we have a maximum per endpoint. @@ -393,7 +393,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-proxy"] ===== `proxy` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * There is no default value for this setting. Set the address of a forward HTTP proxy. @@ -403,7 +403,7 @@ arguments of the URI type to prevent leaking credentials. 
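Several of the connection options above commonly appear together; here is a hedged sketch in which every URL, path, and credential is an illustrative assumption (the `${ES_PWD}` reference assumes Logstash environment-variable substitution is available):

[source,ruby]
----
output {
  elasticsearch {
    hosts      => ["https://es.internal:9200"]  # assumed endpoint
    path       => "/es"                         # assumes a proxy remaps the root URL to /es
    user       => "logstash_writer"             # hypothetical account
    password   => "${ES_PWD}"                   # assumes env-var substitution
    parameters => { "custom_param" => "value" } # illustrative query-string pair
    pipeline   => "%{INGEST_PIPELINE}"          # event-dependent, as documented above
  }
}
----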
[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] ===== `resurrect_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How frequently, in seconds, to wait between resurrection attempts. @@ -413,7 +413,7 @@ to see if they have come back to life [id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] ===== `retry_initial_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval` @@ -421,7 +421,7 @@ Set initial interval in seconds between bulk retries. Doubled on each retry up t [id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] ===== `retry_max_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `64` Set max interval in seconds between bulk retries. @@ -429,7 +429,7 @@ Set max interval in seconds between bulk retries. [id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] ===== `retry_on_conflict` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The number of times Elasticsearch should internally retry an update/upserted document @@ -439,7 +439,7 @@ for more info [id="{version}-plugins-{type}s-{plugin}-routing"] ===== `routing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. A routing override to be applied to all processed events. @@ -448,7 +448,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-script"] ===== `script` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set script name for scripted update mode @@ -456,7 +456,7 @@ Set script name for scripted update mode [id="{version}-plugins-{type}s-{plugin}-script_lang"] ===== `script_lang` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"painless"` Set the language of the used script. If not set, this defaults to painless in ES 5.0 @@ -475,7 +475,7 @@ Define the type of script referenced by "script" variable [id="{version}-plugins-{type}s-{plugin}-script_var_name"] ===== `script_var_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"event"` Set variable name passed to script (scripted update) @@ -483,7 +483,7 @@ Set variable name passed to script (scripted update) [id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] ===== `scripted_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` if enabled, script is in charge of creating non-existent document (scripted update) @@ -491,7 +491,7 @@ if enabled, script is in charge of creating non-existent document (scripted upda [id="{version}-plugins-{type}s-{plugin}-sniffing"] ===== `sniffing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. 
@@ -503,7 +503,7 @@ manually enter multiple Elasticsearch hosts using the `hosts` parameter. [id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] ===== `sniffing_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How long to wait, in seconds, between sniffing attempts @@ -511,7 +511,7 @@ How long to wait, in seconds, between sniffing attempts [id="{version}-plugins-{type}s-{plugin}-sniffing_path"] ===== `sniffing_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to be used for the sniffing requests @@ -522,7 +522,7 @@ do not use full URL here, only paths, e.g. "/sniff/_nodes/http" [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * There is no default value for this setting. Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme @@ -532,7 +532,7 @@ If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS U [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] ===== `ssl_certificate_verification` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Option to validate the server's certificate. Disabling this severely compromises security. @@ -542,7 +542,7 @@ https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf [id="{version}-plugins-{type}s-{plugin}-template"] ===== `template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. You can set the path to your own template here, if you so desire. @@ -551,7 +551,7 @@ If not set, the included template will be used. [id="{version}-plugins-{type}s-{plugin}-template_name"] ===== `template_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash"` This configuration option defines how the template is named inside Elasticsearch. @@ -565,7 +565,7 @@ where `OldTemplateName` is whatever the former setting was. [id="{version}-plugins-{type}s-{plugin}-template_overwrite"] ===== `template_overwrite` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` The template_overwrite option will always overwrite the indicated template @@ -582,7 +582,7 @@ the "logstash" template (i.e. removing all customized settings) [id="{version}-plugins-{type}s-{plugin}-timeout"] ===== `timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If @@ -591,7 +591,7 @@ a timeout occurs, the request will be retried. [id="{version}-plugins-{type}s-{plugin}-truststore"] ===== `truststore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The JKS truststore to validate the server's certificate.
@@ -600,7 +600,7 @@ Use either `:truststore` or `:cacert` [id="{version}-plugins-{type}s-{plugin}-truststore_password"] ===== `truststore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the truststore password @@ -608,7 +608,7 @@ Set the truststore password [id="{version}-plugins-{type}s-{plugin}-upsert"] ===== `upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set upsert content for update mode. @@ -617,7 +617,7 @@ Create a new document with this parameter as json string if `document_id` doesn' [id="{version}-plugins-{type}s-{plugin}-user"] ===== `user` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Username to authenticate to a secure Elasticsearch cluster @@ -625,7 +625,7 @@ Username to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] ===== `validate_after_inactivity` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. @@ -640,7 +640,7 @@ See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache [id="{version}-plugins-{type}s-{plugin}-version"] ===== `version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
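Finally, the TLS options above (`ssl`, `ssl_certificate_verification`, `truststore`, `truststore_password`) fit together as shown below. This is a sketch only, with an assumed endpoint, an assumed JKS truststore path, and an environment-variable password reference; per the note above, use either the truststore or `cacert`, not both:

[source,ruby]
----
output {
  elasticsearch {
    hosts => ["https://es.example.org:9200"]             # assumed HTTPS endpoint
    ssl   => true                                        # explicit, so an HTTP URL would be rejected
    ssl_certificate_verification => true                 # keep verification on (the default)
    truststore          => "/etc/logstash/truststore.jks" # hypothetical JKS path
    truststore_password => "${TRUSTSTORE_PWD}"           # assumes env-var substitution
  }
}
----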
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v8.0.1.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v8.0.1.asciidoc index 908aa3e4c..8cc8c935b 100644 --- a/docs/versioned-plugins/outputs/elasticsearch-v8.0.1.asciidoc +++ b/docs/versioned-plugins/outputs/elasticsearch-v8.0.1.asciidoc @@ -102,54 +102,54 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No -| 
<<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No -| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> 
|{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No | <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -160,7 +160,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-action"] ===== `action` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"index"` Protocol agnostic (i.e. non-http, non-java specific) configs go here @@ -181,7 +181,7 @@ For more details on actions, check out the http://www.elastic.co/guide/en/elasti [id="{version}-plugins-{type}s-{plugin}-bulk_path"] ===== `bulk_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to perform the _bulk requests to @@ -190,7 +190,7 @@ this defaults to a concatenation of the path parameter and "_bulk" [id="{version}-plugins-{type}s-{plugin}-cacert"] ===== `cacert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. 
The .cer or .pem file to validate the server's certificate @@ -198,7 +198,7 @@ The .cer or .pem file to validate the server's certificate [id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"] ===== `doc_as_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable `doc_as_upsert` for update mode. @@ -207,7 +207,7 @@ Create a new document with source if `document_id` doesn't exist in Elasticsearc [id="{version}-plugins-{type}s-{plugin}-document_id"] ===== `document_id` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document ID for the index. Useful for overwriting existing entries in @@ -216,7 +216,7 @@ Elasticsearch with the same ID. [id="{version}-plugins-{type}s-{plugin}-document_type"] ===== `document_type` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document type to write events to. Generally you should try to write only @@ -227,7 +227,7 @@ otherwise the document type will be assigned the value of 'logs' [id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"] ===== `failure_type_logging_whitelist` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Set the Elasticsearch errors in the whitelist that you don't want to log. @@ -237,7 +237,7 @@ which are `document_already_exists_exception`. [id="{version}-plugins-{type}s-{plugin}-healthcheck_path"] ===== `healthcheck_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path where a HEAD request is sent when a backend is marked down @@ -248,7 +248,7 @@ If you have custom firewall rules you may need to change this [id="{version}-plugins-{type}s-{plugin}-hosts"] ===== `hosts` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * Default value is `[//127.0.0.1]` Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. @@ -266,7 +266,7 @@ Any special characters present in the URLs here MUST be URL escaped! This means [id="{version}-plugins-{type}s-{plugin}-http_compression"] ===== `http_compression` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond @@ -274,7 +274,7 @@ Enable gzip compression on requests. Note that response compression is on by def [id="{version}-plugins-{type}s-{plugin}-index"] ===== `index` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash-%{+YYYY.MM.dd}"` The index to write events to. This can be dynamic using the `%{foo}` syntax. @@ -288,7 +288,7 @@ Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/for [id="{version}-plugins-{type}s-{plugin}-keystore"] ===== `keystore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The keystore used to present a certificate to the server. 
@@ -297,7 +297,7 @@ It can be either .jks or .p12 [id="{version}-plugins-{type}s-{plugin}-keystore_password"] ===== `keystore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the keystore password @@ -305,7 +305,7 @@ Set the keystore password [id="{version}-plugins-{type}s-{plugin}-manage_template"] ===== `manage_template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` From Logstash 1.3 onwards, a template is applied to Elasticsearch during @@ -324,7 +324,7 @@ API to apply your templates manually. [id="{version}-plugins-{type}s-{plugin}-parameters"] ===== `parameters` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Pass a set of key value pairs as the URL query string. This query string is added @@ -334,7 +334,7 @@ urls that already have query strings, the one specified here will be appended. [id="{version}-plugins-{type}s-{plugin}-parent"] ===== `parent` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` For child documents, ID of the associated parent. @@ -343,7 +343,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-password"] ===== `password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Password to authenticate to a secure Elasticsearch cluster @@ -351,7 +351,7 @@ Password to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-path"] ===== `path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps @@ -362,7 +362,7 @@ not also set this field. That will raise an error at startup [id="{version}-plugins-{type}s-{plugin}-pipeline"] ===== `pipeline` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration @@ -371,7 +371,7 @@ here like `pipeline => "%{INGEST_PIPELINE}"` [id="{version}-plugins-{type}s-{plugin}-pool_max"] ===== `pool_max` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1000` While the output tries to reuse connections efficiently we have a maximum. @@ -382,7 +382,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] ===== `pool_max_per_route` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `100` While the output tries to reuse connections efficiently we have a maximum per endpoint. @@ -393,7 +393,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-proxy"] ===== `proxy` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * There is no default value for this setting. Set the address of a forward HTTP proxy. @@ -403,7 +403,7 @@ arguments of the URI type to prevent leaking credentials. 
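The `path`, `proxy`, and `pipeline` options just described might combine as in this sketch; the hostnames and the `INGEST_PIPELINE` field are placeholders:

[source,ruby]
----
output {
  elasticsearch {
    hosts => ["http://gateway.example.com:8080"]
    path => "/es"                              # Elasticsearch remapped under /es by a reverse proxy
    proxy => "http://proxy.example.com:3128"   # outbound forward proxy, if one is required
    pipeline => "%{INGEST_PIPELINE}"           # ingest pipeline chosen per event
  }
}
----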
[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] ===== `resurrect_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How frequently, in seconds, to wait between resurrection attempts. @@ -413,7 +413,7 @@ to see if they have come back to life [id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] ===== `retry_initial_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval` @@ -421,7 +421,7 @@ Set initial interval in seconds between bulk retries. Doubled on each retry up t [id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] ===== `retry_max_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `64` Set max interval in seconds between bulk retries. @@ -429,7 +429,7 @@ Set max interval in seconds between bulk retries. [id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] ===== `retry_on_conflict` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The number of times Elasticsearch should internally retry an update/upserted document @@ -439,7 +439,7 @@ for more info [id="{version}-plugins-{type}s-{plugin}-routing"] ===== `routing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. A routing override to be applied to all processed events. @@ -448,7 +448,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-script"] ===== `script` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set script name for scripted update mode @@ -456,7 +456,7 @@ Set script name for scripted update mode [id="{version}-plugins-{type}s-{plugin}-script_lang"] ===== `script_lang` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"painless"` Set the language of the used script. If not set, this defaults to painless in ES 5.0 @@ -475,7 +475,7 @@ Define the type of script referenced by "script" variable [id="{version}-plugins-{type}s-{plugin}-script_var_name"] ===== `script_var_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"event"` Set variable name passed to script (scripted update) @@ -483,7 +483,7 @@ Set variable name passed to script (scripted update) [id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] ===== `scripted_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` if enabled, script is in charge of creating non-existent document (scripted update) @@ -491,7 +491,7 @@ if enabled, script is in charge of creating non-existent document (scripted upda [id="{version}-plugins-{type}s-{plugin}-sniffing"] ===== `sniffing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. 
@@ -503,7 +503,7 @@ manually enter multiple Elasticsearch hosts using the `hosts` parameter. [id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] ===== `sniffing_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How long to wait, in seconds, between sniffing attempts @@ -511,7 +511,7 @@ How long to wait, in seconds, between sniffing attempts [id="{version}-plugins-{type}s-{plugin}-sniffing_path"] ===== `sniffing_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to be used for the sniffing requests @@ -522,7 +522,7 @@ do not use full URL here, only paths, e.g. "/sniff/_nodes/http" [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * There is no default value for this setting. Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme @@ -532,7 +532,7 @@ If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS U [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] ===== `ssl_certificate_verification` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Option to validate the server's certificate. Disabling this severely compromises security. @@ -542,7 +542,7 @@ https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf [id="{version}-plugins-{type}s-{plugin}-template"] ===== `template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. You can set the path to your own template here, if you so desire. @@ -551,7 +551,7 @@ If not set, the included template will be used. [id="{version}-plugins-{type}s-{plugin}-template_name"] ===== `template_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash"` This configuration option defines how the template is named inside Elasticsearch. @@ -565,7 +565,7 @@ where `OldTemplateName` is whatever the former setting was. [id="{version}-plugins-{type}s-{plugin}-template_overwrite"] ===== `template_overwrite` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` The template_overwrite option will always overwrite the indicated template @@ -582,7 +582,7 @@ the "logstash" template (i.e. removing all customized settings) [id="{version}-plugins-{type}s-{plugin}-timeout"] ===== `timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If @@ -591,7 +591,7 @@ a timeout occurs, the request will be retried. [id="{version}-plugins-{type}s-{plugin}-truststore"] ===== `truststore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The JKS truststore to validate the server's certificate.
@@ -600,7 +600,7 @@ Use either `:truststore` or `:cacert` [id="{version}-plugins-{type}s-{plugin}-truststore_password"] ===== `truststore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the truststore password @@ -608,7 +608,7 @@ Set the truststore password [id="{version}-plugins-{type}s-{plugin}-upsert"] ===== `upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set upsert content for update mode. @@ -617,7 +617,7 @@ Create a new document with this parameter as json string if `document_id` doesn' [id="{version}-plugins-{type}s-{plugin}-user"] ===== `user` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Username to authenticate to a secure Elasticsearch cluster @@ -625,7 +625,7 @@ Username to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] ===== `validate_after_inactivity` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. @@ -640,7 +640,7 @@ See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache [id="{version}-plugins-{type}s-{plugin}-version"] ===== `version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
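A sketch of the template-management options covered in this file: Logstash applies the template at startup when `manage_template` is enabled, and `template_overwrite` lets a changed template replace the one already installed. The file path and template name below are placeholders:

[source,ruby]
----
output {
  elasticsearch {
    hosts => ["127.0.0.1:9200"]
    manage_template => true
    template => "/etc/logstash/templates/myapp-template.json"
    template_name => "myapp"
    template_overwrite => true   # replace the installed "myapp" template on startup
  }
}
----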
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v8.1.1.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v8.1.1.asciidoc index 71e909104..6f4469ac7 100644 --- a/docs/versioned-plugins/outputs/elasticsearch-v8.1.1.asciidoc +++ b/docs/versioned-plugins/outputs/elasticsearch-v8.1.1.asciidoc @@ -103,54 +103,54 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No -| 
<<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No -| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> 
|{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No | <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -161,7 +161,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-action"] ===== `action` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"index"` Protocol agnostic (i.e. non-http, non-java specific) configs go here @@ -182,7 +182,7 @@ For more details on actions, check out the http://www.elastic.co/guide/en/elasti [id="{version}-plugins-{type}s-{plugin}-bulk_path"] ===== `bulk_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to perform the _bulk requests to @@ -191,7 +191,7 @@ this defaults to a concatenation of the path parameter and "_bulk" [id="{version}-plugins-{type}s-{plugin}-cacert"] ===== `cacert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. 
The .cer or .pem file to validate the server's certificate @@ -199,7 +199,7 @@ The .cer or .pem file to validate the server's certificate [id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"] ===== `doc_as_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable `doc_as_upsert` for update mode. @@ -208,7 +208,7 @@ Create a new document with source if `document_id` doesn't exist in Elasticsearc [id="{version}-plugins-{type}s-{plugin}-document_id"] ===== `document_id` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document ID for the index. Useful for overwriting existing entries in @@ -217,7 +217,7 @@ Elasticsearch with the same ID. [id="{version}-plugins-{type}s-{plugin}-document_type"] ===== `document_type` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document type to write events to. Generally you should try to write only @@ -228,7 +228,7 @@ otherwise the document type will be assigned the value of 'logs' [id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"] ===== `failure_type_logging_whitelist` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Set the Elasticsearch errors in the whitelist that you don't want to log. @@ -238,7 +238,7 @@ which are `document_already_exists_exception`. [id="{version}-plugins-{type}s-{plugin}-healthcheck_path"] ===== `healthcheck_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path where a HEAD request is sent when a backend is marked down @@ -249,7 +249,7 @@ If you have custom firewall rules you may need to change this [id="{version}-plugins-{type}s-{plugin}-hosts"] ===== `hosts` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * Default value is `[//127.0.0.1]` Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. @@ -267,7 +267,7 @@ Any special characters present in the URLs here MUST be URL escaped! This means [id="{version}-plugins-{type}s-{plugin}-http_compression"] ===== `http_compression` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond @@ -275,7 +275,7 @@ Enable gzip compression on requests. Note that response compression is on by def [id="{version}-plugins-{type}s-{plugin}-index"] ===== `index` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash-%{+YYYY.MM.dd}"` The index to write events to. This can be dynamic using the `%{foo}` syntax. @@ -289,7 +289,7 @@ Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/for [id="{version}-plugins-{type}s-{plugin}-keystore"] ===== `keystore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The keystore used to present a certificate to the server. 
@@ -298,7 +298,7 @@ It can be either .jks or .p12 [id="{version}-plugins-{type}s-{plugin}-keystore_password"] ===== `keystore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the keystore password @@ -306,7 +306,7 @@ Set the keystore password [id="{version}-plugins-{type}s-{plugin}-manage_template"] ===== `manage_template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` From Logstash 1.3 onwards, a template is applied to Elasticsearch during @@ -325,7 +325,7 @@ API to apply your templates manually. [id="{version}-plugins-{type}s-{plugin}-parameters"] ===== `parameters` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Pass a set of key value pairs as the URL query string. This query string is added @@ -335,7 +335,7 @@ urls that already have query strings, the one specified here will be appended. [id="{version}-plugins-{type}s-{plugin}-parent"] ===== `parent` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` For child documents, ID of the associated parent. @@ -344,7 +344,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-password"] ===== `password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Password to authenticate to a secure Elasticsearch cluster @@ -352,7 +352,7 @@ Password to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-path"] ===== `path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps @@ -363,7 +363,7 @@ not also set this field. That will raise an error at startup [id="{version}-plugins-{type}s-{plugin}-pipeline"] ===== `pipeline` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration @@ -372,7 +372,7 @@ here like `pipeline => "%{INGEST_PIPELINE}"` [id="{version}-plugins-{type}s-{plugin}-pool_max"] ===== `pool_max` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1000` While the output tries to reuse connections efficiently we have a maximum. @@ -383,7 +383,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] ===== `pool_max_per_route` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `100` While the output tries to reuse connections efficiently we have a maximum per endpoint. @@ -394,7 +394,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-proxy"] ===== `proxy` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * There is no default value for this setting. Set the address of a forward HTTP proxy. @@ -404,7 +404,7 @@ arguments of the URI type to prevent leaking credentials. 
[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] ===== `resurrect_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How frequently, in seconds, to wait between resurrection attempts. @@ -414,7 +414,7 @@ to see if they have come back to life [id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] ===== `retry_initial_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval` @@ -422,7 +422,7 @@ Set initial interval in seconds between bulk retries. Doubled on each retry up t [id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] ===== `retry_max_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `64` Set max interval in seconds between bulk retries. @@ -430,7 +430,7 @@ Set max interval in seconds between bulk retries. [id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] ===== `retry_on_conflict` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The number of times Elasticsearch should internally retry an update/upserted document @@ -440,7 +440,7 @@ for more info [id="{version}-plugins-{type}s-{plugin}-routing"] ===== `routing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. A routing override to be applied to all processed events. @@ -449,7 +449,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-script"] ===== `script` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set script name for scripted update mode @@ -457,7 +457,7 @@ Set script name for scripted update mode [id="{version}-plugins-{type}s-{plugin}-script_lang"] ===== `script_lang` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"painless"` Set the language of the used script. If not set, this defaults to painless in ES 5.0 @@ -476,7 +476,7 @@ Define the type of script referenced by "script" variable [id="{version}-plugins-{type}s-{plugin}-script_var_name"] ===== `script_var_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"event"` Set variable name passed to script (scripted update) @@ -484,7 +484,7 @@ Set variable name passed to script (scripted update) [id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] ===== `scripted_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` if enabled, script is in charge of creating non-existent document (scripted update) @@ -492,7 +492,7 @@ if enabled, script is in charge of creating non-existent document (scripted upda [id="{version}-plugins-{type}s-{plugin}-sniffing"] ===== `sniffing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. 
@@ -504,7 +504,7 @@ manually enter multiple Elasticsearch hosts using the `hosts` parameter. [id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] ===== `sniffing_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How long to wait, in seconds, between sniffing attempts @@ -512,7 +512,7 @@ How long to wait, in seconds, between sniffing attempts [id="{version}-plugins-{type}s-{plugin}-sniffing_path"] ===== `sniffing_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to be used for the sniffing requests @@ -523,7 +523,7 @@ do not use full URL here, only paths, e.g. "/sniff/_nodes/http" [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * There is no default value for this setting. Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme @@ -533,7 +533,7 @@ If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS U [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] ===== `ssl_certificate_verification` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Option to validate the server's certificate. Disabling this severely compromises security. @@ -543,7 +543,7 @@ https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf [id="{version}-plugins-{type}s-{plugin}-template"] ===== `template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. You can set the path to your own template here, if you so desire. @@ -552,7 +552,7 @@ If not set, the included template will be used. [id="{version}-plugins-{type}s-{plugin}-template_name"] ===== `template_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash"` This configuration option defines how the template is named inside Elasticsearch. @@ -566,7 +566,7 @@ where `OldTemplateName` is whatever the former setting was. [id="{version}-plugins-{type}s-{plugin}-template_overwrite"] ===== `template_overwrite` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` The template_overwrite option will always overwrite the indicated template @@ -583,7 +583,7 @@ the "logstash" template (i.e. removing all customized settings) [id="{version}-plugins-{type}s-{plugin}-timeout"] ===== `timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If @@ -592,7 +592,7 @@ a timeout occurs, the request will be retried. [id="{version}-plugins-{type}s-{plugin}-truststore"] ===== `truststore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The JKS truststore to validate the server's certificate.
@@ -601,7 +601,7 @@ Use either `:truststore` or `:cacert` [id="{version}-plugins-{type}s-{plugin}-truststore_password"] ===== `truststore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the truststore password @@ -609,7 +609,7 @@ Set the truststore password [id="{version}-plugins-{type}s-{plugin}-upsert"] ===== `upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set upsert content for update mode. @@ -618,7 +618,7 @@ Create a new document with this parameter as json string if `document_id` doesn' [id="{version}-plugins-{type}s-{plugin}-user"] ===== `user` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Username to authenticate to a secure Elasticsearch cluster @@ -626,7 +626,7 @@ Username to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] ===== `validate_after_inactivity` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. @@ -641,7 +641,7 @@ See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache [id="{version}-plugins-{type}s-{plugin}-version"] ===== `version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here.
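For the authentication options described above, a minimal sketch against a secured cluster; the username and password are placeholders, and real credentials should not be hard-coded in a shipped config:

[source,ruby]
----
output {
  elasticsearch {
    hosts => ["https://secure-es.example.com:9200"]
    user => "logstash_writer"
    password => "changeme"
    validate_after_inactivity => 10000   # idle time before a pooled keepalive connection is re-checked
  }
}
----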
diff --git a/docs/versioned-plugins/outputs/elasticsearch-v8.2.0.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v8.2.0.asciidoc index ec66eda65..545d919a1 100644 --- a/docs/versioned-plugins/outputs/elasticsearch-v8.2.0.asciidoc +++ b/docs/versioned-plugins/outputs/elasticsearch-v8.2.0.asciidoc @@ -103,54 +103,54 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No -| 
<<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No -| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> 
|{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No | <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -161,7 +161,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-action"] ===== `action` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"index"` Protocol agnostic (i.e. non-http, non-java specific) configs go here @@ -182,7 +182,7 @@ For more details on actions, check out the http://www.elastic.co/guide/en/elasti [id="{version}-plugins-{type}s-{plugin}-bulk_path"] ===== `bulk_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to perform the _bulk requests to @@ -191,7 +191,7 @@ this defaults to a concatenation of the path parameter and "_bulk" [id="{version}-plugins-{type}s-{plugin}-cacert"] ===== `cacert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. 
The .cer or .pem file to validate the server's certificate @@ -199,7 +199,7 @@ The .cer or .pem file to validate the server's certificate [id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"] ===== `doc_as_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable `doc_as_upsert` for update mode. @@ -208,7 +208,7 @@ Create a new document with source if `document_id` doesn't exist in Elasticsearc [id="{version}-plugins-{type}s-{plugin}-document_id"] ===== `document_id` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document ID for the index. Useful for overwriting existing entries in @@ -217,7 +217,7 @@ Elasticsearch with the same ID. [id="{version}-plugins-{type}s-{plugin}-document_type"] ===== `document_type` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document type to write events to. Generally you should try to write only @@ -228,7 +228,7 @@ otherwise the document type will be assigned the value of 'logs' [id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"] ===== `failure_type_logging_whitelist` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Set the Elasticsearch errors in the whitelist that you don't want to log. @@ -238,7 +238,7 @@ which are `document_already_exists_exception`. [id="{version}-plugins-{type}s-{plugin}-healthcheck_path"] ===== `healthcheck_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path where a HEAD request is sent when a backend is marked down @@ -249,7 +249,7 @@ If you have custom firewall rules you may need to change this [id="{version}-plugins-{type}s-{plugin}-hosts"] ===== `hosts` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * Default value is `[//127.0.0.1]` Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. @@ -267,7 +267,7 @@ Any special characters present in the URLs here MUST be URL escaped! This means [id="{version}-plugins-{type}s-{plugin}-http_compression"] ===== `http_compression` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond @@ -275,7 +275,7 @@ Enable gzip compression on requests. Note that response compression is on by def [id="{version}-plugins-{type}s-{plugin}-index"] ===== `index` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash-%{+YYYY.MM.dd}"` The index to write events to. This can be dynamic using the `%{foo}` syntax. @@ -289,7 +289,7 @@ Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/for [id="{version}-plugins-{type}s-{plugin}-keystore"] ===== `keystore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The keystore used to present a certificate to the server. 
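Pulling several of the options above together, here is a minimal sketch of an output that writes to a daily index and pins each document's ID to an event field; the hostname and the `fingerprint` field are placeholders, not defaults:

[source,ruby]
----
output {
  elasticsearch {
    hosts => ["http://127.0.0.1:9200"]
    # Daily indices via the Joda-style date pattern discussed under `index`
    index => "logstash-%{+YYYY.MM.dd}"
    # Reuse an existing event field as the document ID so repeated events
    # overwrite the same document instead of duplicating it (assumed field)
    document_id => "%{fingerprint}"
  }
}
----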
@@ -298,7 +298,7 @@ It can be either .jks or .p12 [id="{version}-plugins-{type}s-{plugin}-keystore_password"] ===== `keystore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the keystore password @@ -306,7 +306,7 @@ Set the keystore password [id="{version}-plugins-{type}s-{plugin}-manage_template"] ===== `manage_template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` From Logstash 1.3 onwards, a template is applied to Elasticsearch during @@ -325,7 +325,7 @@ API to apply your templates manually. [id="{version}-plugins-{type}s-{plugin}-parameters"] ===== `parameters` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Pass a set of key value pairs as the URL query string. This query string is added @@ -335,7 +335,7 @@ urls that already have query strings, the one specified here will be appended. [id="{version}-plugins-{type}s-{plugin}-parent"] ===== `parent` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` For child documents, ID of the associated parent. @@ -344,7 +344,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-password"] ===== `password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Password to authenticate to a secure Elasticsearch cluster @@ -352,7 +352,7 @@ Password to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-path"] ===== `path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps @@ -363,7 +363,7 @@ not also set this field. That will raise an error at startup [id="{version}-plugins-{type}s-{plugin}-pipeline"] ===== `pipeline` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration @@ -372,7 +372,7 @@ here like `pipeline => "%{INGEST_PIPELINE}"` [id="{version}-plugins-{type}s-{plugin}-pool_max"] ===== `pool_max` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1000` While the output tries to reuse connections efficiently we have a maximum. @@ -383,7 +383,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] ===== `pool_max_per_route` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `100` While the output tries to reuse connections efficiently we have a maximum per endpoint. @@ -394,7 +394,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-proxy"] ===== `proxy` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * There is no default value for this setting. Set the address of a forward HTTP proxy. @@ -404,7 +404,7 @@ arguments of the URI type to prevent leaking credentials. 
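Tying the HTTP-level options together, a hedged sketch of reaching Elasticsearch through a remapping reverse proxy plus a forward proxy; every URL and parameter value here is a placeholder:

[source,ruby]
----
output {
  elasticsearch {
    hosts => ["http://es-gateway.example.com:8080"]
    # The reverse proxy serves Elasticsearch under a subpath
    path => "/es"
    # Extra key/value pairs appended to the query string of every request
    parameters => { "team" => "logging" }
    # Forward HTTP proxy; the URI form keeps credentials out of plain logs
    proxy => "http://forward-proxy.example.com:3128"
  }
}
----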
[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] ===== `resurrect_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How frequently, in seconds, to wait between resurrection attempts. @@ -414,7 +414,7 @@ to see if they have come back to life [id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] ===== `retry_initial_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval` @@ -422,7 +422,7 @@ Set initial interval in seconds between bulk retries. Doubled on each retry up t [id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] ===== `retry_max_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `64` Set max interval in seconds between bulk retries. @@ -430,7 +430,7 @@ Set max interval in seconds between bulk retries. [id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] ===== `retry_on_conflict` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The number of times Elasticsearch should internally retry an update/upserted document @@ -440,7 +440,7 @@ for more info [id="{version}-plugins-{type}s-{plugin}-routing"] ===== `routing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. A routing override to be applied to all processed events. @@ -449,7 +449,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-script"] ===== `script` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set script name for scripted update mode @@ -457,7 +457,7 @@ Set script name for scripted update mode [id="{version}-plugins-{type}s-{plugin}-script_lang"] ===== `script_lang` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"painless"` Set the language of the used script. If not set, this defaults to painless in ES 5.0 @@ -476,7 +476,7 @@ Define the type of script referenced by "script" variable [id="{version}-plugins-{type}s-{plugin}-script_var_name"] ===== `script_var_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"event"` Set variable name passed to script (scripted update) @@ -484,7 +484,7 @@ Set variable name passed to script (scripted update) [id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] ===== `scripted_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` if enabled, script is in charge of creating non-existent document (scripted update) @@ -492,7 +492,7 @@ if enabled, script is in charge of creating non-existent document (scripted upda [id="{version}-plugins-{type}s-{plugin}-sniffing"] ===== `sniffing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. 
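To make the scripted-update settings concrete, here is a sketch of an update action that runs a stored script; the script name, ID field, and retry count are assumptions for illustration:

[source,ruby]
----
output {
  elasticsearch {
    hosts => ["http://127.0.0.1:9200"]
    action => "update"
    document_id => "%{doc_id}"       # assumed event field
    script => "increment-counter"    # assumed stored script name
    script_type => "indexed"         # the script lives in Elasticsearch
    script_lang => "painless"
    # Let the script create missing documents (scripted upsert)
    scripted_upsert => true
    # Have Elasticsearch retry version conflicts instead of this plugin
    retry_on_conflict => 3
  }
}
----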
@@ -504,7 +504,7 @@ manually enter multiple Elasticsearch hosts using the `hosts` parameter. [id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] ===== `sniffing_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How long to wait, in seconds, between sniffing attempts @@ -512,7 +512,7 @@ How long to wait, in seconds, between sniffing attempts [id="{version}-plugins-{type}s-{plugin}-sniffing_path"] ===== `sniffing_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to be used for the sniffing requests @@ -523,7 +523,7 @@ do not use full URL here, only paths, e.g. "/sniff/_nodes/http" [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * There is no default value for this setting. Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme @@ -533,7 +533,7 @@ If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS U [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] ===== `ssl_certificate_verification` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Option to validate the server's certificate. Disabling this severely compromises security. @@ -543,7 +543,7 @@ https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf [id="{version}-plugins-{type}s-{plugin}-template"] ===== `template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. You can set the path to your own template here, if you so desire. @@ -552,7 +552,7 @@ If not set, the included template will be used. [id="{version}-plugins-{type}s-{plugin}-template_name"] ===== `template_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash"` This configuration option defines how the template is named inside Elasticsearch. @@ -566,7 +566,7 @@ where `OldTemplateName` is whatever the former setting was. [id="{version}-plugins-{type}s-{plugin}-template_overwrite"] ===== `template_overwrite` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` The template_overwrite option will always overwrite the indicated template @@ -583,7 +583,7 @@ the "logstash" template (i.e. removing all customized settings) [id="{version}-plugins-{type}s-{plugin}-timeout"] ===== `timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Set the timeout, in seconds, for network operations and requests sent Elasticsearch. If @@ -592,7 +592,7 @@ a timeout occurs, the request will be retried. [id="{version}-plugins-{type}s-{plugin}-truststore"] ===== `truststore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The JKS truststore to validate the server's certificate. 
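As one sketch of the template options described above (the file path and template name are placeholders):

[source,ruby]
----
output {
  elasticsearch {
    hosts => ["http://127.0.0.1:9200"]
    manage_template => true
    # Ship a custom index template instead of the bundled one
    template => "/etc/logstash/templates/my-template.json"
    template_name => "my-logs"
    # Overwrite whatever template of that name Elasticsearch already has
    template_overwrite => true
  }
}
----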
@@ -601,7 +601,7 @@ Use either `:truststore` or `:cacert` [id="{version}-plugins-{type}s-{plugin}-truststore_password"] ===== `truststore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the truststore password @@ -609,7 +609,7 @@ Set the truststore password [id="{version}-plugins-{type}s-{plugin}-upsert"] ===== `upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set upsert content for update mode.s @@ -618,7 +618,7 @@ Create a new document with this parameter as json string if `document_id` doesn' [id="{version}-plugins-{type}s-{plugin}-user"] ===== `user` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Username to authenticate to a secure Elasticsearch cluster @@ -626,7 +626,7 @@ Username to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] ===== `validate_after_inactivity` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. @@ -641,7 +641,7 @@ See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache [id="{version}-plugins-{type}s-{plugin}-version"] ===== `version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here. diff --git a/docs/versioned-plugins/outputs/elasticsearch-v8.2.2.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v8.2.2.asciidoc index b533ea863..7a80a8582 100644 --- a/docs/versioned-plugins/outputs/elasticsearch-v8.2.2.asciidoc +++ b/docs/versioned-plugins/outputs/elasticsearch-v8.2.2.asciidoc @@ -67,13 +67,13 @@ HTTP requests to the bulk API are expected to return a 200 response code. All ot The following document errors are handled as follows: -* 400 and 404 errors are sent to the dead letter queue (DLQ), if enabled. If a DLQ is not enabled, a log message will be emitted, and the event will be dropped. See <> for more info. +* 400 and 404 errors are sent to the dead letter queue (DLQ), if enabled. If a DLQ is not enabled, a log message will be emitted, and the event will be dropped. See <<{version}-dlq-policy>> for more info. * 409 errors (conflict) are logged as a warning and dropped. Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions. It is more performant for Elasticsearch to retry these exceptions than this plugin. -[[dlq-policy]] +[id="{version}-dlq-policy"] ==== DLQ Policy Mapping (404) errors from Elasticsearch can lead to data loss. Unfortunately @@ -83,7 +83,7 @@ original events causing the mapping errors are stored in a file that can be processed at a later time. Often times, the offending field can be removed and re-indexed to Elasticsearch. If the DLQ is not enabled, and a mapping error happens, the problem is logged as a warning, and the event is dropped. See -<> for more information about processing events in the DLQ. 
+{logstash-ref}/dead-letter-queues.html[dead-letter-queues] for more information about processing events in the DLQ. ==== Batch Sizes @@ -120,54 +120,54 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No -| 
<<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> 
|{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No | <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -178,7 +178,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-action"] ===== `action` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"index"` Protocol agnostic (i.e. non-http, non-java specific) configs go here @@ -199,7 +199,7 @@ For more details on actions, check out the http://www.elastic.co/guide/en/elasti [id="{version}-plugins-{type}s-{plugin}-bulk_path"] ===== `bulk_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to perform the _bulk requests to @@ -208,7 +208,7 @@ this defaults to a concatenation of the path parameter and "_bulk" [id="{version}-plugins-{type}s-{plugin}-cacert"] ===== `cacert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. 
The .cer or .pem file to validate the server's certificate @@ -216,7 +216,7 @@ The .cer or .pem file to validate the server's certificate [id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"] ===== `doc_as_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable `doc_as_upsert` for update mode. @@ -225,7 +225,7 @@ Create a new document with source if `document_id` doesn't exist in Elasticsearc [id="{version}-plugins-{type}s-{plugin}-document_id"] ===== `document_id` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document ID for the index. Useful for overwriting existing entries in @@ -234,7 +234,7 @@ Elasticsearch with the same ID. [id="{version}-plugins-{type}s-{plugin}-document_type"] ===== `document_type` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document type to write events to. Generally you should try to write only @@ -245,7 +245,7 @@ otherwise the document type will be assigned the value of 'logs' [id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"] ===== `failure_type_logging_whitelist` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Set the Elasticsearch errors in the whitelist that you don't want to log. @@ -255,7 +255,7 @@ which are `document_already_exists_exception`. [id="{version}-plugins-{type}s-{plugin}-healthcheck_path"] ===== `healthcheck_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path where a HEAD request is sent when a backend is marked down @@ -266,7 +266,7 @@ If you have custom firewall rules you may need to change this [id="{version}-plugins-{type}s-{plugin}-hosts"] ===== `hosts` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * Default value is `[//127.0.0.1]` Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. @@ -284,7 +284,7 @@ Any special characters present in the URLs here MUST be URL escaped! This means [id="{version}-plugins-{type}s-{plugin}-http_compression"] ===== `http_compression` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond @@ -292,7 +292,7 @@ Enable gzip compression on requests. Note that response compression is on by def [id="{version}-plugins-{type}s-{plugin}-index"] ===== `index` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash-%{+YYYY.MM.dd}"` The index to write events to. This can be dynamic using the `%{foo}` syntax. @@ -306,7 +306,7 @@ Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/for [id="{version}-plugins-{type}s-{plugin}-keystore"] ===== `keystore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The keystore used to present a certificate to the server. 
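For example, a minimal sketch of load balancing across two data nodes while leaving dedicated master nodes out of the list, as recommended under `hosts` (hostnames are placeholders):

[source,ruby]
----
output {
  elasticsearch {
    # Requests are load balanced across every host listed here;
    # dedicated master nodes are deliberately excluded
    hosts => ["http://es-data-1:9200", "http://es-data-2:9200"]
    index => "logstash-%{+YYYY.MM.dd}"
  }
}
----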
@@ -315,7 +315,7 @@ It can be either .jks or .p12 [id="{version}-plugins-{type}s-{plugin}-keystore_password"] ===== `keystore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the keystore password @@ -323,7 +323,7 @@ Set the keystore password [id="{version}-plugins-{type}s-{plugin}-manage_template"] ===== `manage_template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` From Logstash 1.3 onwards, a template is applied to Elasticsearch during @@ -342,7 +342,7 @@ API to apply your templates manually. [id="{version}-plugins-{type}s-{plugin}-parameters"] ===== `parameters` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Pass a set of key value pairs as the URL query string. This query string is added @@ -352,7 +352,7 @@ urls that already have query strings, the one specified here will be appended. [id="{version}-plugins-{type}s-{plugin}-parent"] ===== `parent` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` For child documents, ID of the associated parent. @@ -361,7 +361,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-password"] ===== `password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Password to authenticate to a secure Elasticsearch cluster @@ -369,7 +369,7 @@ Password to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-path"] ===== `path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps @@ -380,7 +380,7 @@ not also set this field. That will raise an error at startup [id="{version}-plugins-{type}s-{plugin}-pipeline"] ===== `pipeline` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration @@ -389,7 +389,7 @@ here like `pipeline => "%{INGEST_PIPELINE}"` [id="{version}-plugins-{type}s-{plugin}-pool_max"] ===== `pool_max` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1000` While the output tries to reuse connections efficiently we have a maximum. @@ -400,7 +400,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] ===== `pool_max_per_route` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `100` While the output tries to reuse connections efficiently we have a maximum per endpoint. @@ -411,7 +411,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-proxy"] ===== `proxy` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * There is no default value for this setting. Set the address of a forward HTTP proxy. @@ -421,7 +421,7 @@ arguments of the URI type to prevent leaking credentials. 
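The `pipeline` option above accepts event-dependent values; a brief sketch, assuming an `INGEST_PIPELINE` field is populated upstream:

[source,ruby]
----
output {
  elasticsearch {
    hosts => ["http://127.0.0.1:9200"]
    # Pick the ingest pipeline per event; make sure the field is always
    # set upstream, since an unresolved %{...} reference is sent literally
    pipeline => "%{INGEST_PIPELINE}"
  }
}
----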
[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] ===== `resurrect_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How frequently, in seconds, to wait between resurrection attempts. @@ -431,7 +431,7 @@ to see if they have come back to life [id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] ===== `retry_initial_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval` @@ -439,7 +439,7 @@ Set initial interval in seconds between bulk retries. Doubled on each retry up t [id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] ===== `retry_max_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `64` Set max interval in seconds between bulk retries. @@ -447,7 +447,7 @@ Set max interval in seconds between bulk retries. [id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] ===== `retry_on_conflict` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The number of times Elasticsearch should internally retry an update/upserted document @@ -457,7 +457,7 @@ for more info [id="{version}-plugins-{type}s-{plugin}-routing"] ===== `routing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. A routing override to be applied to all processed events. @@ -466,7 +466,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-script"] ===== `script` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set script name for scripted update mode @@ -474,7 +474,7 @@ Set script name for scripted update mode [id="{version}-plugins-{type}s-{plugin}-script_lang"] ===== `script_lang` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"painless"` Set the language of the used script. If not set, this defaults to painless in ES 5.0 @@ -493,7 +493,7 @@ Define the type of script referenced by "script" variable [id="{version}-plugins-{type}s-{plugin}-script_var_name"] ===== `script_var_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"event"` Set variable name passed to script (scripted update) @@ -501,7 +501,7 @@ Set variable name passed to script (scripted update) [id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] ===== `scripted_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` if enabled, script is in charge of creating non-existent document (scripted update) @@ -509,7 +509,7 @@ if enabled, script is in charge of creating non-existent document (scripted upda [id="{version}-plugins-{type}s-{plugin}-sniffing"] ===== `sniffing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. 
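With the defaults above, bulk retries back off at 2, 4, 8, 16, 32, and then cap at 64 seconds; a sketch of tightening that window (the numbers are illustrative, not recommendations):

[source,ruby]
----
output {
  elasticsearch {
    hosts => ["http://127.0.0.1:9200"]
    # First retry after 1s, doubling per attempt but never beyond 10s
    retry_initial_interval => 1
    retry_max_interval => 10
  }
}
----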
@@ -521,7 +521,7 @@ manually enter multiple Elasticsearch hosts using the `hosts` parameter. [id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] ===== `sniffing_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How long to wait, in seconds, between sniffing attempts @@ -529,7 +529,7 @@ How long to wait, in seconds, between sniffing attempts [id="{version}-plugins-{type}s-{plugin}-sniffing_path"] ===== `sniffing_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to be used for the sniffing requests @@ -540,7 +540,7 @@ do not use full URL here, only paths, e.g. "/sniff/_nodes/http" [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * There is no default value for this setting. Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme @@ -550,7 +550,7 @@ If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS U [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] ===== `ssl_certificate_verification` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Option to validate the server's certificate. Disabling this severely compromises security. @@ -560,7 +560,7 @@ https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf [id="{version}-plugins-{type}s-{plugin}-template"] ===== `template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. You can set the path to your own template here, if you so desire. @@ -569,7 +569,7 @@ If not set, the included template will be used. [id="{version}-plugins-{type}s-{plugin}-template_name"] ===== `template_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash"` This configuration option defines how the template is named inside Elasticsearch. @@ -583,7 +583,7 @@ where `OldTemplateName` is whatever the former setting was. [id="{version}-plugins-{type}s-{plugin}-template_overwrite"] ===== `template_overwrite` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` The template_overwrite option will always overwrite the indicated template @@ -600,7 +600,7 @@ the "logstash" template (i.e. removing all customized settings) [id="{version}-plugins-{type}s-{plugin}-timeout"] ===== `timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Set the timeout, in seconds, for network operations and requests sent Elasticsearch. If @@ -609,7 +609,7 @@ a timeout occurs, the request will be retried. [id="{version}-plugins-{type}s-{plugin}-truststore"] ===== `truststore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The JKS truststore to validate the server's certificate. 
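A sketch of the TLS options just described, validating the server against a JKS truststore; the paths and password are placeholders:

[source,ruby]
----
output {
  elasticsearch {
    hosts => ["https://es.example.com:9200"]
    ssl => true
    # Leave verification on; disabling it severely compromises security
    ssl_certificate_verification => true
    truststore => "/etc/logstash/certs/truststore.jks"
    truststore_password => "changeme"
  }
}
----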
@@ -618,7 +618,7 @@ Use either `:truststore` or `:cacert` [id="{version}-plugins-{type}s-{plugin}-truststore_password"] ===== `truststore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the truststore password @@ -626,7 +626,7 @@ Set the truststore password [id="{version}-plugins-{type}s-{plugin}-upsert"] ===== `upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set upsert content for update mode.s @@ -635,7 +635,7 @@ Create a new document with this parameter as json string if `document_id` doesn' [id="{version}-plugins-{type}s-{plugin}-user"] ===== `user` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Username to authenticate to a secure Elasticsearch cluster @@ -643,7 +643,7 @@ Username to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] ===== `validate_after_inactivity` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. @@ -658,7 +658,7 @@ See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache [id="{version}-plugins-{type}s-{plugin}-version"] ===== `version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here. diff --git a/docs/versioned-plugins/outputs/elasticsearch-v9.0.0.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v9.0.0.asciidoc index 431622e76..34c2f8fc3 100644 --- a/docs/versioned-plugins/outputs/elasticsearch-v9.0.0.asciidoc +++ b/docs/versioned-plugins/outputs/elasticsearch-v9.0.0.asciidoc @@ -67,13 +67,13 @@ HTTP requests to the bulk API are expected to return a 200 response code. All ot The following document errors are handled as follows: -* 400 and 404 errors are sent to the dead letter queue (DLQ), if enabled. If a DLQ is not enabled, a log message will be emitted, and the event will be dropped. See <> for more info. +* 400 and 404 errors are sent to the dead letter queue (DLQ), if enabled. If a DLQ is not enabled, a log message will be emitted, and the event will be dropped. See <<{version}-dlq-policy>> for more info. * 409 errors (conflict) are logged as a warning and dropped. Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions. It is more performant for Elasticsearch to retry these exceptions than this plugin. -[[dlq-policy]] +[id="{version}-dlq-policy"] ==== DLQ Policy Mapping (404) errors from Elasticsearch can lead to data loss. Unfortunately @@ -83,7 +83,7 @@ original events causing the mapping errors are stored in a file that can be processed at a later time. Often times, the offending field can be removed and re-indexed to Elasticsearch. If the DLQ is not enabled, and a mapping error happens, the problem is logged as a warning, and the event is dropped. See -<> for more information about processing events in the DLQ. 
+{logstash-ref}/dead-letter-queues.html[dead-letter-queues] for more information about processing events in the DLQ. ==== Batch Sizes @@ -120,54 +120,54 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No -| 
<<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> 
|{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No | <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -178,7 +178,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-action"] ===== `action` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"index"` Protocol agnostic (i.e. non-http, non-java specific) configs go here @@ -199,7 +199,7 @@ For more details on actions, check out the http://www.elastic.co/guide/en/elasti [id="{version}-plugins-{type}s-{plugin}-bulk_path"] ===== `bulk_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to perform the _bulk requests to @@ -208,7 +208,7 @@ this defaults to a concatenation of the path parameter and "_bulk" [id="{version}-plugins-{type}s-{plugin}-cacert"] ===== `cacert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. 
The .cer or .pem file to validate the server's certificate @@ -216,7 +216,7 @@ The .cer or .pem file to validate the server's certificate [id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"] ===== `doc_as_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable `doc_as_upsert` for update mode. @@ -225,7 +225,7 @@ Create a new document with source if `document_id` doesn't exist in Elasticsearc [id="{version}-plugins-{type}s-{plugin}-document_id"] ===== `document_id` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document ID for the index. Useful for overwriting existing entries in @@ -234,7 +234,7 @@ Elasticsearch with the same ID. [id="{version}-plugins-{type}s-{plugin}-document_type"] ===== `document_type` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. * This option is deprecated @@ -248,7 +248,7 @@ otherwise the document type will be assigned the value of 'doc'. [id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"] ===== `failure_type_logging_whitelist` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Set the Elasticsearch errors in the whitelist that you don't want to log. @@ -258,7 +258,7 @@ which are `document_already_exists_exception`. [id="{version}-plugins-{type}s-{plugin}-healthcheck_path"] ===== `healthcheck_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path where a HEAD request is sent when a backend is marked down @@ -269,7 +269,7 @@ If you have custom firewall rules you may need to change this [id="{version}-plugins-{type}s-{plugin}-hosts"] ===== `hosts` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * Default value is `[//127.0.0.1]` Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. @@ -287,7 +287,7 @@ Any special characters present in the URLs here MUST be URL escaped! This means [id="{version}-plugins-{type}s-{plugin}-http_compression"] ===== `http_compression` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond @@ -295,7 +295,7 @@ Enable gzip compression on requests. Note that response compression is on by def [id="{version}-plugins-{type}s-{plugin}-index"] ===== `index` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash-%{+YYYY.MM.dd}"` The index to write events to. This can be dynamic using the `%{foo}` syntax. @@ -309,7 +309,7 @@ Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/for [id="{version}-plugins-{type}s-{plugin}-keystore"] ===== `keystore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The keystore used to present a certificate to the server. 
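The `hosts`, `index`, and `document_id` settings above are often combined to write deduplicated, date-based indices. A minimal sketch, assuming a precomputed `fingerprint` field on the event (the hostnames and field name are illustrative, not defaults):

[source,ruby]
----
output {
  elasticsearch {
    # Bulk requests are load balanced across the listed nodes
    hosts => ["es1.example.com:9200", "es2.example.com:9200"]
    # Daily indices using the Joda-time syntax described above
    index => "logstash-%{+YYYY.MM.dd}"
    # Reusing a fingerprint as the ID makes duplicate events overwrite
    # the same document instead of being indexed twice
    document_id => "%{fingerprint}"
  }
}
----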
@@ -318,7 +318,7 @@ It can be either .jks or .p12 [id="{version}-plugins-{type}s-{plugin}-keystore_password"] ===== `keystore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the keystore password @@ -326,7 +326,7 @@ Set the keystore password [id="{version}-plugins-{type}s-{plugin}-manage_template"] ===== `manage_template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` From Logstash 1.3 onwards, a template is applied to Elasticsearch during @@ -345,7 +345,7 @@ API to apply your templates manually. [id="{version}-plugins-{type}s-{plugin}-parameters"] ===== `parameters` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Pass a set of key value pairs as the URL query string. This query string is added @@ -355,7 +355,7 @@ urls that already have query strings, the one specified here will be appended. [id="{version}-plugins-{type}s-{plugin}-parent"] ===== `parent` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` For child documents, ID of the associated parent. @@ -364,7 +364,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-password"] ===== `password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Password to authenticate to a secure Elasticsearch cluster @@ -372,7 +372,7 @@ Password to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-path"] ===== `path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps @@ -383,7 +383,7 @@ not also set this field. That will raise an error at startup [id="{version}-plugins-{type}s-{plugin}-pipeline"] ===== `pipeline` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration @@ -392,7 +392,7 @@ here like `pipeline => "%{INGEST_PIPELINE}"` [id="{version}-plugins-{type}s-{plugin}-pool_max"] ===== `pool_max` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1000` While the output tries to reuse connections efficiently we have a maximum. @@ -403,7 +403,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] ===== `pool_max_per_route` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `100` While the output tries to reuse connections efficiently we have a maximum per endpoint. @@ -414,7 +414,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-proxy"] ===== `proxy` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * There is no default value for this setting. Set the address of a forward HTTP proxy. @@ -424,7 +424,7 @@ arguments of the URI type to prevent leaking credentials. 
[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] ===== `resurrect_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How frequently, in seconds, to wait between resurrection attempts. @@ -434,7 +434,7 @@ to see if they have come back to life [id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] ===== `retry_initial_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval` @@ -442,7 +442,7 @@ Set initial interval in seconds between bulk retries. Doubled on each retry up t [id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] ===== `retry_max_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `64` Set max interval in seconds between bulk retries. @@ -450,7 +450,7 @@ Set max interval in seconds between bulk retries. [id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] ===== `retry_on_conflict` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The number of times Elasticsearch should internally retry an update/upserted document @@ -460,7 +460,7 @@ for more info [id="{version}-plugins-{type}s-{plugin}-routing"] ===== `routing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. A routing override to be applied to all processed events. @@ -469,7 +469,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-script"] ===== `script` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set script name for scripted update mode @@ -477,7 +477,7 @@ Set script name for scripted update mode [id="{version}-plugins-{type}s-{plugin}-script_lang"] ===== `script_lang` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"painless"` Set the language of the used script. If not set, this defaults to painless in ES 5.0. @@ -497,7 +497,7 @@ Define the type of script referenced by "script" variable [id="{version}-plugins-{type}s-{plugin}-script_var_name"] ===== `script_var_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"event"` Set variable name passed to script (scripted update) @@ -505,7 +505,7 @@ Set variable name passed to script (scripted update) [id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] ===== `scripted_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` if enabled, script is in charge of creating non-existent document (scripted update) @@ -513,7 +513,7 @@ if enabled, script is in charge of creating non-existent document (scripted upda [id="{version}-plugins-{type}s-{plugin}-sniffing"] ===== `sniffing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. 
@@ -525,7 +525,7 @@ manually enter multiple Elasticsearch hosts using the `hosts` parameter. [id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] ===== `sniffing_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How long to wait, in seconds, between sniffing attempts @@ -533,7 +533,7 @@ How long to wait, in seconds, between sniffing attempts [id="{version}-plugins-{type}s-{plugin}-sniffing_path"] ===== `sniffing_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to be used for the sniffing requests @@ -544,7 +544,7 @@ do not use full URL here, only paths, e.g. "/sniff/_nodes/http" [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * There is no default value for this setting. Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme @@ -554,7 +554,7 @@ If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS U [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] ===== `ssl_certificate_verification` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Option to validate the server's certificate. Disabling this severely compromises security. @@ -564,7 +564,7 @@ https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf [id="{version}-plugins-{type}s-{plugin}-template"] ===== `template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. You can set the path to your own template here, if you so desire. @@ -573,7 +573,7 @@ If not set, the included template will be used. [id="{version}-plugins-{type}s-{plugin}-template_name"] ===== `template_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash"` This configuration option defines how the template is named inside Elasticsearch. @@ -587,7 +587,7 @@ where `OldTemplateName` is whatever the former setting was. [id="{version}-plugins-{type}s-{plugin}-template_overwrite"] ===== `template_overwrite` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` The template_overwrite option will always overwrite the indicated template @@ -604,7 +604,7 @@ the "logstash" template (i.e. removing all customized settings) [id="{version}-plugins-{type}s-{plugin}-timeout"] ===== `timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If @@ -613,7 +613,7 @@ a timeout occurs, the request will be retried. [id="{version}-plugins-{type}s-{plugin}-truststore"] ===== `truststore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The JKS truststore to validate the server's certificate.
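A sketch of how the template settings above fit together; the template path and name are placeholders for your own files:

[source,ruby]
----
output {
  elasticsearch {
    hosts => ["127.0.0.1"]
    # Manage a custom index template instead of the bundled one
    manage_template => true
    template => "/etc/logstash/templates/myapp-template.json"
    template_name => "myapp"
    # Overwrite the stored template at startup so file edits take effect
    template_overwrite => true
  }
}
----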
@@ -622,7 +622,7 @@ Use either `:truststore` or `:cacert` [id="{version}-plugins-{type}s-{plugin}-truststore_password"] ===== `truststore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the truststore password @@ -630,7 +630,7 @@ Set the truststore password [id="{version}-plugins-{type}s-{plugin}-upsert"] ===== `upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set upsert content for update mode. @@ -639,7 +639,7 @@ Create a new document with this parameter as json string if `document_id` doesn' [id="{version}-plugins-{type}s-{plugin}-user"] ===== `user` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Username to authenticate to a secure Elasticsearch cluster @@ -647,7 +647,7 @@ Username to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] ===== `validate_after_inactivity` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. @@ -662,7 +662,7 @@ See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache [id="{version}-plugins-{type}s-{plugin}-version"] ===== `version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here. diff --git a/docs/versioned-plugins/outputs/elasticsearch-v9.0.2.asciidoc b/docs/versioned-plugins/outputs/elasticsearch-v9.0.2.asciidoc index f7c9238d1..07dbc238b 100644 --- a/docs/versioned-plugins/outputs/elasticsearch-v9.0.2.asciidoc +++ b/docs/versioned-plugins/outputs/elasticsearch-v9.0.2.asciidoc @@ -67,13 +67,13 @@ HTTP requests to the bulk API are expected to return a 200 response code. All ot The following document errors are handled as follows: - * 400 and 404 errors are sent to the dead letter queue (DLQ), if enabled. If a DLQ is not enabled, a log message will be emitted, and the event will be dropped. See <> for more info. + * 400 and 404 errors are sent to the dead letter queue (DLQ), if enabled. If a DLQ is not enabled, a log message will be emitted, and the event will be dropped. See <<{version}-dlq-policy>> for more info. * 409 errors (conflict) are logged as a warning and dropped. Note that 409 exceptions are no longer retried. Please set a higher `retry_on_conflict` value if you experience 409 exceptions. It is more performant for Elasticsearch to retry these exceptions than this plugin. -[[dlq-policy]] +[id="{version}-dlq-policy"] ==== DLQ Policy Mapping (404) errors from Elasticsearch can lead to data loss. Unfortunately @@ -83,7 +83,7 @@ original events causing the mapping errors are stored in a file that can be processed at a later time. Oftentimes, the offending field can be removed and re-indexed to Elasticsearch. If the DLQ is not enabled, and a mapping error happens, the problem is logged as a warning, and the event is dropped. See -<> for more information about processing events in the DLQ.
+{logstash-ref}/dead-letter-queues.html[dead-letter-queues] for more information about processing events in the DLQ. ==== Batch Sizes @@ -120,54 +120,54 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-action>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-action>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-bulk_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-cacert>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_id>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-document_type>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-hosts>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-http_compression>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-index>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-doc_as_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-document_id>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-document_type>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-healthcheck_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-hosts>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-http_compression>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-index>> |{logstash-ref}/configuration-file-structure.html#string[string]|No | <<{version}-plugins-{type}s-{plugin}-keystore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-manage_template>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parameters>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-parent>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pipeline>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-proxy>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-routing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_lang>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-script_type>> |<>, one of `["inline", "indexed", "file"]`|No -| 
<<{version}-plugins-{type}s-{plugin}-script_var_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-keystore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-manage_template>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-parameters>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-parent>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pipeline>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-pool_max_per_route>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-proxy>> |{logstash-ref}/configuration-file-structure.html#uri[uri]|No +| <<{version}-plugins-{type}s-{plugin}-resurrect_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_initial_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_max_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-retry_on_conflict>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-routing>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_lang>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-script_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["inline", "indexed", "file"]`|No +| <<{version}-plugins-{type}s-{plugin}-script_var_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-scripted_upsert>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_delay>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-sniffing_path>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-ssl>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-ssl_certificate_verification>> 
|{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No | <<{version}-plugins-{type}s-{plugin}-template>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-template_name>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timeout>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-template_name>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-template_overwrite>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timeout>> |{logstash-ref}/configuration-file-structure.html#number[number]|No | <<{version}-plugins-{type}s-{plugin}-truststore>> |a valid filesystem path|No -| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-upsert>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-user>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-version_type>> |<>, one of `["internal", "external", "external_gt", "external_gte", "force"]`|No +| <<{version}-plugins-{type}s-{plugin}-truststore_password>> |{logstash-ref}/configuration-file-structure.html#password[password]|No +| <<{version}-plugins-{type}s-{plugin}-upsert>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-user>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-validate_after_inactivity>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-version>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-version_type>> |{logstash-ref}/configuration-file-structure.html#string[string], one of `["internal", "external", "external_gt", "external_gte", "force"]`|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -178,7 +178,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-action"] ===== `action` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"index"` Protocol agnostic (i.e. non-http, non-java specific) configs go here @@ -199,7 +199,7 @@ For more details on actions, check out the http://www.elastic.co/guide/en/elasti [id="{version}-plugins-{type}s-{plugin}-bulk_path"] ===== `bulk_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to perform the _bulk requests to @@ -208,7 +208,7 @@ this defaults to a concatenation of the path parameter and "_bulk" [id="{version}-plugins-{type}s-{plugin}-cacert"] ===== `cacert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. 
The .cer or .pem file to validate the server's certificate @@ -216,7 +216,7 @@ The .cer or .pem file to validate the server's certificate [id="{version}-plugins-{type}s-{plugin}-doc_as_upsert"] ===== `doc_as_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable `doc_as_upsert` for update mode. @@ -225,7 +225,7 @@ Create a new document with source if `document_id` doesn't exist in Elasticsearc [id="{version}-plugins-{type}s-{plugin}-document_id"] ===== `document_id` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The document ID for the index. Useful for overwriting existing entries in @@ -234,7 +234,7 @@ Elasticsearch with the same ID. [id="{version}-plugins-{type}s-{plugin}-document_type"] ===== `document_type` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. * This option is deprecated @@ -250,7 +250,7 @@ If you don't set a value for this option: [id="{version}-plugins-{type}s-{plugin}-failure_type_logging_whitelist"] ===== `failure_type_logging_whitelist` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Set the Elasticsearch errors in the whitelist that you don't want to log. @@ -260,7 +260,7 @@ which are `document_already_exists_exception`. [id="{version}-plugins-{type}s-{plugin}-healthcheck_path"] ===== `healthcheck_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path where a HEAD request is sent when a backend is marked down @@ -271,7 +271,7 @@ If you have custom firewall rules you may need to change this [id="{version}-plugins-{type}s-{plugin}-hosts"] ===== `hosts` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * Default value is `[//127.0.0.1]` Sets the host(s) of the remote instance. If given an array it will load balance requests across the hosts specified in the `hosts` parameter. @@ -289,7 +289,7 @@ Any special characters present in the URLs here MUST be URL escaped! This means [id="{version}-plugins-{type}s-{plugin}-http_compression"] ===== `http_compression` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enable gzip compression on requests. Note that response compression is on by default for Elasticsearch v5.0 and beyond @@ -297,7 +297,7 @@ Enable gzip compression on requests. Note that response compression is on by def [id="{version}-plugins-{type}s-{plugin}-index"] ===== `index` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash-%{+YYYY.MM.dd}"` The index to write events to. This can be dynamic using the `%{foo}` syntax. @@ -311,7 +311,7 @@ Joda formats are defined http://www.joda.org/joda-time/apidocs/org/joda/time/for [id="{version}-plugins-{type}s-{plugin}-keystore"] ===== `keystore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The keystore used to present a certificate to the server. 
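For the update-related settings above, a minimal partial-update sketch; the `order_id` field is illustrative:

[source,ruby]
----
output {
  elasticsearch {
    hosts => ["127.0.0.1"]
    action => "update"
    document_id => "%{order_id}"
    # Merge the event into an existing document, or create the document
    # from the event source when the ID is new
    doc_as_upsert => true
  }
}
----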
@@ -320,7 +320,7 @@ It can be either .jks or .p12 [id="{version}-plugins-{type}s-{plugin}-keystore_password"] ===== `keystore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the keystore password @@ -328,7 +328,7 @@ Set the keystore password [id="{version}-plugins-{type}s-{plugin}-manage_template"] ===== `manage_template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` From Logstash 1.3 onwards, a template is applied to Elasticsearch during @@ -347,7 +347,7 @@ API to apply your templates manually. [id="{version}-plugins-{type}s-{plugin}-parameters"] ===== `parameters` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * There is no default value for this setting. Pass a set of key value pairs as the URL query string. This query string is added @@ -357,7 +357,7 @@ urls that already have query strings, the one specified here will be appended. [id="{version}-plugins-{type}s-{plugin}-parent"] ===== `parent` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` For child documents, ID of the associated parent. @@ -366,7 +366,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-password"] ===== `password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Password to authenticate to a secure Elasticsearch cluster @@ -374,7 +374,7 @@ Password to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-path"] ===== `path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path at which the Elasticsearch server lives. Use this if you must run Elasticsearch behind a proxy that remaps @@ -385,7 +385,7 @@ not also set this field. That will raise an error at startup [id="{version}-plugins-{type}s-{plugin}-pipeline"] ===== `pipeline` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `nil` Set which ingest pipeline you wish to execute for an event. You can also use event dependent configuration @@ -394,7 +394,7 @@ here like `pipeline => "%{INGEST_PIPELINE}"` [id="{version}-plugins-{type}s-{plugin}-pool_max"] ===== `pool_max` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1000` While the output tries to reuse connections efficiently we have a maximum. @@ -405,7 +405,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-pool_max_per_route"] ===== `pool_max_per_route` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `100` While the output tries to reuse connections efficiently we have a maximum per endpoint. @@ -416,7 +416,7 @@ which is bad. [id="{version}-plugins-{type}s-{plugin}-proxy"] ===== `proxy` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#uri[uri] * There is no default value for this setting. Set the address of a forward HTTP proxy. @@ -426,7 +426,7 @@ arguments of the URI type to prevent leaking credentials. 
[id="{version}-plugins-{type}s-{plugin}-resurrect_delay"] ===== `resurrect_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How frequently, in seconds, to wait between resurrection attempts. @@ -436,7 +436,7 @@ to see if they have come back to life [id="{version}-plugins-{type}s-{plugin}-retry_initial_interval"] ===== `retry_initial_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Set initial interval in seconds between bulk retries. Doubled on each retry up to `retry_max_interval` @@ -444,7 +444,7 @@ Set initial interval in seconds between bulk retries. Doubled on each retry up t [id="{version}-plugins-{type}s-{plugin}-retry_max_interval"] ===== `retry_max_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `64` Set max interval in seconds between bulk retries. @@ -452,7 +452,7 @@ Set max interval in seconds between bulk retries. [id="{version}-plugins-{type}s-{plugin}-retry_on_conflict"] ===== `retry_on_conflict` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `1` The number of times Elasticsearch should internally retry an update/upserted document @@ -462,7 +462,7 @@ for more info [id="{version}-plugins-{type}s-{plugin}-routing"] ===== `routing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. A routing override to be applied to all processed events. @@ -471,7 +471,7 @@ This can be dynamic using the `%{foo}` syntax. [id="{version}-plugins-{type}s-{plugin}-script"] ===== `script` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set script name for scripted update mode @@ -479,7 +479,7 @@ Set script name for scripted update mode [id="{version}-plugins-{type}s-{plugin}-script_lang"] ===== `script_lang` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"painless"` Set the language of the used script. If not set, this defaults to painless in ES 5.0. @@ -499,7 +499,7 @@ Define the type of script referenced by "script" variable [id="{version}-plugins-{type}s-{plugin}-script_var_name"] ===== `script_var_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"event"` Set variable name passed to script (scripted update) @@ -507,7 +507,7 @@ Set variable name passed to script (scripted update) [id="{version}-plugins-{type}s-{plugin}-scripted_upsert"] ===== `scripted_upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` if enabled, script is in charge of creating non-existent document (scripted update) @@ -515,7 +515,7 @@ if enabled, script is in charge of creating non-existent document (scripted upda [id="{version}-plugins-{type}s-{plugin}-sniffing"] ===== `sniffing` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` This setting asks Elasticsearch for the list of all cluster nodes and adds them to the hosts list. 
@@ -527,7 +527,7 @@ manually enter multiple Elasticsearch hosts using the `hosts` parameter. [id="{version}-plugins-{type}s-{plugin}-sniffing_delay"] ===== `sniffing_delay` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `5` How long to wait, in seconds, between sniffing attempts @@ -535,7 +535,7 @@ How long to wait, in seconds, between sniffing attempts [id="{version}-plugins-{type}s-{plugin}-sniffing_path"] ===== `sniffing_path` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. HTTP Path to be used for the sniffing requests @@ -546,7 +546,7 @@ do not use full URL here, only paths, e.g. "/sniff/_nodes/http" [id="{version}-plugins-{type}s-{plugin}-ssl"] ===== `ssl` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * There is no default value for this setting. Enable SSL/TLS secured communication to Elasticsearch cluster. Leaving this unspecified will use whatever scheme @@ -556,7 +556,7 @@ If SSL is explicitly disabled here the plugin will refuse to start if an HTTPS U [id="{version}-plugins-{type}s-{plugin}-ssl_certificate_verification"] ===== `ssl_certificate_verification` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` Option to validate the server's certificate. Disabling this severely compromises security. @@ -566,7 +566,7 @@ https://www.cs.utexas.edu/~shmat/shmat_ccs12.pdf [id="{version}-plugins-{type}s-{plugin}-template"] ===== `template` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. You can set the path to your own template here, if you so desire. @@ -575,7 +575,7 @@ If not set, the included template will be used. [id="{version}-plugins-{type}s-{plugin}-template_name"] ===== `template_name` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash"` This configuration option defines how the template is named inside Elasticsearch. @@ -589,7 +589,7 @@ where `OldTemplateName` is whatever the former setting was. [id="{version}-plugins-{type}s-{plugin}-template_overwrite"] ===== `template_overwrite` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` The template_overwrite option will always overwrite the indicated template @@ -606,7 +606,7 @@ the "logstash" template (i.e. removing all customized settings) [id="{version}-plugins-{type}s-{plugin}-timeout"] ===== `timeout` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `60` Set the timeout, in seconds, for network operations and requests sent to Elasticsearch. If @@ -615,7 +615,7 @@ a timeout occurs, the request will be retried. [id="{version}-plugins-{type}s-{plugin}-truststore"] ===== `truststore` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#path[path] * There is no default value for this setting. The JKS truststore to validate the server's certificate.
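A sketch combining the TLS settings above with basic authentication; the paths, user, and the `${...}` environment-variable references are placeholders:

[source,ruby]
----
output {
  elasticsearch {
    hosts => ["https://es.example.com:9200"]
    ssl => true
    # Leave certificate verification enabled; disabling it is insecure
    ssl_certificate_verification => true
    truststore => "/etc/logstash/certs/truststore.jks"
    truststore_password => "${TRUSTSTORE_PASSWORD}"
    user => "logstash_writer"
    password => "${ES_PASSWORD}"
  }
}
----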
@@ -624,7 +624,7 @@ Use either `:truststore` or `:cacert` [id="{version}-plugins-{type}s-{plugin}-truststore_password"] ===== `truststore_password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#password[password] * There is no default value for this setting. Set the truststore password @@ -632,7 +632,7 @@ Set the truststore password [id="{version}-plugins-{type}s-{plugin}-upsert"] ===== `upsert` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Set upsert content for update mode. @@ -641,7 +641,7 @@ Create a new document with this parameter as json string if `document_id` doesn' [id="{version}-plugins-{type}s-{plugin}-user"] ===== `user` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Username to authenticate to a secure Elasticsearch cluster @@ -649,7 +649,7 @@ Username to authenticate to a secure Elasticsearch cluster [id="{version}-plugins-{type}s-{plugin}-validate_after_inactivity"] ===== `validate_after_inactivity` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `10000` How long to wait before checking if the connection is stale before executing a request on a connection using keepalive. @@ -664,7 +664,7 @@ See https://hc.apache.org/httpcomponents-client-ga/httpclient/apidocs/org/apache [id="{version}-plugins-{type}s-{plugin}-version"] ===== `version` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The version to use for indexing. Use sprintf syntax like `%{my_version}` to use a field value here. diff --git a/docs/versioned-plugins/outputs/email-v4.0.4.asciidoc b/docs/versioned-plugins/outputs/email-v4.0.4.asciidoc index 698db27f9..2ac93cce1 100644 --- a/docs/versioned-plugins/outputs/email-v4.0.4.asciidoc +++ b/docs/versioned-plugins/outputs/email-v4.0.4.asciidoc @@ -12,7 +12,7 @@ START - GENERATED VARIABLES, DO NOT EDIT! END - GENERATED VARIABLES, DO NOT EDIT! /////////////////////////////////////////// -[id="plugins-{type}-{plugin}"] +[id="{version}-plugins-{type}s-{plugin}"] === Email output plugin {version} @@ -20,6 +20,8 @@ include::{include_path}/plugin_header.asciidoc[] ==== Description +Sends an email when an event is received. Alternatively, you may include or +exclude the email output execution using conditionals.
==== Usage Example [source,ruby] @@ -48,24 +50,24 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-address>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-attachments>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-authentication>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-body>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cc>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-contenttype>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-debug>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-domain>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-from>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-htmlbody>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-replyto>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-subject>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-to>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-use_tls>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-username>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-via>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-address>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-attachments>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-authentication>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-body>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-cc>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-contenttype>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-debug>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-domain>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-from>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-htmlbody>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-replyto>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-subject>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-to>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes +| <<{version}-plugins-{type}s-{plugin}-use_tls>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-username>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-via>> |{logstash-ref}/configuration-file-structure.html#string[string]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list 
of options supported by all @@ -76,7 +78,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-address"] ===== `address` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"localhost"` The address used to connect to the mail server @@ -84,7 +86,7 @@ The address used to connect to the mail server [id="{version}-plugins-{type}s-{plugin}-attachments"] ===== `attachments` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Attachments - specify the name(s) and location(s) of the files. @@ -92,7 +94,7 @@ Attachments - specify the name(s) and location(s) of the files. [id="{version}-plugins-{type}s-{plugin}-authentication"] ===== `authentication` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Authentication method used when identifying with the server @@ -100,7 +102,7 @@ Authentication method used when identifying with the server [id="{version}-plugins-{type}s-{plugin}-body"] ===== `body` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Body for the email - plain text only. @@ -108,7 +110,7 @@ Body for the email - plain text only. [id="{version}-plugins-{type}s-{plugin}-cc"] ===== `cc` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The fully-qualified email address(es) to include as cc: address(es). @@ -119,7 +121,7 @@ This field also accepts a comma-separated string of addresses, for example: [id="{version}-plugins-{type}s-{plugin}-contenttype"] ===== `contenttype` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"text/html; charset=UTF-8"` contenttype : for multipart messages, set the content-type and/or charset of the HTML part. @@ -128,7 +130,7 @@ NOTE: this may not be functional (KH) [id="{version}-plugins-{type}s-{plugin}-debug"] ===== `debug` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Run the mail relay in debug mode @@ -136,7 +138,7 @@ Run the mail relay in debug mode [id="{version}-plugins-{type}s-{plugin}-domain"] ===== `domain` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"localhost"` Domain used to send the email messages @@ -144,7 +146,7 @@ Domain used to send the email messages [id="{version}-plugins-{type}s-{plugin}-from"] ===== `from` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash.alert@nowhere.com"` The fully-qualified email address for the From: field in the email. @@ -152,7 +154,7 @@ The fully-qualified email address for the From: field in the email. [id="{version}-plugins-{type}s-{plugin}-htmlbody"] ===== `htmlbody` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` HTML Body for the email, which may contain HTML markup. @@ -160,7 +162,7 @@ HTML Body for the email, which may contain HTML markup. 
[id="{version}-plugins-{type}s-{plugin}-password"] ===== `password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Password to authenticate with the server @@ -168,7 +170,7 @@ Password to authenticate with the server [id="{version}-plugins-{type}s-{plugin}-port"] ===== `port` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `25` Port used to communicate with the mail server @@ -176,7 +178,7 @@ Port used to communicate with the mail server [id="{version}-plugins-{type}s-{plugin}-replyto"] ===== `replyto` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The fully qualified email address for the Reply-To: field. @@ -184,7 +186,7 @@ The fully qualified email address for the Reply-To: field. [id="{version}-plugins-{type}s-{plugin}-subject"] ===== `subject` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Subject: for the email. @@ -193,7 +195,7 @@ Subject: for the email. ===== `to` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The fully-qualified email address to send the email to. @@ -206,7 +208,7 @@ You can also use dynamic fields from the event with the `%{fieldname}` syntax. [id="{version}-plugins-{type}s-{plugin}-use_tls"] ===== `use_tls` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enables TLS when communicating with the server @@ -214,7 +216,7 @@ Enables TLS when communicating with the server [id="{version}-plugins-{type}s-{plugin}-username"] ===== `username` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Username to authenticate with the server @@ -222,7 +224,7 @@ Username to authenticate with the server [id="{version}-plugins-{type}s-{plugin}-via"] ===== `via` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"smtp"` How Logstash should send the email, either via SMTP or by invoking sendmail. 
diff --git a/docs/versioned-plugins/outputs/email-v4.0.6.asciidoc b/docs/versioned-plugins/outputs/email-v4.0.6.asciidoc index 303be1097..065849e2a 100644 --- a/docs/versioned-plugins/outputs/email-v4.0.6.asciidoc +++ b/docs/versioned-plugins/outputs/email-v4.0.6.asciidoc @@ -48,24 +48,24 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-address>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-attachments>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-authentication>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-body>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-cc>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-contenttype>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-debug>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-domain>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-from>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-htmlbody>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-password>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-replyto>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-subject>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-to>> |<>|Yes -| <<{version}-plugins-{type}s-{plugin}-use_tls>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-username>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-via>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-address>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-attachments>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-authentication>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-body>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-cc>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-contenttype>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-debug>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-domain>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-from>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-htmlbody>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-password>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-replyto>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-subject>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-to>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes +| <<{version}-plugins-{type}s-{plugin}-use_tls>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-username>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| 
<<{version}-plugins-{type}s-{plugin}-via>> |{logstash-ref}/configuration-file-structure.html#string[string]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -76,7 +76,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-address"] ===== `address` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"localhost"` The address used to connect to the mail server @@ -84,7 +84,7 @@ The address used to connect to the mail server [id="{version}-plugins-{type}s-{plugin}-attachments"] ===== `attachments` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[]` Attachments - specify the name(s) and location(s) of the files. @@ -92,7 +92,7 @@ Attachments - specify the name(s) and location(s) of the files. [id="{version}-plugins-{type}s-{plugin}-authentication"] ===== `authentication` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Authentication method used when identifying with the server @@ -100,7 +100,7 @@ Authentication method used when identifying with the server [id="{version}-plugins-{type}s-{plugin}-body"] ===== `body` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Body for the email - plain text only. @@ -108,7 +108,7 @@ Body for the email - plain text only. [id="{version}-plugins-{type}s-{plugin}-cc"] ===== `cc` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The fully-qualified email address(es) to include as cc: address(es). @@ -119,7 +119,7 @@ This field also accepts a comma-separated string of addresses, for example: [id="{version}-plugins-{type}s-{plugin}-contenttype"] ===== `contenttype` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"text/html; charset=UTF-8"` contenttype : for multipart messages, set the content-type and/or charset of the HTML part. @@ -128,7 +128,7 @@ NOTE: this may not be functional (KH) [id="{version}-plugins-{type}s-{plugin}-debug"] ===== `debug` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Run the mail relay in debug mode @@ -136,7 +136,7 @@ Run the mail relay in debug mode [id="{version}-plugins-{type}s-{plugin}-domain"] ===== `domain` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"localhost"` The HELO/EHLO domain name used in the greeting message when connecting @@ -146,7 +146,7 @@ actual hostname of the connecting client. [id="{version}-plugins-{type}s-{plugin}-from"] ===== `from` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"logstash.alert@example.com"` The fully-qualified email address for the From: field in the email. @@ -154,7 +154,7 @@ The fully-qualified email address for the From: field in the email. 
[id="{version}-plugins-{type}s-{plugin}-htmlbody"] ===== `htmlbody` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` HTML Body for the email, which may contain HTML markup. @@ -162,7 +162,7 @@ HTML Body for the email, which may contain HTML markup. [id="{version}-plugins-{type}s-{plugin}-password"] ===== `password` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Password to authenticate with the server @@ -170,7 +170,7 @@ Password to authenticate with the server [id="{version}-plugins-{type}s-{plugin}-port"] ===== `port` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `25` Port used to communicate with the mail server @@ -178,7 +178,7 @@ Port used to communicate with the mail server [id="{version}-plugins-{type}s-{plugin}-replyto"] ===== `replyto` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The fully qualified email address for the Reply-To: field. @@ -186,7 +186,7 @@ The fully qualified email address for the Reply-To: field. [id="{version}-plugins-{type}s-{plugin}-subject"] ===== `subject` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `""` Subject: for the email. @@ -195,7 +195,7 @@ Subject: for the email. ===== `to` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The fully-qualified email address to send the email to. @@ -208,7 +208,7 @@ You can also use dynamic fields from the event with the `%{fieldname}` syntax. [id="{version}-plugins-{type}s-{plugin}-use_tls"] ===== `use_tls` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Enables TLS when communicating with the server @@ -216,7 +216,7 @@ Enables TLS when communicating with the server [id="{version}-plugins-{type}s-{plugin}-username"] ===== `username` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. Username to authenticate with the server @@ -224,7 +224,7 @@ Username to authenticate with the server [id="{version}-plugins-{type}s-{plugin}-via"] ===== `via` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"smtp"` How Logstash should send the email, either via SMTP or by invoking sendmail. diff --git a/docs/versioned-plugins/outputs/file-v4.0.2.asciidoc b/docs/versioned-plugins/outputs/file-v4.0.2.asciidoc index aabd92a39..d81d33861 100644 --- a/docs/versioned-plugins/outputs/file-v4.0.2.asciidoc +++ b/docs/versioned-plugins/outputs/file-v4.0.2.asciidoc @@ -12,7 +12,7 @@ START - GENERATED VARIABLES, DO NOT EDIT! END - GENERATED VARIABLES, DO NOT EDIT! 
/////////////////////////////////////////// -[id="plugins-{type}-{plugin}"] +[id="{version}-plugins-{type}s-{plugin}"] === File output plugin {version} @@ -41,13 +41,13 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-file_mode>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-gzip>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-file_mode>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-gzip>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -58,7 +58,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-create_if_deleted"] ===== `create_if_deleted` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` If the configured file is deleted, but an event is handled by the plugin, @@ -67,7 +67,7 @@ the plugin will recreate the file. Default => true [id="{version}-plugins-{type}s-{plugin}-dir_mode"] ===== `dir_mode` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `-1` Dir access mode to use. Note that due to the bug in jruby system umask @@ -78,7 +78,7 @@ Example: `"dir_mode" => 0750` [id="{version}-plugins-{type}s-{plugin}-file_mode"] ===== `file_mode` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `-1` File access mode to use. Note that due to the bug in jruby system umask @@ -89,7 +89,7 @@ Example: `"file_mode" => 0640` [id="{version}-plugins-{type}s-{plugin}-filename_failure"] ===== `filename_failure` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"_filepath_failures"` If the generated path is invalid, the events will be saved @@ -98,7 +98,7 @@ into this file and inside the defined path. [id="{version}-plugins-{type}s-{plugin}-flush_interval"] ===== `flush_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Flush interval (in seconds) for flushing writes to log files. 
@@ -107,7 +107,7 @@ Flush interval (in seconds) for flushing writes to log files. [id="{version}-plugins-{type}s-{plugin}-gzip"] ===== `gzip` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Gzip the output stream before writing to disk. @@ -116,7 +116,7 @@ Gzip the output stream before writing to disk. ===== `path` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The path to the file to write. Event fields can be used here, diff --git a/docs/versioned-plugins/outputs/file-v4.1.0.asciidoc b/docs/versioned-plugins/outputs/file-v4.1.0.asciidoc index 17a94dfd7..286f07dea 100644 --- a/docs/versioned-plugins/outputs/file-v4.1.0.asciidoc +++ b/docs/versioned-plugins/outputs/file-v4.1.0.asciidoc @@ -41,13 +41,13 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-file_mode>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-gzip>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-file_mode>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-gzip>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -58,7 +58,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-create_if_deleted"] ===== `create_if_deleted` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` If the configured file is deleted, but an event is handled by the plugin, @@ -67,7 +67,7 @@ the plugin will recreate the file. Default => true [id="{version}-plugins-{type}s-{plugin}-dir_mode"] ===== `dir_mode` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `-1` Dir access mode to use. Note that due to the bug in jruby system umask @@ -78,7 +78,7 @@ Example: `"dir_mode" => 0750` [id="{version}-plugins-{type}s-{plugin}-file_mode"] ===== `file_mode` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `-1` File access mode to use. 
Note that due to the bug in jruby system umask @@ -89,7 +89,7 @@ Example: `"file_mode" => 0640` [id="{version}-plugins-{type}s-{plugin}-filename_failure"] ===== `filename_failure` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"_filepath_failures"` If the generated path is invalid, the events will be saved @@ -98,7 +98,7 @@ into this file and inside the defined path. [id="{version}-plugins-{type}s-{plugin}-flush_interval"] ===== `flush_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Flush interval (in seconds) for flushing writes to log files. @@ -107,7 +107,7 @@ Flush interval (in seconds) for flushing writes to log files. [id="{version}-plugins-{type}s-{plugin}-gzip"] ===== `gzip` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Gzip the output stream before writing to disk. @@ -116,7 +116,7 @@ Gzip the output stream before writing to disk. ===== `path` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The path to the file to write. Event fields can be used here, diff --git a/docs/versioned-plugins/outputs/file-v4.1.1.asciidoc b/docs/versioned-plugins/outputs/file-v4.1.1.asciidoc index d2f3e389f..6244718ab 100644 --- a/docs/versioned-plugins/outputs/file-v4.1.1.asciidoc +++ b/docs/versioned-plugins/outputs/file-v4.1.1.asciidoc @@ -41,13 +41,13 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-file_mode>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-gzip>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-file_mode>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-gzip>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -58,7 +58,7 @@ output plugins. 
[id="{version}-plugins-{type}s-{plugin}-create_if_deleted"] ===== `create_if_deleted` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` If the configured file is deleted, but an event is handled by the plugin, @@ -67,7 +67,7 @@ the plugin will recreate the file. Default => true [id="{version}-plugins-{type}s-{plugin}-dir_mode"] ===== `dir_mode` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `-1` Dir access mode to use. Note that due to the bug in jruby system umask @@ -78,7 +78,7 @@ Example: `"dir_mode" => 0750` [id="{version}-plugins-{type}s-{plugin}-file_mode"] ===== `file_mode` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `-1` File access mode to use. Note that due to the bug in jruby system umask @@ -89,7 +89,7 @@ Example: `"file_mode" => 0640` [id="{version}-plugins-{type}s-{plugin}-filename_failure"] ===== `filename_failure` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"_filepath_failures"` If the generated path is invalid, the events will be saved @@ -98,7 +98,7 @@ into this file and inside the defined path. [id="{version}-plugins-{type}s-{plugin}-flush_interval"] ===== `flush_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Flush interval (in seconds) for flushing writes to log files. @@ -107,7 +107,7 @@ Flush interval (in seconds) for flushing writes to log files. [id="{version}-plugins-{type}s-{plugin}-gzip"] ===== `gzip` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Gzip the output stream before writing to disk. @@ -116,7 +116,7 @@ Gzip the output stream before writing to disk. ===== `path` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The path to the file to write. 
Event fields can be used here, diff --git a/docs/versioned-plugins/outputs/file-v4.1.2.asciidoc b/docs/versioned-plugins/outputs/file-v4.1.2.asciidoc index d4e2b076c..a56ecad98 100644 --- a/docs/versioned-plugins/outputs/file-v4.1.2.asciidoc +++ b/docs/versioned-plugins/outputs/file-v4.1.2.asciidoc @@ -41,13 +41,13 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-file_mode>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-gzip>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-file_mode>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-gzip>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -58,7 +58,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-create_if_deleted"] ===== `create_if_deleted` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` If the configured file is deleted, but an event is handled by the plugin, @@ -67,7 +67,7 @@ the plugin will recreate the file. Default => true [id="{version}-plugins-{type}s-{plugin}-dir_mode"] ===== `dir_mode` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `-1` Dir access mode to use. Note that due to the bug in jruby system umask @@ -78,7 +78,7 @@ Example: `"dir_mode" => 0750` [id="{version}-plugins-{type}s-{plugin}-file_mode"] ===== `file_mode` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `-1` File access mode to use. Note that due to the bug in jruby system umask @@ -89,7 +89,7 @@ Example: `"file_mode" => 0640` [id="{version}-plugins-{type}s-{plugin}-filename_failure"] ===== `filename_failure` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"_filepath_failures"` If the generated path is invalid, the events will be saved @@ -98,7 +98,7 @@ into this file and inside the defined path. 
[id="{version}-plugins-{type}s-{plugin}-flush_interval"] ===== `flush_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Flush interval (in seconds) for flushing writes to log files. @@ -107,7 +107,7 @@ Flush interval (in seconds) for flushing writes to log files. [id="{version}-plugins-{type}s-{plugin}-gzip"] ===== `gzip` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Gzip the output stream before writing to disk. @@ -116,7 +116,7 @@ Gzip the output stream before writing to disk. ===== `path` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The path to the file to write. Event fields can be used here, diff --git a/docs/versioned-plugins/outputs/file-v4.2.0.asciidoc b/docs/versioned-plugins/outputs/file-v4.2.0.asciidoc index b952d9d46..578cecaf0 100644 --- a/docs/versioned-plugins/outputs/file-v4.2.0.asciidoc +++ b/docs/versioned-plugins/outputs/file-v4.2.0.asciidoc @@ -41,13 +41,13 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-file_mode>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-gzip>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-file_mode>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-gzip>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -58,7 +58,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-create_if_deleted"] ===== `create_if_deleted` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` If the configured file is deleted, but an event is handled by the plugin, @@ -67,7 +67,7 @@ the plugin will recreate the file. Default => true [id="{version}-plugins-{type}s-{plugin}-dir_mode"] ===== `dir_mode` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `-1` Dir access mode to use. 
Note that due to the bug in jruby system umask @@ -78,7 +78,7 @@ Example: `"dir_mode" => 0750` [id="{version}-plugins-{type}s-{plugin}-file_mode"] ===== `file_mode` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `-1` File access mode to use. Note that due to the bug in jruby system umask @@ -89,7 +89,7 @@ Example: `"file_mode" => 0640` [id="{version}-plugins-{type}s-{plugin}-filename_failure"] ===== `filename_failure` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"_filepath_failures"` If the generated path is invalid, the events will be saved @@ -98,7 +98,7 @@ into this file and inside the defined path. [id="{version}-plugins-{type}s-{plugin}-flush_interval"] ===== `flush_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Flush interval (in seconds) for flushing writes to log files. @@ -107,7 +107,7 @@ Flush interval (in seconds) for flushing writes to log files. [id="{version}-plugins-{type}s-{plugin}-gzip"] ===== `gzip` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Gzip the output stream before writing to disk. @@ -116,7 +116,7 @@ Gzip the output stream before writing to disk. ===== `path` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The path to the file to write. Event fields can be used here, diff --git a/docs/versioned-plugins/outputs/file-v4.2.1.asciidoc b/docs/versioned-plugins/outputs/file-v4.2.1.asciidoc index c12eb4cc1..1da9c8f14 100644 --- a/docs/versioned-plugins/outputs/file-v4.2.1.asciidoc +++ b/docs/versioned-plugins/outputs/file-v4.2.1.asciidoc @@ -41,13 +41,13 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-file_mode>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-gzip>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-path>> |<>|Yes +| <<{version}-plugins-{type}s-{plugin}-create_if_deleted>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-dir_mode>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-file_mode>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-filename_failure>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-flush_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-gzip>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-path>> |{logstash-ref}/configuration-file-structure.html#string[string]|Yes |======================================================================= Also see 
<<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -58,7 +58,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-create_if_deleted"] ===== `create_if_deleted` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `true` If the configured file is deleted, but an event is handled by the plugin, @@ -67,7 +67,7 @@ the plugin will recreate the file. Default => true [id="{version}-plugins-{type}s-{plugin}-dir_mode"] ===== `dir_mode` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `-1` Dir access mode to use. Note that due to the bug in jruby system umask @@ -78,7 +78,7 @@ Example: `"dir_mode" => 0750` [id="{version}-plugins-{type}s-{plugin}-file_mode"] ===== `file_mode` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `-1` File access mode to use. Note that due to the bug in jruby system umask @@ -89,7 +89,7 @@ Example: `"file_mode" => 0640` [id="{version}-plugins-{type}s-{plugin}-filename_failure"] ===== `filename_failure` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"_filepath_failures"` If the generated path is invalid, the events will be saved @@ -98,7 +98,7 @@ into this file and inside the defined path. [id="{version}-plugins-{type}s-{plugin}-flush_interval"] ===== `flush_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Flush interval (in seconds) for flushing writes to log files. @@ -107,7 +107,7 @@ Flush interval (in seconds) for flushing writes to log files. [id="{version}-plugins-{type}s-{plugin}-gzip"] ===== `gzip` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Gzip the output stream before writing to disk. @@ -116,7 +116,7 @@ Gzip the output stream before writing to disk. ===== `path` * This is a required setting. - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * There is no default value for this setting. The path to the file to write. Event fields can be used here, diff --git a/docs/versioned-plugins/outputs/graphite-v3.1.2.asciidoc b/docs/versioned-plugins/outputs/graphite-v3.1.2.asciidoc index 66a44ee5d..bce20b8e2 100644 --- a/docs/versioned-plugins/outputs/graphite-v3.1.2.asciidoc +++ b/docs/versioned-plugins/outputs/graphite-v3.1.2.asciidoc @@ -12,7 +12,7 @@ START - GENERATED VARIABLES, DO NOT EDIT! END - GENERATED VARIABLES, DO NOT EDIT! 
/////////////////////////////////////////// -[id="plugins-{type}-{plugin}"] +[id="{version}-plugins-{type}s-{plugin}"] === Graphite output plugin {version} @@ -35,17 +35,17 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-exclude_metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fields_are_metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metrics_format>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-nested_object_separator>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-resend_on_failure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timestamp_field>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclude_metrics>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-fields_are_metrics>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-include_metrics>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-metrics>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-metrics_format>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-nested_object_separator>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-resend_on_failure>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timestamp_field>> |{logstash-ref}/configuration-file-structure.html#string[string]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -56,7 +56,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-exclude_metrics"] ===== `exclude_metrics` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `["%{[^}]+}"]` Exclude regex matched metric names, by default exclude unresolved %{field} strings. @@ -64,7 +64,7 @@ Exclude regex matched metric names, by default exclude unresolved %{field} strin [id="{version}-plugins-{type}s-{plugin}-fields_are_metrics"] ===== `fields_are_metrics` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` An array indicating that these event fields should be treated as metrics @@ -74,7 +74,7 @@ or `metrics`, but not both. 
[id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"localhost"` The hostname or IP address of the Graphite server. @@ -82,7 +82,7 @@ The hostname or IP address of the Graphite server. [id="{version}-plugins-{type}s-{plugin}-include_metrics"] ===== `include_metrics` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[".*"]` Include only regex matched metric names. @@ -90,7 +90,7 @@ Include only regex matched metric names. [id="{version}-plugins-{type}s-{plugin}-metrics"] ===== `metrics` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * Default value is `{}` The metric(s) to use. This supports dynamic strings like %{host} @@ -106,7 +106,7 @@ but not both. [id="{version}-plugins-{type}s-{plugin}-metrics_format"] ===== `metrics_format` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"*"` Defines the format of the metric string. The placeholder '*' will be @@ -119,7 +119,7 @@ NOTE: If no metrics_format is defined, the name of the metric will be used as fa [id="{version}-plugins-{type}s-{plugin}-nested_object_separator"] ===== `nested_object_separator` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"."` When hashes are passed in as values they are broken out into a dotted notation @@ -136,7 +136,7 @@ This config setting changes the separator from the '.' default. [id="{version}-plugins-{type}s-{plugin}-port"] ===== `port` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2003` The port to connect to on the Graphite server. @@ -144,7 +144,7 @@ The port to connect to on the Graphite server. [id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] ===== `reconnect_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Interval between reconnect attempts to Carbon. @@ -152,7 +152,7 @@ Interval between reconnect attempts to Carbon. [id="{version}-plugins-{type}s-{plugin}-resend_on_failure"] ===== `resend_on_failure` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Should metrics be resent on failure? @@ -160,7 +160,7 @@ Should metrics be resent on failure? 
[id="{version}-plugins-{type}s-{plugin}-timestamp_field"] ===== `timestamp_field` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"@timestamp"` Use this field for the timestamp instead of '@timestamp' which is the diff --git a/docs/versioned-plugins/outputs/graphite-v3.1.3.asciidoc b/docs/versioned-plugins/outputs/graphite-v3.1.3.asciidoc index 397880ed7..cc5d3c33f 100644 --- a/docs/versioned-plugins/outputs/graphite-v3.1.3.asciidoc +++ b/docs/versioned-plugins/outputs/graphite-v3.1.3.asciidoc @@ -35,17 +35,17 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-exclude_metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fields_are_metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metrics_format>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-nested_object_separator>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-resend_on_failure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timestamp_field>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclude_metrics>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-fields_are_metrics>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-include_metrics>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-metrics>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-metrics_format>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-nested_object_separator>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-resend_on_failure>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timestamp_field>> |{logstash-ref}/configuration-file-structure.html#string[string]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -56,7 +56,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-exclude_metrics"] ===== `exclude_metrics` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `["%{[^}]+}"]` Exclude regex matched metric names, by default exclude unresolved %{field} strings. 
@@ -64,7 +64,7 @@ Exclude regex matched metric names, by default exclude unresolved %{field} strin [id="{version}-plugins-{type}s-{plugin}-fields_are_metrics"] ===== `fields_are_metrics` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` An array indicating that these event fields should be treated as metrics @@ -74,7 +74,7 @@ or `metrics`, but not both. [id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"localhost"` The hostname or IP address of the Graphite server. @@ -82,7 +82,7 @@ The hostname or IP address of the Graphite server. [id="{version}-plugins-{type}s-{plugin}-include_metrics"] ===== `include_metrics` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[".*"]` Include only regex matched metric names. @@ -90,7 +90,7 @@ Include only regex matched metric names. [id="{version}-plugins-{type}s-{plugin}-metrics"] ===== `metrics` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * Default value is `{}` The metric(s) to use. This supports dynamic strings like %{host} @@ -106,7 +106,7 @@ but not both. [id="{version}-plugins-{type}s-{plugin}-metrics_format"] ===== `metrics_format` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"*"` Defines the format of the metric string. The placeholder '*' will be @@ -119,7 +119,7 @@ NOTE: If no metrics_format is defined, the name of the metric will be used as fa [id="{version}-plugins-{type}s-{plugin}-nested_object_separator"] ===== `nested_object_separator` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"."` When hashes are passed in as values they are broken out into a dotted notation @@ -136,7 +136,7 @@ This config setting changes the separator from the '.' default. [id="{version}-plugins-{type}s-{plugin}-port"] ===== `port` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2003` The port to connect to on the Graphite server. @@ -144,7 +144,7 @@ The port to connect to on the Graphite server. [id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] ===== `reconnect_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Interval between reconnect attempts to Carbon. @@ -152,7 +152,7 @@ Interval between reconnect attempts to Carbon. [id="{version}-plugins-{type}s-{plugin}-resend_on_failure"] ===== `resend_on_failure` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Should metrics be resent on failure? @@ -160,7 +160,7 @@ Should metrics be resent on failure? 
[id="{version}-plugins-{type}s-{plugin}-timestamp_field"] ===== `timestamp_field` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"@timestamp"` Use this field for the timestamp instead of '@timestamp' which is the diff --git a/docs/versioned-plugins/outputs/graphite-v3.1.4.asciidoc b/docs/versioned-plugins/outputs/graphite-v3.1.4.asciidoc index ed950aa5e..972083f6b 100644 --- a/docs/versioned-plugins/outputs/graphite-v3.1.4.asciidoc +++ b/docs/versioned-plugins/outputs/graphite-v3.1.4.asciidoc @@ -35,17 +35,17 @@ This plugin supports the following configuration options plus the <<{version}-pl [cols="<,<,<",options="header",] |======================================================================= |Setting |Input type|Required -| <<{version}-plugins-{type}s-{plugin}-exclude_metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-fields_are_metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-host>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-include_metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metrics>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-metrics_format>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-nested_object_separator>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-port>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-resend_on_failure>> |<>|No -| <<{version}-plugins-{type}s-{plugin}-timestamp_field>> |<>|No +| <<{version}-plugins-{type}s-{plugin}-exclude_metrics>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-fields_are_metrics>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-host>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-include_metrics>> |{logstash-ref}/configuration-file-structure.html#array[array]|No +| <<{version}-plugins-{type}s-{plugin}-metrics>> |{logstash-ref}/configuration-file-structure.html#hash[hash]|No +| <<{version}-plugins-{type}s-{plugin}-metrics_format>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-nested_object_separator>> |{logstash-ref}/configuration-file-structure.html#string[string]|No +| <<{version}-plugins-{type}s-{plugin}-port>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-reconnect_interval>> |{logstash-ref}/configuration-file-structure.html#number[number]|No +| <<{version}-plugins-{type}s-{plugin}-resend_on_failure>> |{logstash-ref}/configuration-file-structure.html#boolean[boolean]|No +| <<{version}-plugins-{type}s-{plugin}-timestamp_field>> |{logstash-ref}/configuration-file-structure.html#string[string]|No |======================================================================= Also see <<{version}-plugins-{type}s-{plugin}-common-options>> for a list of options supported by all @@ -56,7 +56,7 @@ output plugins. [id="{version}-plugins-{type}s-{plugin}-exclude_metrics"] ===== `exclude_metrics` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `["%{[^}]+}"]` Exclude regex matched metric names, by default exclude unresolved %{field} strings. 
@@ -64,7 +64,7 @@ Exclude regex matched metric names, by default exclude unresolved %{field} strin [id="{version}-plugins-{type}s-{plugin}-fields_are_metrics"] ===== `fields_are_metrics` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` An array indicating that these event fields should be treated as metrics @@ -74,7 +74,7 @@ or `metrics`, but not both. [id="{version}-plugins-{type}s-{plugin}-host"] ===== `host` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"localhost"` The hostname or IP address of the Graphite server. @@ -82,7 +82,7 @@ The hostname or IP address of the Graphite server. [id="{version}-plugins-{type}s-{plugin}-include_metrics"] ===== `include_metrics` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#array[array] * Default value is `[".*"]` Include only regex matched metric names. @@ -90,7 +90,7 @@ Include only regex matched metric names. [id="{version}-plugins-{type}s-{plugin}-metrics"] ===== `metrics` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#hash[hash] * Default value is `{}` The metric(s) to use. This supports dynamic strings like %{host} @@ -106,7 +106,7 @@ but not both. [id="{version}-plugins-{type}s-{plugin}-metrics_format"] ===== `metrics_format` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"*"` Defines the format of the metric string. The placeholder '*' will be @@ -119,7 +119,7 @@ NOTE: If no metrics_format is defined, the name of the metric will be used as fa [id="{version}-plugins-{type}s-{plugin}-nested_object_separator"] ===== `nested_object_separator` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"."` When hashes are passed in as values they are broken out into a dotted notation @@ -136,7 +136,7 @@ This config setting changes the separator from the '.' default. [id="{version}-plugins-{type}s-{plugin}-port"] ===== `port` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2003` The port to connect to on the Graphite server. @@ -144,7 +144,7 @@ The port to connect to on the Graphite server. [id="{version}-plugins-{type}s-{plugin}-reconnect_interval"] ===== `reconnect_interval` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#number[number] * Default value is `2` Interval between reconnect attempts to Carbon. @@ -152,7 +152,7 @@ Interval between reconnect attempts to Carbon. [id="{version}-plugins-{type}s-{plugin}-resend_on_failure"] ===== `resend_on_failure` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#boolean[boolean] * Default value is `false` Should metrics be resent on failure? @@ -160,7 +160,7 @@ Should metrics be resent on failure? [id="{version}-plugins-{type}s-{plugin}-timestamp_field"] ===== `timestamp_field` - * Value type is <> + * Value type is {logstash-ref}/configuration-file-structure.html#string[string] * Default value is `"@timestamp"` Use this field for the timestamp instead of '@timestamp' which is the
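As with the other outputs above, the graphite settings compose into a small output block. The sketch below is illustrative: the Carbon host and metric names are placeholders, and, per the `metrics_format` description above, the `*` placeholder is replaced with the metric name.

[source,ruby]
----
output {
  graphite {
    host           => "graphite.example.com"                # placeholder Carbon host
    port           => 2003                                  # the documented default port
    metrics        => { "logstash.events" => "%{count}" }   # placeholder metric/field pair
    metrics_format => "servers.*"                           # '*' becomes the metric name
  }
}
----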